 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 * This file is part of libFirm.
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * @brief   Reassociation
 * @author  Michael Beck
#include "iroptimize.h"
#include "irgraph_t.h"
#include "iropt_dbg.h"
#include "reassoc_t.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)
typedef struct walker_t {
    int   changes; /**< set if a reassociation took place */
    waitq *wq;     /**< a wait queue */
    NO_CONSTANT   = 0, /**< node is not constant */
    REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */
    REGION_CONST  = 4  /**< node is a constant expression in the current context,
                            use 4 here to simplify implementation of get_comm_Binop_ops() */
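/* Illustrative note (not from the original source): with the values 0, 1 and 4,
 * the expression class_a + 2*class_b used in get_comm_Binop_ops() yields a
 * distinct value for every pair of classes, e.g.
 *   REAL_CONSTANT + 2*NO_CONSTANT  = 1
 *   REGION_CONST  + 2*NO_CONSTANT  = 4
 *   REAL_CONSTANT + 2*REGION_CONST = 9
 * so a single switch statement can dispatch on both operand classes at once. */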
 * Returns whether a node is constant, i.e. is a Const node or
 * is loop invariant (called a region constant).
 * @param n      the node to be checked
 * @param block  a block that might be in a loop
static const_class_t get_const_class(const ir_node *n, const ir_node *block)
    /* constant nodes which can't be folded are region constants */
    if (is_irn_constlike(n))
     * Beware: Bad nodes are always loop-invariant, but
     * cannot be handled in later code, so filter them here.
    if (! is_Bad(n) && is_loop_invariant(n, block))
} /* get_const_class */
 * Returns the operands of a commutative bin-op; if one operand is
 * a region constant, it is returned as the second one.
 * Beware: Real constants must be returned with higher priority than
 * region constants, because they might be folded.
static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c)
    ir_node *op_a   = get_binop_left(binop);
    ir_node *op_b   = get_binop_right(binop);
    ir_node *block  = get_nodes_block(binop);
    int     class_a = get_const_class(op_a, block);
    int     class_b = get_const_class(op_b, block);
    assert(is_op_commutative(get_irn_op(binop)));
    switch (class_a + 2*class_b) {
    case REAL_CONSTANT + 2*REAL_CONSTANT:
        /* if both are constants, one might be a
         * pointer constant like NULL; return the other */
        if (mode_is_reference(get_irn_mode(op_a))) {
    case REAL_CONSTANT + 2*NO_CONSTANT:
    case REAL_CONSTANT + 2*REGION_CONST:
    case REGION_CONST  + 2*NO_CONSTANT:
} /* get_comm_Binop_ops */
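/* Illustrative example (not from the original source): for binop = Add(Const 3, x)
 * where x is neither constant nor loop invariant, this returns *a = x and
 * *c = Const 3, so callers can rely on any constant operand being second. */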
 * Reassociate a Sub: x - c = x + (-c)
static int reassoc_Sub(ir_node **in)
    ir_node *right = get_Sub_right(n);
    ir_mode *rmode = get_irn_mode(right);
    /* cannot handle SubIs(P, P) */
    if (mode_is_reference(rmode))
    block = get_nodes_block(n);
     * convert x - c => x + (-c)
    if (get_const_class(right, block) == REAL_CONSTANT) {
        ir_node *left = get_Sub_left(n);
        switch (get_const_class(left, block)) {
            irn = optimize_in_place(n);
            /* already constant, nothing to do */
        mode = get_irn_mode(n);
        dbi  = get_irn_dbg_info(n);
        /* Beware of SubP(P, Is) */
        irn = new_rd_Minus(dbi, block, right, rmode);
        irn = new_rd_Add(dbi, block, left, irn, mode);
        DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
            get_Sub_left(n), right, get_Sub_left(n), right));
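/* Illustrative example (not from the original source): a - 3 is rewritten to
 * a + (-3); the Minus of a Const folds to a constant, so the commutative Add
 * rules below can combine it further, e.g. (a - 3) + 5 becomes a + 2. */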
/** Retrieve a mode from the operands. We need this because
 * Add and Sub are allowed to operate on (P, Is).
static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
    m1 = get_irn_mode(op1);
    if (mode_is_reference(m1))
    m2 = get_irn_mode(op2);
    if (mode_is_reference(m2))
} /* get_mode_from_ops */
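/* Illustrative example (not from the original source): for the operand pair
 * (p, i) with p of reference mode P and i of integer mode Is, the reference
 * mode P wins, matching pointer arithmetic such as AddP(P, Is). */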
 * Reassociate a commutative Binop.
 * BEWARE: this rule leads to a potential loop, if
 * two operands are region constants and the third is a
 * constant, so avoid this situation.
static int reassoc_commutative(ir_node **node)
    ir_op   *op    = get_irn_op(n);
    ir_node *block = get_nodes_block(n);
    get_comm_Binop_ops(n, &t1, &c1);
    if (get_irn_op(t1) == op) {
        const_class_t c_c1, c_c2, c_t2;
        get_comm_Binop_ops(t1, &t2, &c2);
        /* do not optimize Bad nodes, will fail later */
        c_c1 = get_const_class(c1, block);
        c_c2 = get_const_class(c2, block);
        c_t2 = get_const_class(t2, block);
        if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
             ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
            /* All three are constant and either all are constant expressions
             * or two of them are:
             * then applying this rule would lead to a cycle.
             * Note that if t2 is a constant, so is c2; hence we save one test. */
        if ((c_c1 != NO_CONSTANT) /* & (c_c2 != NO_CONSTANT) */) {
            /* handles rules R7, R8, R9, R10:
             * convert c1 .OP. (c2 .OP. x) => x .OP. (c1 .OP. c2) */
            ir_node *irn, *in[2];
            ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
            /* It might happen that c1 and c2 have different modes, for
             * instance Is and Iu. */
            if (mode_c1 != mode_c2) {
                if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
                    /* get the bigger one */
                    if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
                        c2 = new_r_Conv(block, c2, mode_c1);
                    else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
                        c1 = new_r_Conv(block, c1, mode_c2);
                    /* Try to cast the real const */
                    if (c_c1 == REAL_CONSTANT)
                        c1 = new_r_Conv(block, c1, mode_c2);
                        c2 = new_r_Conv(block, c2, mode_c1);
            mode  = get_mode_from_ops(in[0], in[1]);
            in[1] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
            mode  = get_mode_from_ops(in[0], in[1]);
            irn   = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
            DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
                c1, get_irn_opname(n), c2, get_irn_opname(n), t2,
                t2, get_irn_opname(n), c1, get_irn_opname(n), c2));
             * In some rare cases it can really happen that we get the same
             * node back. This might happen in dead loops, where the Phi
             * nodes have already gone away. So check for this.
    if (get_irn_op(c1) == op) {
    if (get_irn_op(t1) == op) {
        ir_node *l = get_binop_left(t1);
        ir_node *r = get_binop_right(t1);
        c_r = get_const_class(r, block);
        if (c_r != NO_CONSTANT) {
             * Beware: don't do the following op if a constant was
             * placed below, else we will fall into a loop.
            /* convert x .OP. (x .OP. y) => y .OP. (x .OP. x) */
            ir_mode *mode_res = get_irn_mode(n);
            ir_mode *mode_c1  = get_irn_mode(c1);
            ir_node *irn, *in[2];
            in[1] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode_c1, 2, in));
            irn   = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode_res, 2, in));
            DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
                c1, get_irn_opname(n), l, get_irn_opname(n), r,
                r, get_irn_opname(n), c1, get_irn_opname(n), c1));
} /* reassoc_commutative */
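/* Illustrative example (not from the original source): for 3 + (x + 5) the
 * R7-style rewrite takes c1 = 3, t2 = x and c2 = 5 and builds x + (3 + 5),
 * which optimize_node() folds to x + 8. */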
static ir_op          *commutative_op;
static ir_node        *commutative_block;
static struct obstack commutative_args;
static void collect_args(ir_node *node)
    ir_node *left  = get_binop_left(node);
    ir_node *right = get_binop_right(node);
    if (get_irn_op(left) == commutative_op
            && (!get_irn_outs_computed(left) || get_irn_n_outs(left) == 1)) {
        obstack_ptr_grow(&commutative_args, left);
    if (get_irn_op(right) == commutative_op
            && (!get_irn_outs_computed(right) || get_irn_n_outs(right) == 1)) {
        obstack_ptr_grow(&commutative_args, right);
    ir_mode *mode = get_irn_mode(node);
    if (is_Add(node) && mode_is_reference(mode)) {
        assert(get_irn_mode(left) == mode || get_irn_mode(right) == mode);
        assert(get_irn_mode(left) == mode);
        assert(get_irn_mode(right) == mode);
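/* Illustrative note (not from the original source): collect_args() flattens a
 * tree of nodes of the same op, e.g. for (a + b) + (c + d) where both inner
 * Adds are used only once, the collected argument list is a, b, c, d. */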
static int compare_nodes(const ir_node *node1, const ir_node *node2)
    const_class_t class1 = get_const_class(node1, commutative_block);
    const_class_t class2 = get_const_class(node2, commutative_block);
    if (class1 == class2)
        // return get_irn_idx(node1) - get_irn_idx(node2);
    assert(class1 > class2);
static int compare_node_ptr(const void *e1, const void *e2)
    const ir_node *node1 = *((const ir_node *const*) e1);
    const ir_node *node2 = *((const ir_node *const*) e2);
    return compare_nodes(node1, node2);
static int reassoc_commutative(ir_node **n)
    commutative_op    = get_irn_op(node);
    commutative_block = get_nodes_block(node);
    /* collect all nodes with the same op type */
    n_args = obstack_object_size(&commutative_args) / sizeof(ir_node*);
    args   = obstack_finish(&commutative_args);
    /* shortcut: in most cases there's nothing to do */
    if (n_args == 2 && compare_nodes(args[0], args[1]) <= 0) {
        obstack_free(&commutative_args, args);
    /* sort the arguments */
    qsort(args, n_args, sizeof(ir_node*), compare_node_ptr);
    last = args[n_args-1];
    mode = get_irn_mode(last);
    for (i = n_args-2; i >= 0; --i) {
        /* AddP violates the assumption that all modes in args are equal...
         * we need some hacks to cope with this */
        mode_right = get_irn_mode(in[1]);
        if (mode_is_reference(mode_right)) {
            assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
            mode = get_irn_mode(in[1]);
        if (mode_right != mode) {
            assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
            in[1] = new_r_Conv(commutative_block, in[1], mode);
        /* TODO: produce useful debug info! */
        new_node = new_ir_node(NULL, current_ir_graph, commutative_block,
                               commutative_op, mode, 2, in);
        new_node = optimize_node(new_node);
    /* CSE often returns the old node again, only exchange if needed */
    exchange(node, last);
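/* Illustrative note (not from the original source): after sorting, constant
 * operands come last, and the chain is rebuilt right to left, so a + 3 + b + 5
 * (flattened to a, b, 3, 5) becomes a + (b + (3 + 5)), letting the two
 * constants fold. */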
#define reassoc_Add reassoc_commutative
#define reassoc_And reassoc_commutative
#define reassoc_Or  reassoc_commutative
#define reassoc_Eor reassoc_commutative
 * Reassociate using the commutative law for Mul and the distributive law for Mul and Add/Sub.
static int reassoc_Mul(ir_node **node)
    ir_node *add_sub, *c;
    if (reassoc_commutative(&n))
    get_comm_Binop_ops(n, &add_sub, &c);
    op = get_irn_op(add_sub);
    /* handles rules R11, R12, R13, R14, R15, R16, R17, R18, R19, R20 */
    if (op == op_Add || op == op_Sub) {
        ir_mode *mode = get_irn_mode(n);
        ir_node *irn, *block, *t1, *t2, *in[2];
        block = get_nodes_block(n);
        t1 = get_binop_left(add_sub);
        t2 = get_binop_right(add_sub);
        /* we can apply the multiplication rules only to integer arithmetic */
        if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
            in[0] = new_rd_Mul(NULL, block, c, t1, mode);
            in[1] = new_rd_Mul(NULL, block, c, t2, mode);
            irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
            /* In some cases it might happen that the new irn is equal to the old one, for instance
             * (x - 1) * y == x * y - y
             * will be transformed back by simpler optimizations.
             * We could switch simple optimizations off, but this only happens if y
             * is a loop-invariant expression and then it is not clear if the new form is better.
             * So, we keep the old one. */
            DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n",
                t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c));
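/* Illustrative example (not from the original source): (t1 + t2) * c is
 * rewritten to (c * t1) + (c * t2); with c = 3, t1 = a and t2 = 4 this gives
 * 3*a + 12, i.e. the constant part folds away. */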
 * Reassociate Shl. We transform Shl(x, const) into Mul nodes if possible.
static int reassoc_Shl(ir_node **node)
    ir_node *c = get_Shl_right(n);
    ir_node *x, *blk, *irn;
    mode = get_irn_mode(x);
    tv   = get_mode_one(mode);
    tv   = tarval_shl(tv, get_Const_tarval(c));
    if (tv == tarval_bad)
    blk = get_nodes_block(n);
    irn = new_rd_Mul(get_irn_dbg_info(n), blk, x, c, mode);
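/* Illustrative example (not from the original source): for x << 3 the tarval
 * computes 1 << 3 = 8, so the node can be rebuilt as x * 8, which may then
 * take part in the distributive and commutative rules above. */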
 * The walker for the reassociation.
static void wq_walker(ir_node *n, void *env)
    walker_t *wenv = env;
    set_irn_link(n, NULL);
    ir_node *blk = get_nodes_block(n);
    if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
        /* We are in a dead block; do not optimize or we may fall into an endless
         * loop. We check this here instead of requiring that all dead blocks are
         * removed, which passes such as cf_opt do not guarantee yet. */
    waitq_put(wenv->wq, n);
    set_irn_link(n, wenv->wq);
 * Perform the reassociation, processing the wait queue until a fixpoint is reached.
static void do_reassociation(walker_t *wenv)
    while (! waitq_empty(wenv->wq)) {
        n = waitq_get(wenv->wq);
        set_irn_link(n, NULL);
        blk = get_nodes_block(n);
        if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
            /* We are in a dead block; do not optimize or we may fall into an endless
             * loop. We check this here instead of requiring that all dead blocks are
             * removed, which passes such as cf_opt do not guarantee yet. */
        /* reassociation must run until a fixpoint is reached. */
            ir_op   *op   = get_irn_op(n);
            ir_mode *mode = get_irn_mode(n);
            /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
            if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
            if (op->ops.reassociate) {
                res = op->ops.reassociate(&n);
        wenv->changes |= changed;
        for (i = get_irn_arity(n) - 1; i >= 0; --i) {
            ir_node *pred = get_irn_n(n, i);
            if (get_irn_link(pred) != wenv->wq) {
                waitq_put(wenv->wq, pred);
                set_irn_link(pred, wenv->wq);
} /* do_reassociation */
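/* Illustrative note (not from the original source): op->ops.reassociate is the
 * per-opcode hook installed by firm_set_default_reassoc() below; e.g. for an
 * Add node it ends up in reassoc_commutative(), and requeuing repeats the
 * rewriting until chains such as ((x + 1) + 2) + 3 have collapsed completely. */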
 * Returns the earliest block where a and b are available.
 * Note that we know that a and b both dominate
 * the block of the previous operation, so one must dominate the other.
 * If the earliest block is the start block, return curr_blk instead.
static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk)
    ir_node *blk_a = get_nodes_block(a);
    ir_node *blk_b = get_nodes_block(b);
    /* if blk_a != blk_b, one must dominate the other */
    if (block_dominates(blk_a, blk_b))
    if (res == get_irg_start_block(current_ir_graph))
} /* earliest_block */
 * Checks whether a node is a constant expression.
 * The following trees are constant expressions:
 * Const, SymConst, Const + SymConst
 * Handling SymConsts as const might not be a good idea for all
static int is_constant_expr(ir_node *irn)
    switch (get_irn_opcode(irn)) {
        op = get_irn_op(get_Add_left(irn));
        if (op != op_Const && op != op_SymConst)
        op = get_irn_op(get_Add_right(irn));
        if (op != op_Const && op != op_SymConst)
} /* is_constant_expr */
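/* Illustrative example (not from the original source): Add(Const 8, SymConst &g)
 * counts as a constant expression, whereas Add(x, Const 8) with a non-constant
 * x does not. */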
 * Apply the distributive law for Mul and Add/Sub in reverse.
static int reverse_rule_distributive(ir_node **node)
    ir_node *left  = get_binop_left(n);
    ir_node *right = get_binop_right(n);
    ir_node *x, *blk, *curr_blk;
    ir_node *a, *b, *irn;
    op = get_irn_op(left);
    if (op != get_irn_op(right))
        x = get_Shl_right(left);
        if (x == get_Shl_right(right)) {
            /* (a << x) +/- (b << x) ==> (a +/- b) << x */
            a = get_Shl_left(left);
            b = get_Shl_left(right);
    } else if (op == op_Mul) {
        x = get_Mul_left(left);
        if (x == get_Mul_left(right)) {
            /* (x * a) +/- (x * b) ==> (a +/- b) * x */
            a = get_Mul_right(left);
            b = get_Mul_right(right);
        } else if (x == get_Mul_right(right)) {
            /* (x * a) +/- (b * x) ==> (a +/- b) * x */
            a = get_Mul_right(left);
            b = get_Mul_left(right);
        x = get_Mul_right(left);
        if (x == get_Mul_right(right)) {
            /* (a * x) +/- (b * x) ==> (a +/- b) * x */
            a = get_Mul_left(left);
            b = get_Mul_left(right);
        } else if (x == get_Mul_left(right)) {
            /* (a * x) +/- (x * b) ==> (a +/- b) * x */
            a = get_Mul_left(left);
            b = get_Mul_right(right);
    curr_blk = get_nodes_block(n);
    blk = earliest_block(a, b, curr_blk);
    dbg  = get_irn_dbg_info(n);
    mode = get_irn_mode(n);
    irn = new_rd_Add(dbg, blk, a, b, mode);
    irn = new_rd_Sub(dbg, blk, a, b, mode);
    blk = earliest_block(irn, x, curr_blk);
    irn = new_rd_Mul(dbg, blk, irn, x, mode);
    irn = new_rd_Shl(dbg, blk, irn, x, mode);
} /* reverse_rule_distributive */
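/* Illustrative example (not from the original source): a*x + b*x becomes
 * (a + b) * x, and (a << x) - (b << x) becomes (a - b) << x; the new nodes are
 * placed in the earliest block where both operands are available. */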
 * Move constants towards the root.
static int move_consts_up(ir_node **node)
    ir_node *l, *r, *a, *b, *c, *blk, *irn, *in[2];
    ir_mode *mode, *ma, *mb;
    l = get_binop_left(n);
    r = get_binop_right(n);
    /* check if one is already a constant expression */
    if (is_constant_expr(l) || is_constant_expr(r))
    dbg = get_irn_dbg_info(n);
    if (get_irn_op(l) == op) {
        /* (a .op. b) .op. r */
        a = get_binop_left(l);
        b = get_binop_right(l);
        if (is_constant_expr(a)) {
            /* (C .op. b) .op. r ==> (r .op. b) .op. C */
            blk = get_nodes_block(l);
            dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
        } else if (is_constant_expr(b)) {
            /* (a .op. C) .op. r ==> (a .op. r) .op. C */
            blk = get_nodes_block(l);
            dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
    if (get_irn_op(r) == op) {
        /* l .op. (a .op. b) */
        a = get_binop_left(r);
        b = get_binop_right(r);
        if (is_constant_expr(a)) {
            /* l .op. (C .op. b) ==> (l .op. b) .op. C */
            blk = get_nodes_block(r);
            dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
        } else if (is_constant_expr(b)) {
            /* l .op. (a .op. C) ==> (a .op. l) .op. C */
            blk = get_nodes_block(r);
            dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
    /* In some cases a and b might have different integer modes, and c might be
     * a SymConst. In that case we could either
     * 1.) cast into unsigned mode
     * 2.) ignore it
     * we implement the second here
     */
    ma = get_irn_mode(a);
    mb = get_irn_mode(b);
    if (ma != mb && mode_is_int(ma) && mode_is_int(mb))
    /* check if (a .op. b) can be calculated in the same block as the old instruction */
    if (! block_dominates(get_nodes_block(a), blk))
    if (! block_dominates(get_nodes_block(b), blk))
    mode  = get_mode_from_ops(a, b);
    in[0] = irn = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in));
    /* beware: optimize_node might have changed the opcode, check again */
    if (is_Add(irn) || is_Sub(irn)) {
        reverse_rule_distributive(&in[0]);
    mode = get_mode_from_ops(in[0], in[1]);
    irn  = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in));
} /* move_consts_up */
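/* Illustrative example (not from the original source): (a + 3) + b becomes
 * (a + b) + 3, so the constant sits at the root where it can combine with
 * other constants moved up from neighbouring subtrees. */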
 * Apply the rules in reverse order, removing code that was not collapsed.
static void reverse_rules(ir_node *node, void *env)
    walker_t *wenv = env;
    ir_mode  *mode = get_irn_mode(node);
    /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
    if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
    ir_op *op = get_irn_op(node);
    if (is_op_commutative(op)) {
        wenv->changes |= res = move_consts_up(&node);
        /* beware: move_consts_up might have changed the opcode, check again */
        if (is_Add(node) || is_Sub(node)) {
            wenv->changes |= res = reverse_rule_distributive(&node);
 * Do the reassociation.
int optimize_reassociation(ir_graph *irg)
    irg_loopinfo_state state;
    assert(get_irg_phase_state(irg) != phase_building);
    assert(get_irg_pinned(irg) != op_pin_state_floats &&
           "Reassociation needs pinned graph to work properly");
    rem = current_ir_graph;
    current_ir_graph = irg;
    /* we use dominance to detect dead blocks */
    assure_irg_outs(irg);
    obstack_init(&commutative_args);
     * Calculate loop info, so we can identify loop-invariant
     * code and treat it like a constant.
     * We only need control flow loops here but can handle generic
     * INTRA info as well.
    state = get_irg_loopinfo_state(irg);
    if ((state & loopinfo_inter) ||
        (state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid))
        construct_cf_backedges(irg);
    env.wq = new_waitq();
    /* disable some optimizations while reassoc is running to prevent endless loops */
    set_reassoc_running(1);
    /* now we have collected enough information, optimize */
    irg_walk_graph(irg, NULL, wq_walker, &env);
    do_reassociation(&env);
    /* reverse those rules that do not result in collapsed constants */
    irg_walk_graph(irg, NULL, reverse_rules, &env);
    set_reassoc_running(0);
    /* Handle graph state */
        set_irg_outs_inconsistent(irg);
        set_irg_loopinfo_inconsistent(irg);
    obstack_free(&commutative_args, NULL);
    current_ir_graph = rem;
} /* optimize_reassociation */
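/* Usage sketch (illustrative, not part of this file): assuming libFirm has
 * been initialized (which registers the handlers via firm_init_reassociation()),
 * the pass runs on one pinned graph at a time:
 *   int changed = optimize_reassociation(irg);
 * where the int return value is expected to report whether the graph changed,
 * accumulated from wenv->changes above. */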
/* create a pass for the reassociation */
ir_graph_pass_t *optimize_reassociation_pass(const char *name)
    return def_graph_pass_ret(name ? name : "reassoc", optimize_reassociation);
} /* optimize_reassociation_pass */
/* Sets the default reassociation operation for an ir_op_ops. */
ir_op_ops *firm_set_default_reassoc(ir_opcode code, ir_op_ops *ops)
#define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
} /* firm_set_default_reassoc */
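/* Illustrative note (not from the original source): the CASE macro expands to,
 * e.g., "case iro_Add: ops->reassociate = reassoc_Add; break", wiring each
 * supported opcode to the matching handler defined above. */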
/* initialize the reassociation by adding operations to some opcodes */
void firm_init_reassociation(void)
    FIRM_DBG_REGISTER(dbg, "firm.opt.reassoc");
} /* firm_init_reassociation */