X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Freassoc.c;h=dee03840c7b78dc4f636e136e4033a9814ed81ed;hb=11b00882d8ddbf207c31565a51030408c8fd646b;hp=1d06b2b416fd40e0a895b96a03aa413792e41c68;hpb=e540d91dbb89c02f9a766b31df9cacc430f34bea;p=libfirm

diff --git a/ir/opt/reassoc.c b/ir/opt/reassoc.c
index 1d06b2b41..dee03840c 100644
--- a/ir/opt/reassoc.c
+++ b/ir/opt/reassoc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
  *
  * This file is part of libFirm.
  *
@@ -21,12 +21,10 @@
  * @file
  * @brief   Reassociation
  * @author  Michael Beck
- * @version $Id$
  */
-#ifdef HAVE_CONFIG_H
 #include "config.h"
-#endif
 
+#include "iroptimize.h"
 #include "iropt_t.h"
 #include "irnode_t.h"
 #include "irgraph_t.h"
@@ -36,19 +34,17 @@
 #include "iropt_dbg.h"
 #include "irflag_t.h"
 #include "irgwalk.h"
+#include "irouts.h"
 #include "reassoc_t.h"
+#include "opt_init.h"
 #include "irhooks.h"
 #include "irloop.h"
 #include "pdeq.h"
 #include "debug.h"
+#include "irpass.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
-typedef struct _walker_t {
-    int   changes;   /**< set, if a reassociation take place */
-    waitq *wq;       /**< a wait queue */
-} walker_t;
-
 typedef enum {
     NO_CONSTANT   = 0,    /**< node is not constant */
     REAL_CONSTANT = 1,    /**< node is a Const that is suitable for constant folding */
@@ -63,16 +59,13 @@ typedef enum {
  * @param n      the node to be checked for constant
  * @param block  a block that might be in a loop
  */
-static const_class_t get_const_class(ir_node *n, ir_node *block)
+static const_class_t get_const_class(const ir_node *n, const ir_node *block)
 {
-    ir_op *op = get_irn_op(n);
-
-    if (op == op_Const)
+    if (is_Const(n))
         return REAL_CONSTANT;
 
-    /* although SymConst's are of course real constant, we cannot
-       fold them, so handle them like region constants */
-    if (op == op_SymConst)
+    /* constant nodes which can't be folded are region constants */
+    if (is_irn_constlike(n))
         return REGION_CONST;
 
     /*
@@ -168,12 +161,13 @@ static int reassoc_Sub(ir_node **in)
         /* already constant, nothing to do */
         return 0;
     }
+    mode = get_irn_mode(n);
     dbi  = get_irn_dbg_info(n);
 
     /* Beware of SubP(P, Is) */
-    irn = new_rd_Minus(dbi, current_ir_graph, block, right, rmode);
-    irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n));
+    irn = new_rd_Minus(dbi, block, right, rmode);
+    irn = new_rd_Add(dbi, block, left, irn, mode);
 
     DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
         get_Sub_left(n), right, get_Sub_left(n), right));
@@ -219,7 +213,7 @@ static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2)
 static int reassoc_commutative(ir_node **node)
 {
     ir_node *n     = *node;
-    ir_op *op      = get_irn_op(n);
+    ir_op   *op    = get_irn_op(n);
     ir_node *block = get_nodes_block(n);
     ir_node *t1, *c1;
 
@@ -241,7 +235,8 @@ static int reassoc_commutative(ir_node **node)
         if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
              ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
-            /* All three are constant and either all are constant expressions or two of them are:
+            /* All three are constant and either all are constant expressions
+             * or two of them are:
              * then applying this rule would lead to a cycle
              *
              * Note that if t2 is a constant so is c2, hence we save one test.
@@ -255,23 +250,25 @@ static int reassoc_commutative(ir_node **node)
              */
             ir_node *irn, *in[2];
             ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
+            ir_graph *irg = get_irn_irg(c1);
 
-            /* It might happen, that c1 and c2 have different modes, for instance Is and Iu.
+            /* It might happen that c1 and c2 have different modes, for
+             * instance Is and Iu.
              * Handle this here. */
             if (mode_c1 != mode_c2) {
                 if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
                     /* get the bigger one */
                     if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
-                        c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
+                        c2 = new_r_Conv(block, c2, mode_c1);
                     else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
-                        c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
+                        c1 = new_r_Conv(block, c1, mode_c2);
                     else {
                         /* Try to cast the real const */
                         if (c_c1 == REAL_CONSTANT)
-                            c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
+                            c1 = new_r_Conv(block, c1, mode_c2);
                         else
-                            c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
+                            c2 = new_r_Conv(block, c2, mode_c1);
                     }
                 }
             }
@@ -280,20 +277,70 @@ static int reassoc_commutative(ir_node **node)
             in[1] = c2;
 
             mode  = get_mode_from_ops(in[0], in[1]);
-            in[1] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
+            in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));
             in[0] = t2;
 
             mode = get_mode_from_ops(in[0], in[1]);
-            irn  = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
+            irn  = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));
 
             DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
                 c1, get_irn_opname(n), c2, get_irn_opname(n), t2,
                 t2, get_irn_opname(n), c1, get_irn_opname(n), c2));
             /*
-             * In some rare cases it can really happen that we get the same node back.
-             * This might be happen in dead loops, were the Phi nodes are already gone away.
-             * So check this.
+             * In some rare cases it can really happen that we get the same
+             * node back. This might happen in dead loops, where the Phi
+             * nodes are already gone. So check this.
              */
             if (n != irn) {
                 exchange(n, irn);
                 *node = irn;
                 return 1;
             }
         }
     }
+    if (get_irn_op(c1) == op) {
+        ir_node *t = c1;
+        c1 = t1;
+        t1 = t;
+    }
+    if (get_irn_op(t1) == op) {
+        ir_node *l = get_binop_left(t1);
+        ir_node *r = get_binop_right(t1);
+        const_class_t c_r;
+
+        if (r == c1) {
+            ir_node *t = r;
+            r = l;
+            l = t;
+        }
+        c_r = get_const_class(r, block);
+        if (c_r != NO_CONSTANT) {
+            /*
+             * Beware: don't do the following op if a constant was
+             * placed below, else we will fall into a loop. */
+            return 0;
+        }
+
+        if (l == c1) {
+            /* convert x .OP. (x .OP. y) => y .OP. (x .OP. x) */
+            ir_mode *mode_res = get_irn_mode(n);
+            ir_mode *mode_c1  = get_irn_mode(c1);
+            ir_graph *irg     = get_irn_irg(c1);
+            ir_node *irn, *in[2];
+
+            in[0] = c1;
+            in[1] = c1;
+
+            in[1] = optimize_node(new_ir_node(NULL, irg, block, op, mode_c1, 2, in));
+            in[0] = r;
+
+            irn = optimize_node(new_ir_node(NULL, irg, block, op, mode_res, 2, in));
+
+            DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
+                c1, get_irn_opname(n), l, get_irn_opname(n), r,
+                r, get_irn_opname(n), c1, get_irn_opname(n), c1));
+            if (n != irn) {
                 exchange(n, irn);
                 *node = irn;
@@ -335,10 +382,11 @@ static int reassoc_Mul(ir_node **node)
 
     /* we can only apply multiplication rules to integer arithmetic */
     if (mode_is_int(get_irn_mode(t1)) && mode_is_int(get_irn_mode(t2))) {
-        in[0] = new_rd_Mul(NULL, current_ir_graph, block, c, t1, mode);
-        in[1] = new_rd_Mul(NULL, current_ir_graph, block, c, t2, mode);
+        ir_graph *irg = get_irn_irg(t1);
+        in[0] = new_rd_Mul(NULL, block, c, t1, mode);
+        in[1] = new_rd_Mul(NULL, block, c, t2, mode);
 
-        irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
+        irn = optimize_node(new_ir_node(NULL, irg, block, op, mode, 2, in));
 
         /* In some cases it might happen that the new irn is equal to the old one, for
         * instance in:
@@ -365,12 +413,14 @@
 /**
  * Reassociate Shl. We transform Shl(x, const) into Mul's if possible.
  */
-static int reassoc_Shl(ir_node **node) {
-    ir_node *n = *node;
-    ir_node *c = get_Shl_right(n);
-    ir_node *x, *blk, *irn;
-    ir_mode *mode;
-    tarval *tv;
+static int reassoc_Shl(ir_node **node)
+{
+    ir_node   *n = *node;
+    ir_node   *c = get_Shl_right(n);
+    ir_node   *x, *blk, *irn;
+    ir_graph  *irg;
+    ir_mode   *mode;
+    ir_tarval *tv;
 
     if (! is_Const(c))
         return 0;
@@ -385,8 +435,9 @@ static int reassoc_Shl(ir_node **node) {
         return 0;
 
     blk = get_nodes_block(n);
-    c   = new_r_Const(current_ir_graph, blk, mode, tv);
-    irn = new_rd_Mul(get_irn_dbg_info(n), current_ir_graph, blk, x, c, mode);
+    irg = get_irn_irg(blk);
+    c   = new_r_Const(irg, tv);
+    irn = new_rd_Mul(get_irn_dbg_info(n), blk, x, c, mode);
 
     if (irn != n) {
         exchange(n, irn);
@@ -401,57 +452,39 @@ static int reassoc_Shl(ir_node **node) {
  */
 static void wq_walker(ir_node *n, void *env)
 {
-    walker_t *wenv = env;
+    waitq *const wq = (waitq*)env;
 
     set_irn_link(n, NULL);
-    if (is_no_Block(n)) {
-        ir_node *blk = get_nodes_block(n);
-
-        if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
-            /* We are in a dead block, do not optimize or we may fall into an endless
-               loop. We check this here instead of requiring that all dead blocks are removed
-               which or cf_opt do not guarantee yet. */
-            return;
-        }
-        waitq_put(wenv->wq, n);
-        set_irn_link(n, wenv->wq);
+    if (!is_Block(n)) {
+        waitq_put(wq, n);
+        set_irn_link(n, wq);
     }
 }  /* wq_walker */
 
 /**
  * The walker for the reassociation.
  */
-static void do_reassociation(walker_t *wenv)
+static void do_reassociation(waitq *const wq)
 {
     int i, res, changed;
-    ir_node *n, *blk;
+    ir_node *n;
 
-
-    while (! waitq_empty(wenv->wq)) {
-        n = waitq_get(wenv->wq);
+    while (! waitq_empty(wq)) {
+        n = (ir_node*)waitq_get(wq);
         set_irn_link(n, NULL);
 
-        blk = get_nodes_block(n);
-        if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
-            /* We are in a dead block, do not optimize or we may fall into an endless
-               loop. We check this here instead of requiring that all dead blocks are removed
-               which or cf_opt do not guarantee yet. */
-            continue;
-        }
-
-
        hook_reassociate(1);
 
        /* reassociation must run until a fixpoint is reached. */
        changed = 0;
        do {
-            ir_op *op    = get_irn_op(n);
-            ir_mode *mode = get_irn_mode(n);
+            ir_op   *op   = get_irn_op(n);
+            ir_mode *mode = get_irn_mode(n);
 
             res = 0;
 
             /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
-            if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
+            if (mode_is_float(mode) && get_irg_fp_model(get_irn_irg(n)) & fp_strict_algebraic)
                 break;
 
             if (op->ops.reassociate) {
@@ -462,15 +495,13 @@ static void do_reassociation(walker_t *wenv)
         } while (res == 1);
         hook_reassociate(0);
 
-        wenv->changes |= changed;
-
         if (changed) {
             for (i = get_irn_arity(n) - 1; i >= 0; --i) {
                 ir_node *pred = get_irn_n(n, i);
 
-                if (get_irn_link(pred) != wenv->wq) {
-                    waitq_put(wenv->wq, pred);
-                    set_irn_link(pred, wenv->wq);
+                if (get_irn_link(pred) != wq) {
+                    waitq_put(wq, pred);
+                    set_irn_link(pred, wq);
                 }
             }
         }
@@ -479,12 +510,13 @@
 
 /**
  * Returns the earliest block where a, b are available.
- * Note that we know that we know that a, b both dominate
+ * Note that we know that a, b both dominate
  * the block of the previous operation, so one must dominate the other.
 *
 * If the earliest block is the start block, return curr_blk instead
 */
-static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk) {
+static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk)
+{
     ir_node *blk_a = get_nodes_block(a);
     ir_node *blk_b = get_nodes_block(b);
     ir_node *res;
@@ -494,7 +526,7 @@ static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk) {
         res = blk_b;
     else
         res = blk_a;
-    if (res == get_irg_start_block(current_ir_graph))
+    if (res == get_irg_start_block(get_irn_irg(curr_blk)))
         return curr_blk;
     return res;
 }  /* earliest_block */
@@ -508,21 +540,23 @@ static ir_node *earliest_block(ir_node *a, ir_node *b, ir_node *curr_blk) {
 * Handling SymConsts as const might not be a good idea for all
 * architectures ...
 */
-static int is_constant_expr(ir_node *irn) {
-    ir_op *op;
-
+static int is_constant_expr(ir_node *irn)
+{
     switch (get_irn_opcode(irn)) {
     case iro_Const:
     case iro_SymConst:
         return 1;
-    case iro_Add:
-        op = get_irn_op(get_Add_left(irn));
-        if (op != op_Const && op != op_SymConst)
+
+    case iro_Add: {
+        ir_node *const l = get_Add_left(irn);
+        if (!is_Const(l) && !is_SymConst(l))
             return 0;
-        op = get_irn_op(get_Add_right(irn));
-        if (op != op_Const && op != op_SymConst)
+        ir_node *const r = get_Add_right(irn);
+        if (!is_Const(r) && !is_SymConst(r))
             return 0;
         return 1;
+    }
+
     default:
         return 0;
     }
@@ -531,7 +565,8 @@ static int is_constant_expr(ir_node *irn) {
 /**
  * Apply the distributive law for Mul and Add/Sub
  */
-static int reverse_rule_distributive(ir_node **node) {
+static int reverse_rule_distributive(ir_node **node)
+{
     ir_node *n     = *node;
     ir_node *left  = get_binop_left(n);
     ir_node *right = get_binop_right(n);
@@ -549,7 +584,7 @@ static int reverse_rule_distributive(ir_node **node) {
         x = get_Shl_right(left);
 
         if (x == get_Shl_right(right)) {
-            /* (a << x) +/- (b << x) */
+            /* (a << x) +/- (b << x) ==> (a +/- b) << x */
             a = get_Shl_left(left);
             b = get_Shl_left(right);
             goto transform;
@@ -558,12 +593,12 @@ static int reverse_rule_distributive(ir_node **node) {
         x = get_Mul_left(left);
 
         if (x == get_Mul_left(right)) {
-            /* (x * a) +/- (x * b) */
+            /* (x * a) +/- (x * b) ==> (a +/- b) * x */
            a = get_Mul_right(left);
            b = get_Mul_right(right);
            goto transform;
        } else if (x == get_Mul_right(right)) {
-            /* (x * a) +/- (b * x) */
+            /* (x * a) +/- (b * x) ==> (a +/- b) * x */
            a = get_Mul_right(left);
            b = get_Mul_left(right);
            goto transform;
@@ -572,12 +607,12 @@ static int reverse_rule_distributive(ir_node **node) {
         x = get_Mul_right(left);
 
         if (x == get_Mul_right(right)) {
-            /* (a * x) +/- (b * x) */
+            /* (a * x) +/- (b * x) ==> (a +/- b) * x */
            a = get_Mul_left(left);
            b = get_Mul_left(right);
            goto transform;
        } else if (x == get_Mul_left(right)) {
-            /* (a * x) +/- (x * b) */
+            /* (a * x) +/- (x * b) ==> (a +/- b) * x */
            a = get_Mul_left(left);
            b = get_Mul_right(right);
            goto transform;
@@ -594,16 +629,16 @@ transform:
     mode = get_irn_mode(n);
 
     if (is_Add(n))
-        irn = new_rd_Add(dbg, current_ir_graph, blk, a, b, mode);
+        irn = new_rd_Add(dbg, blk, a, b, mode);
     else
-        irn = new_rd_Sub(dbg, current_ir_graph, blk, a, b, mode);
+        irn = new_rd_Sub(dbg, blk, a, b, mode);
 
     blk = earliest_block(irn, x, curr_blk);
 
     if (op == op_Mul)
-        irn = new_rd_Mul(dbg, current_ir_graph, blk, irn, x, mode);
+        irn = new_rd_Mul(dbg, blk, irn, x, mode);
     else
-        irn = new_rd_Shl(dbg, current_ir_graph, blk, irn, x, mode);
+        irn = new_rd_Shl(dbg, blk, irn, x, mode);
 
     exchange(n, irn);
     *node = irn;
@@ -613,12 +648,14 @@ transform:
 /**
  * Move Constants towards the root.
  */
-static int move_consts_up(ir_node **node) {
+static int move_consts_up(ir_node **node)
+{
     ir_node *n = *node;
     ir_op *op;
     ir_node *l, *r, *a, *b, *c, *blk, *irn, *in[2];
     ir_mode *mode, *ma, *mb;
     dbg_info *dbg;
+    ir_graph *irg;
 
     l = get_binop_left(n);
     r = get_binop_right(n);
@@ -630,33 +667,40 @@ static int move_consts_up(ir_node **node) {
     dbg = get_irn_dbg_info(n);
     op  = get_irn_op(n);
     if (get_irn_op(l) == op) {
+        /* (a .op. b) .op. r */
         a = get_binop_left(l);
         b = get_binop_right(l);
         if (is_constant_expr(a)) {
+            /* (C .op. b) .op. r ==> (r .op. b) .op. C */
            c = a;
            a = r;
            blk = get_nodes_block(l);
            dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
            goto transform;
        } else if (is_constant_expr(b)) {
+            /* (a .op. C) .op. r ==> (a .op. r) .op. C */
            c = b;
            b = r;
            blk = get_nodes_block(l);
            dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
            goto transform;
        }
-    } else if (get_irn_op(r) == op) {
+    }
+    if (get_irn_op(r) == op) {
+        /* l .op. (a .op. b) */
        a = get_binop_left(r);
        b = get_binop_right(r);
        if (is_constant_expr(a)) {
+            /* l .op. (C .op. b) ==> (l .op. b) .op. C */
            c = a;
            a = l;
            blk = get_nodes_block(r);
            dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
            goto transform;
        } else if (is_constant_expr(b)) {
+            /* l .op. (a .op. C) ==> (a .op. l) .op. C */
            c = b;
            b = l;
            blk = get_nodes_block(r);
@@ -678,7 +722,7 @@ transform:
     if (ma != mb && mode_is_int(ma) && mode_is_int(mb))
         return 0;
 
-    /* check if a+b can be calculted in the same block is the old instruction */
+    /* check if (a .op. b) can be calculated in the same block as the old instruction */
     if (! block_dominates(get_nodes_block(a), blk))
         return 0;
     if (! block_dominates(get_nodes_block(b), blk))
@@ -688,15 +732,17 @@ transform:
     in[1] = b;
 
     mode  = get_mode_from_ops(a, b);
-    in[0] = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in));
+    irg   = get_irn_irg(blk);
+    in[0] = irn = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));
 
-    if (op == op_Add || op == op_Sub) {
+    /* beware: optimize_node might have changed the opcode, check again */
+    if (is_Add(irn) || is_Sub(irn)) {
         reverse_rule_distributive(&in[0]);
     }
     in[1] = c;
 
     mode = get_mode_from_ops(in[0], in[1]);
-    irn = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in));
+    irn  = optimize_node(new_ir_node(dbg, irg, blk, op, mode, 2, in));
 
     exchange(n, irn);
     *node = irn;
@@ -706,13 +752,16 @@ transform:
 /**
  * Apply the rules in reverse order, removing code that was not collapsed
 */
-static void reverse_rules(ir_node *node, void *env) {
-    walker_t *wenv = env;
-    ir_mode *mode = get_irn_mode(node);
+static void reverse_rules(ir_node *node, void *env)
+{
+    (void)env;
+
+    ir_graph *irg  = get_irn_irg(node);
+    ir_mode  *mode = get_irn_mode(node);
     int res;
 
     /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
-    if (mode_is_float(mode) && get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)
+    if (mode_is_float(mode) && get_irg_fp_model(irg) & fp_strict_algebraic)
         return;
 
     do {
@@ -720,10 +769,11 @@ static void reverse_rules(ir_node *node, void *env) {
         res = 0;
 
         if (is_op_commutative(op)) {
-            wenv->changes |= res = move_consts_up(&node);
+            res = move_consts_up(&node);
         }
-        if (op == op_Add || op == op_Sub) {
-            wenv->changes |= res = reverse_rule_distributive(&node);
+        /* beware: move_consts_up might have changed the opcode, check again */
+        if (is_Add(node) || is_Sub(node)) {
+            res = reverse_rule_distributive(&node);
         }
     } while (res);
 }
@@ -733,70 +783,53 @@ static void reverse_rules(ir_node *node, void *env) {
 */
 void optimize_reassociation(ir_graph *irg)
 {
-    walker_t env;
-    irg_loopinfo_state state;
-
-    assert(get_irg_phase_state(irg) != phase_building);
     assert(get_irg_pinned(irg) != op_pin_state_floats &&
         "Reassociation needs pinned graph to work properly");
 
-    /* reassociation needs constant folding */
-    if (!get_opt_reassociation() || !get_opt_constant_folding())
-        return;
-
-    /* we use dominance to detect dead blocks */
-    assure_doms(irg);
-
-    /*
-     * Calculate loop info, so we could identify loop-invariant
-     * code and threat it like a constant.
-     * We only need control flow loops here but can handle generic
-     * INTRA info as well.
-     */
-    state = get_irg_loopinfo_state(irg);
-    if ((state & loopinfo_inter) ||
-        (state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid))
-        construct_cf_backedges(irg);
+    assure_irg_properties(irg,
+        IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+        | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
 
-    env.changes = 0;
-    env.wq      = new_waitq();
+    waitq *const wq = new_waitq();
 
-    /* now we have collected enough information, optimize */
-    irg_walk_graph(irg, NULL, wq_walker, &env);
-    do_reassociation(&env);
+    /* disable some optimizations while reassoc is running to prevent endless loops */
+    set_reassoc_running(1);
+    {
+        /* now we have collected enough information, optimize */
+        irg_walk_graph(irg, NULL, wq_walker, wq);
+        do_reassociation(wq);
 
-    /* reverse those rules that do not result in collapsed constants */
-    irg_walk_graph(irg, NULL, reverse_rules, &env);
-
-    /* Handle graph state */
-    if (env.changes) {
-        set_irg_outs_inconsistent(irg);
-        set_irg_loopinfo_inconsistent(irg);
+        /* reverse those rules that do not result in collapsed constants */
+        irg_walk_graph(irg, NULL, reverse_rules, NULL);
     }
+    set_reassoc_running(0);
+
+    del_waitq(wq);
 
-    del_waitq(env.wq);
+    confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_CONTROL_FLOW);
 }  /* optimize_reassociation */
 
-/* Sets the default reassociation operation for an ir_op_ops. */
-ir_op_ops *firm_set_default_reassoc(ir_opcode code, ir_op_ops *ops)
+/* create a pass for the reassociation */
+ir_graph_pass_t *optimize_reassociation_pass(const char *name)
 {
-#define CASE(a) case iro_##a: ops->reassociate = reassoc_##a; break
-
-    switch (code) {
-    CASE(Mul);
-    CASE(Add);
-    CASE(Sub);
-    CASE(And);
-    CASE(Or);
-    CASE(Eor);
-    CASE(Shl);
-    default:
-        /* leave NULL */;
-    }
+    return def_graph_pass(name ? name : "reassoc", optimize_reassociation);
+}  /* optimize_reassociation_pass */
 
-    return ops;
-#undef CASE
-}  /* firm_set_default_reassoc */
+static void register_node_reassoc_func(ir_op *op, reassociate_func func)
+{
+    op->ops.reassociate = func;
+}
+
+void ir_register_reassoc_node_ops(void)
+{
+    register_node_reassoc_func(op_Mul, reassoc_Mul);
+    register_node_reassoc_func(op_Add, reassoc_Add);
+    register_node_reassoc_func(op_Sub, reassoc_Sub);
+    register_node_reassoc_func(op_And, reassoc_And);
+    register_node_reassoc_func(op_Or, reassoc_Or);
+    register_node_reassoc_func(op_Eor, reassoc_Eor);
+    register_node_reassoc_func(op_Shl, reassoc_Shl);
+}
 
 /* initialize the reassociation by adding operations to some opcodes */
 void firm_init_reassociation(void)
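
An illustration of why the rewrites in this patch are legal (a standalone sketch in plain C, not part of the patch and not libFirm code): the pass reorders Add/Sub/Mul/Shl chains so that constants become adjacent and fold into a single Const (move_consts_up plus constant folding), and it pulls common factors out of sums (reverse_rule_distributive). Both steps rely on identities that hold exactly for wrap-around machine integers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t x = 0xDEADBEEFu, y = 4711u, a = 7u, b = 9u;

    /* move_consts_up: (x + 3) + (y + 5) ==> (x + y) + (3 + 5),
     * after which constant folding collapses 3 + 5 into 8 */
    assert((x + 3u) + (y + 5u) == (x + y) + 8u);

    /* reverse_rule_distributive: (a * x) +/- (b * x) ==> (a +/- b) * x */
    assert(a * x + b * x == (a + b) * x);

    /* the Shl variant: (x << 4) - (y << 4) ==> (x - y) << 4 */
    assert(((x << 4) - (y << 4)) == ((x - y) << 4));

    return 0;
}

For IEEE floats these identities do not hold in general, which is why both walkers in the patch skip float modes whenever fp_strict_algebraic is set (the get_irg_fp_model() checks above).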