X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Freassoc.c;h=28b4526266a07fc8c7863d730059c6d51de3ccf0;hb=77c348bef494a5d4e1fbf754957884de5c0bfe99;hp=1890716b72ad9355c619bb214d3057f8ab241c8a;hpb=f210fb5c29c0d72050c8150db3b814b624389faa;p=libfirm diff --git a/ir/opt/reassoc.c b/ir/opt/reassoc.c index 1890716b7..28b452626 100644 --- a/ir/opt/reassoc.c +++ b/ir/opt/reassoc.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved. * * This file is part of libFirm. * @@ -36,12 +36,15 @@ #include "iropt_dbg.h" #include "irflag_t.h" #include "irgwalk.h" +#include "irouts.h" #include "reassoc_t.h" #include "irhooks.h" #include "irloop.h" #include "pdeq.h" #include "debug.h" +//#define NEW_REASSOC + DEBUG_ONLY(static firm_dbg_module_t *dbg;) typedef struct _walker_t { @@ -63,16 +66,13 @@ typedef enum { * @param n the node to be checked for constant * @param block a block that might be in a loop */ -static const_class_t get_const_class(ir_node *n, ir_node *block) +static const_class_t get_const_class(const ir_node *n, const ir_node *block) { - ir_op *op = get_irn_op(n); - - if (op == op_Const) + if (is_Const(n)) return REAL_CONSTANT; - /* although SymConst's are of course real constant, we cannot - fold them, so handle them like region constants */ - if (op == op_SymConst) + /* constant nodes which can't be folded are region constants */ + if (is_irn_constlike(n)) return REGION_CONST; /* @@ -168,12 +168,13 @@ static int reassoc_Sub(ir_node **in) /* already constant, nothing to do */ return 0; } + mode = get_irn_mode(n); dbi = get_irn_dbg_info(n); /* Beware of SubP(P, Is) */ irn = new_rd_Minus(dbi, current_ir_graph, block, right, rmode); - irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n)); + irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, mode); DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n", get_Sub_left(n), right, get_Sub_left(n), right)); @@ -209,6 +210,8 @@ static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2) return m1; } /* get_mode_from_ops */ +#ifndef NEW_REASSOC + /** * reassociate a commutative Binop * @@ -241,7 +244,8 @@ static int reassoc_commutative(ir_node **node) if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) && ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) { - /* All three are constant and either all are constant expressions or two of them are: + /* All three are constant and either all are constant expressions + * or two of them are: * then applying this rule would lead into a cycle * * Note that if t2 is a constant so is c2 hence we save one test. @@ -256,7 +260,8 @@ static int reassoc_commutative(ir_node **node) ir_node *irn, *in[2]; ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2); - /* It might happen, that c1 and c2 have different modes, for instance Is and Iu. + /* It might happen, that c1 and c2 have different modes, for + * instance Is and Iu. * Handle this here. */ if (mode_c1 != mode_c2) { @@ -290,9 +295,9 @@ static int reassoc_commutative(ir_node **node) c1, get_irn_opname(n), c2, get_irn_opname(n), t2, t2, get_irn_opname(n), c1, get_irn_opname(n), c2)); /* - * In some rare cases it can really happen that we get the same node back. - * This might be happen in dead loops, were the Phi nodes are already gone away. - * So check this. + * In some rare cases it can really happen that we get the same + * node back. 
This might happen in dead loops, where the Phi
+	 * nodes are already gone. So check this.
 		 */
 		if (n != irn) {
 			exchange(n, irn);
@@ -304,6 +309,135 @@
 	return 0;
 }  /* reassoc_commutative */
 
+#else
+
+static ir_op *commutative_op;
+static ir_node *commutative_block;
+static struct obstack commutative_args;
+
+static void collect_args(ir_node *node)
+{
+	ir_node *left  = get_binop_left(node);
+	ir_node *right = get_binop_right(node);
+
+	if (get_irn_op(left) == commutative_op
+	    && (!get_irn_outs_computed(left) || get_irn_n_outs(left) == 1)) {
+		collect_args(left);
+	} else {
+		obstack_ptr_grow(&commutative_args, left);
+	}
+
+	if (get_irn_op(right) == commutative_op
+	    && (!get_irn_outs_computed(right) || get_irn_n_outs(right) == 1)) {
+		collect_args(right);
+	} else {
+		obstack_ptr_grow(&commutative_args, right);
+	}
+
+#ifndef NDEBUG
+	{
+		ir_mode *mode = get_irn_mode(node);
+		if (is_Add(node) && mode_is_reference(mode)) {
+			assert(get_irn_mode(left) == mode || get_irn_mode(right) == mode);
+		} else {
+			assert(get_irn_mode(left) == mode);
+			assert(get_irn_mode(right) == mode);
+		}
+	}
+#endif
+}
+
+static int compare_nodes(const ir_node *node1, const ir_node *node2)
+{
+	const_class_t class1 = get_const_class(node1, commutative_block);
+	const_class_t class2 = get_const_class(node2, commutative_block);
+
+	if (class1 == class2)
+		return 0;
+	// return get_irn_idx(node1) - get_irn_idx(node2);
+
+	if (class1 < class2)
+		return -1;
+
+	assert(class1 > class2);
+	return 1;
+}
+
+static int compare_node_ptr(const void *e1, const void *e2)
+{
+	const ir_node *node1 = *((const ir_node *const*) e1);
+	const ir_node *node2 = *((const ir_node *const*) e2);
+	return compare_nodes(node1, node2);
+}
+
+static int reassoc_commutative(ir_node **n)
+{
+	int i;
+	int n_args;
+	ir_node *last;
+	ir_node **args;
+	ir_mode *mode;
+	ir_node *node = *n;
+
+	commutative_op    = get_irn_op(node);
+	commutative_block = get_nodes_block(node);
+
+	/* collect all nodes with same op type */
+	collect_args(node);
+
+	n_args = obstack_object_size(&commutative_args) / sizeof(ir_node*);
+	args   = obstack_finish(&commutative_args);
+
+	/* shortcut: in most cases there's nothing to do */
+	if (n_args == 2 && compare_nodes(args[0], args[1]) <= 0) {
+		obstack_free(&commutative_args, args);
+		return 0;
+	}
+
+	/* sort the arguments */
+	qsort(args, n_args, sizeof(ir_node*), compare_node_ptr);
+
+	/* build new tree */
+	last = args[n_args-1];
+	mode = get_irn_mode(last);
+	for (i = n_args-2; i >= 0; --i) {
+		ir_mode *mode_right;
+		ir_node *new_node;
+		ir_node *in[2];
+
+		in[0] = last;
+		in[1] = args[i];
+
+		/* AddP violates the assumption that all modes in args are equal...
+		 * we need some hacks to cope with this */
+		mode_right = get_irn_mode(in[1]);
+		if (mode_is_reference(mode_right)) {
+			assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
+			mode = get_irn_mode(in[1]);
+		}
+		if (mode_right != mode) {
+			assert(is_Add(node) && mode_is_reference(get_irn_mode(node)));
+			in[1] = new_r_Conv(current_ir_graph, commutative_block, in[1], mode);
+		}
+
+		/* TODO: produce useful debug info!
*/ + new_node = new_ir_node(NULL, current_ir_graph, commutative_block, + commutative_op, mode, 2, in); + new_node = optimize_node(new_node); + last = new_node; + } + + /* CSE often returns the old node again, only exchange if needed */ + if (last != node) { + exchange(node, last); + *n = last; + return 1; + } + return 0; +} + +#endif + #define reassoc_Add reassoc_commutative #define reassoc_And reassoc_commutative #define reassoc_Or reassoc_commutative @@ -426,7 +560,6 @@ static void do_reassociation(walker_t *wenv) int i, res, changed; ir_node *n, *blk; - while (! waitq_empty(wenv->wq)) { n = waitq_get(wenv->wq); set_irn_link(n, NULL); @@ -549,7 +682,7 @@ static int reverse_rule_distributive(ir_node **node) { x = get_Shl_right(left); if (x == get_Shl_right(right)) { - /* (a << x) +/- (b << x) */ + /* (a << x) +/- (b << x) ==> (a +/- b) << x */ a = get_Shl_left(left); b = get_Shl_left(right); goto transform; @@ -558,12 +691,12 @@ static int reverse_rule_distributive(ir_node **node) { x = get_Mul_left(left); if (x == get_Mul_left(right)) { - /* (x * a) +/- (x * b) */ + /* (x * a) +/- (x * b) ==> (a +/- b) * x */ a = get_Mul_right(left); b = get_Mul_right(right); goto transform; } else if (x == get_Mul_right(right)) { - /* (x * a) +/- (b * x) */ + /* (x * a) +/- (b * x) ==> (a +/- b) * x */ a = get_Mul_right(left); b = get_Mul_left(right); goto transform; @@ -572,12 +705,12 @@ static int reverse_rule_distributive(ir_node **node) { x = get_Mul_right(left); if (x == get_Mul_right(right)) { - /* (a * x) +/- (b * x) */ + /* (a * x) +/- (b * x) ==> (a +/- b) * x */ a = get_Mul_left(left); b = get_Mul_left(right); goto transform; } else if (x == get_Mul_left(right)) { - /* (a * x) +/- (x * b) */ + /* (a * x) +/- (x * b) ==> (a +/- b) * x */ a = get_Mul_left(left); b = get_Mul_right(right); goto transform; @@ -630,16 +763,19 @@ static int move_consts_up(ir_node **node) { dbg = get_irn_dbg_info(n); op = get_irn_op(n); if (get_irn_op(l) == op) { + /* (a .op. b) .op. r */ a = get_binop_left(l); b = get_binop_right(l); if (is_constant_expr(a)) { + /* (C .op. b) .op. r ==> (r .op. b) .op. C */ c = a; a = r; blk = get_nodes_block(l); dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL; goto transform; } else if (is_constant_expr(b)) { + /* (a .op. C) .op. r ==> (a .op. r) .op. C */ c = b; b = r; blk = get_nodes_block(l); @@ -647,16 +783,19 @@ static int move_consts_up(ir_node **node) { goto transform; } } else if (get_irn_op(r) == op) { + /* l .op. (a .op. b) */ a = get_binop_left(r); b = get_binop_right(r); if (is_constant_expr(a)) { + /* l .op. (C .op. b) ==> (l .op. b) .op. C */ c = a; a = l; blk = get_nodes_block(r); dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL; goto transform; } else if (is_constant_expr(b)) { + /* l .op. (a .op. C) ==> (a .op. l) .op. C */ c = b; b = l; blk = get_nodes_block(r); @@ -678,7 +817,7 @@ transform: if (ma != mb && mode_is_int(ma) && mode_is_int(mb)) return 0; - /* check if a+b can be calculated in the same block is the old instruction */ + /* check if (a .op. b) can be calculated in the same block is the old instruction */ if (! block_dominates(get_nodes_block(a), blk)) return 0; if (! 
block_dominates(get_nodes_block(b), blk)) @@ -688,9 +827,10 @@ transform: in[1] = b; mode = get_mode_from_ops(a, b); - in[0] = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in)); + in[0] = irn = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in)); - if (op == op_Add || op == op_Sub) { + /* beware: optimize_node might have changed the opcode, check again */ + if (is_Add(irn) || is_Sub(irn)) { reverse_rule_distributive(&in[0]); } in[1] = c; @@ -722,7 +862,8 @@ static void reverse_rules(ir_node *node, void *env) { if (is_op_commutative(op)) { wenv->changes |= res = move_consts_up(&node); } - if (op == op_Add || op == op_Sub) { + /* beware: move_consts_up might have changed the opcode, check again */ + if (is_Add(node) || is_Sub(node)) { wenv->changes |= res = reverse_rule_distributive(&node); } } while (res); @@ -735,18 +876,23 @@ void optimize_reassociation(ir_graph *irg) { walker_t env; irg_loopinfo_state state; + ir_graph *rem; assert(get_irg_phase_state(irg) != phase_building); assert(get_irg_pinned(irg) != op_pin_state_floats && "Reassociation needs pinned graph to work properly"); - /* reassociation needs constant folding */ - if (!get_opt_reassociation() || !get_opt_constant_folding()) - return; + rem = current_ir_graph; + current_ir_graph = irg; /* we use dominance to detect dead blocks */ assure_doms(irg); +#ifdef NEW_REASSOC + assure_irg_outs(irg); + obstack_init(&commutative_args); +#endif + /* * Calculate loop info, so we could identify loop-invariant * code and threat it like a constant. @@ -762,7 +908,7 @@ void optimize_reassociation(ir_graph *irg) env.wq = new_waitq(); /* disable some optimizations while reassoc is running to prevent endless loops */ - set_opt_reassoc_running(1); + set_reassoc_running(1); { /* now we have collected enough information, optimize */ irg_walk_graph(irg, NULL, wq_walker, &env); @@ -771,7 +917,7 @@ void optimize_reassociation(ir_graph *irg) /* reverse those rules that do not result in collapsed constants */ irg_walk_graph(irg, NULL, reverse_rules, &env); } - set_opt_reassoc_running(0); + set_reassoc_running(0); /* Handle graph state */ if (env.changes) { @@ -779,7 +925,12 @@ void optimize_reassociation(ir_graph *irg) set_irg_loopinfo_inconsistent(irg); } +#ifdef NEW_REASSOC + obstack_free(&commutative_args, NULL); +#endif + del_waitq(env.wq); + current_ir_graph = rem; } /* optimize_reassociation */ /* Sets the default reassociation operation for an ir_op_ops. */
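
The core idea behind both the classic reassoc_commutative() and the NEW_REASSOC variant introduced in this patch is the same: the operands of a chain of one commutative operation are ordered so that the "most constant" operands end up next to each other, where ordinary constant folding can then combine them. The toy program below only illustrates that ordering-and-folding step on a plain C data type; it does not use libfirm's ir_node API, and all names in it (operand_t, cmp_operand, the sample operands) are made up for this example.

#include <stdio.h>
#include <stdlib.h>

typedef enum { NO_CONSTANT, REGION_CONST, REAL_CONSTANT } const_class_t;

typedef struct {
	const char   *name;  /* operand name, only used for printing        */
	const_class_t cls;   /* how "constant" the operand is               */
	int           value; /* only meaningful for REAL_CONSTANT operands  */
} operand_t;

/* sort key: variables first, loop-invariant values next, real constants last */
static int cmp_operand(const void *a, const void *b)
{
	const operand_t *o1 = (const operand_t*)a;
	const operand_t *o2 = (const operand_t*)b;
	return (int)o1->cls - (int)o2->cls;
}

int main(void)
{
	/* operands of the chain x + 2 + y + 3 + inv, where inv is loop-invariant */
	operand_t ops[] = {
		{ "x",   NO_CONSTANT,   0 },
		{ "2",   REAL_CONSTANT, 2 },
		{ "y",   NO_CONSTANT,   0 },
		{ "3",   REAL_CONSTANT, 3 },
		{ "inv", REGION_CONST,  0 },
	};
	size_t n = sizeof(ops) / sizeof(ops[0]);
	size_t i;

	/* order the operands by constness, like the qsort() call in the new
	 * reassoc_commutative() */
	qsort(ops, n, sizeof(ops[0]), cmp_operand);

	/* fold neighbouring real constants at the end of the chain */
	while (n >= 2 && ops[n-1].cls == REAL_CONSTANT && ops[n-2].cls == REAL_CONSTANT) {
		ops[n-2].value += ops[n-1].value; /* the operation is Add in this toy */
		--n;
	}

	/* print the rebuilt chain, e.g. x + y + inv + 5 */
	for (i = 0; i < n; ++i) {
		if (i > 0)
			printf(" + ");
		if (ops[i].cls == REAL_CONSTANT)
			printf("%d", ops[i].value);
		else
			printf("%s", ops[i].name);
	}
	printf("\n");
	return 0;
}

Compiled on its own, the sketch prints the reassociated chain with the two integer literals already merged into a single constant: once the sort has made the two constants neighbours at the right end of the chain, folding them is a purely local step, which is exactly what the pass relies on optimize_node() to do on the real graph.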