X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Freassoc.c;h=b7cfe70133f5ff43bb3b55ce015ca5f4afc0d007;hb=f1a1a6092d9e4ebd9e22dd1c57d76ef8aeda74fc;hp=8b73777891bd4a1b708eb3b0c6c79ca816da39c2;hpb=63fd188f718ca654512dda7c24a24d417e3297a6;p=libfirm diff --git a/ir/opt/reassoc.c b/ir/opt/reassoc.c index 8b7377789..b7cfe7013 100644 --- a/ir/opt/reassoc.c +++ b/ir/opt/reassoc.c @@ -10,7 +10,7 @@ */ #ifdef HAVE_CONFIG_H -# include +# include "config.h" #endif # include "irnode_t.h" @@ -24,7 +24,11 @@ # include "irflag_t.h" # include "irgwalk.h" # include "reassoc_t.h" -# include "firmstat.h" +# include "irhooks.h" +# include "irloop.h" +# include "debug.h" + +static firm_dbg_module_t *dbg; typedef struct _walker_t { int changes; /* set, if a reassociation take place */ @@ -32,16 +36,19 @@ typedef struct _walker_t { typedef enum { NO_CONSTANT = 0, /**< node is not constant */ - REAL_CONSTANT = 1, /**< node is a constnt that is suitable for constant folding */ - CONST_EXPR = 4 /**< node is not constnt expression in the current context, + REAL_CONSTANT = 1, /**< node is a Const that is suitable for constant folding */ + CONST_EXPR = 4 /**< node is a constant expression in the current context, use 4 here to simplify implementation of get_comm_Binop_ops() */ } const_class_t; /** - * returns wheater a node is constant, ie is a constant or + * returns whether a node is constant ie is a constant or * is loop invariant + * + * @param n the node to be checked for constant + * @param block a block that might be in a loop */ -static const_class_t get_const_class(ir_node *n) +static const_class_t get_const_class(ir_node *n, ir_node *block) { ir_op *op = get_irn_op(n); @@ -50,6 +57,13 @@ static const_class_t get_const_class(ir_node *n) if (op == op_SymConst) return CONST_EXPR; + /* + * Beware: Bad nodes are always loop-invariant, but + * cannot handled in later code, so filter them here + */ + if (! is_Bad(n) && is_loop_invariant(n, block)) + return CONST_EXPR; + return NO_CONSTANT; } @@ -57,15 +71,16 @@ static const_class_t get_const_class(ir_node *n) * returns the operands of a commutative bin-op, if one operand is * a constant in the current context, it is returned as the second one. * - * Beware: Real constrants must be returned with higher priority than - * constnt expression, because they might be folded. + * Beware: Real constants must be returned with higher priority than + * constant expression, because they might be folded. */ static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c) { ir_node *op_a = get_binop_left(binop); ir_node *op_b = get_binop_right(binop); - int class_a = get_const_class(op_a); - int class_b = get_const_class(op_b); + ir_node *block = get_nodes_block(binop); + int class_a = get_const_class(op_a, block); + int class_b = get_const_class(op_b, block); assert(is_op_commutative(get_irn_op(binop))); @@ -87,8 +102,10 @@ static void get_comm_Binop_ops(ir_node *binop, ir_node **a, ir_node **c) /** * reassociate a Sub: x - c = (-c) + x */ -static int reassoc_Sub(ir_node *n) +static int reassoc_Sub(ir_node **in) { + ir_node *n = *in; + ir_node *block = get_nodes_block(n); ir_node *right = get_Sub_right(n); /* FIXME: Do not apply this rule for unsigned Sub's because our code @@ -103,18 +120,19 @@ static int reassoc_Sub(ir_node *n) * As there is NO real Minus in Firm it makes no sense to do this * for non-real constants yet. 
* */ - if (get_const_class(right) == REAL_CONSTANT) { + if (get_const_class(right, block) == REAL_CONSTANT) { ir_node *left = get_Sub_left(n); ir_node *block = get_nodes_block(n); ir_mode *mode = get_irn_mode(n); - dbg_info *dbg = get_irn_dbg_info(n); + dbg_info *dbi = get_irn_dbg_info(n); ir_node *irn, *c; - switch (get_const_class(left)) { + switch (get_const_class(left, block)) { case REAL_CONSTANT: irn = optimize_in_place(n); if (irn != n) { exchange(n, irn); + *in = irn; return 1; } return 0; @@ -126,22 +144,22 @@ static int reassoc_Sub(ir_node *n) } c = new_r_Const(current_ir_graph, block, mode, get_mode_null(mode)); - irn = new_rd_Sub(dbg, current_ir_graph, block, c, right, mode); + irn = new_rd_Sub(dbi, current_ir_graph, block, c, right, mode); - irn = new_rd_Add(dbg, current_ir_graph, block, left, irn, get_irn_mode(n)); + irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n)); - printf("Applied: %s - %s => %s + (-%s)\n", - get_irn_opname(get_Sub_left(n)), get_irn_opname(c), - get_irn_opname(get_Sub_left(n)), get_irn_opname(c) ); + DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n", + get_Sub_left(n), c, get_Sub_left(n), c)); exchange(n, irn); + *in = irn; return 1; } return 0; } -/** Retrieve a mode form the operands. We need this, because +/** Retrieve a mode from the operands. We need this, because * Add and Sub are allowed to operate on (P, Is) */ static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2) @@ -165,11 +183,12 @@ static ir_mode *get_mode_from_ops(ir_node *op1, ir_node *op2) * reassociate a commutative Binop * * BEWARE: this rule leads to a potential loop, if - * all two operands are are constant expressions and the third is a + * two operands are are constant expressions and the third is a * constant, so avoid this situation. */ -static int reassoc_commutative(ir_node *n) +static int reassoc_commutative(ir_node **node) { + ir_node *n = *node; ir_op *op = get_irn_op(n); ir_node *block = get_nodes_block(n); ir_node *t1, *c1; @@ -182,16 +201,20 @@ static int reassoc_commutative(ir_node *n) get_comm_Binop_ops(t1, &t2, &c2); - c_c1 = get_const_class(c1); - c_c2 = get_const_class(c2); - c_t2 = get_const_class(t2); + /* do not optimize Bad nodes, will fail later */ + if (is_Bad(t2)) + return 0; + + c_c1 = get_const_class(c1, block); + c_c2 = get_const_class(c2, block); + c_t2 = get_const_class(t2, block); if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) && ((((c_c1 ^ c_c2 ^ c_t2) & CONST_EXPR) == 0) || ((c_c1 & c_c2 & c_t2) == CONST_EXPR)) ) { - /* all three are constant and either all are constant expressions or two of them are: - * then, applying this rule would lead into a cycle + /* All three are constant and either all are constant expressions or two of them are: + * then applying this rule would lead into a cycle * - * Note that if t2 is a onstant so is c2, so we save one test. + * Note that if t2 is a constant so is c2 hence we save one test. */ return 0; } @@ -201,7 +224,27 @@ static int reassoc_commutative(ir_node *n) * convert c1 .OP. (c2 .OP. x) => (c1 .OP. c2) .OP. x */ ir_node *irn, *in[2]; - ir_mode *mode; + ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2); + + /* It might happen, that c1 and c2 have different modes, for instance Is and Iu. + * Handle this here. 
+ */ + if (mode_c1 != mode_c2) { + if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) { + /* get the bigger one */ + if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2)) + c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1); + else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2)) + c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2); + else { + /* Try to cast the real const */ + if (c_c1 == REAL_CONSTANT) + c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2); + else + c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1); + } + } + } in[0] = c1; in[1] = c2; @@ -213,29 +256,39 @@ static int reassoc_commutative(ir_node *n) mode = get_mode_from_ops(in[0], in[1]); irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in)); - printf("Applied: %s .%s. (%s .%s. %s) => (%s .%s. %s) .%s. %s\n", - get_irn_opname(c1), get_irn_opname(n), get_irn_opname(c2), get_irn_opname(n), get_irn_opname(t2), - get_irn_opname(c1), get_irn_opname(n), get_irn_opname(c2), get_irn_opname(n), get_irn_opname(t2)); - - exchange(n, irn); - - return 1; + DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => (%n .%s. %n) .%s. %n\n", + c1, get_irn_opname(n), c2, get_irn_opname(n), + t2, c1, get_irn_opname(n), c2, get_irn_opname(n), t2)); + /* + * In some rare cases it can really happen that we get the same node back. + * This might be happen in dead loops, were the Phi nodes are already gone away. + * So check this. + */ + if (n != irn) { + exchange(n, irn); + *node = irn; + return 1; + } } } return 0; } #define reassoc_Add reassoc_commutative +#define reassoc_And reassoc_commutative +#define reassoc_Or reassoc_commutative +#define reassoc_Eor reassoc_commutative /** - * reassociate using distibutive law for Mul and Add/Sub + * reassociate using distributive law for Mul and Add/Sub */ -static int reassoc_Mul(ir_node *n) +static int reassoc_Mul(ir_node **node) { + ir_node *n = *node; ir_node *add_sub, *c; ir_op *op; - if (reassoc_commutative(n)) + if (reassoc_commutative(&n)) return 1; get_comm_Binop_ops(n, &add_sub, &c); @@ -256,13 +309,10 @@ static int reassoc_Mul(ir_node *n) mode = get_mode_from_ops(in[0], in[1]); irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in)); - printf("Applied: (%s .%s. %s) %s %s => (%s %s %s) .%s. (%s %s %s)\n", - get_irn_opname(t1), get_op_name(op), get_irn_opname(t2), get_irn_opname(n), get_irn_opname(c), - get_irn_opname(t1), get_irn_opname(n), get_irn_opname(c), - get_op_name(op), - get_irn_opname(t2), get_irn_opname(n), get_irn_opname(c)); - + DBG((dbg, LEVEL_5, "Applied: (%n .%s. %n) %n %n => (%n %n %n) .%s. (%n %n %n)\n", + t1, get_op_name(op), t2, n, c, t1, n, c, get_op_name(op), t2, n, c)); exchange(n, irn); + *node = irn; return 1; } @@ -270,14 +320,16 @@ static int reassoc_Mul(ir_node *n) } /** - * The walker for the reassociation + * The walker for the reassociation. */ static void do_reassociation(ir_node *n, void *env) { walker_t *wenv = env; int res; - /* reassociation must run until fixpoint */ + hook_reassociate(1); + + /* reassociation must run until a fixpoint is reached. 
*/ do { ir_op *op = get_irn_op(n); ir_mode *mode = get_irn_mode(n); @@ -286,15 +338,13 @@ static void do_reassociation(ir_node *n, void *env) /* reassociation works only for integer or reference modes */ if (op->reassociate && (mode_is_int(mode) || mode_is_reference(mode))) { - res = op->reassociate(n); - if (res) { - wenv->changes = 1; + res = op->reassociate(&n); - /* we need a skip here, or we will see an Id in the next iteration */ - n = skip_Id(n); - } + wenv->changes |= res; } } while (res == 1); + + hook_reassociate(0); } /* @@ -303,33 +353,51 @@ static void do_reassociation(ir_node *n, void *env) void optimize_reassociation(ir_graph *irg) { walker_t env; + irg_loopinfo_state state; assert(get_irg_phase_state(irg) != phase_building); + assert(get_irg_pinned(irg) != op_pin_state_floats && + "Reassociation needs pinned graph to work properly"); /* reassociation needs constant folding */ if (!get_opt_reassociation() || !get_opt_constant_folding()) return; - env.changes = 0; + /* + * Calculate loop info, so we could identify loop-invariant + * code and threat it like a constant. + * We only need control flow loops here but can handle generic + * INTRA info as well. + */ + state = get_irg_loopinfo_state(irg); + if ((state & loopinfo_inter) || + (state & (loopinfo_constructed | loopinfo_valid)) != (loopinfo_constructed | loopinfo_valid)) + construct_cf_backedges(irg); - irg_walk_graph(irg, NULL, do_reassociation, &env); + env.changes = 0; /* now we have collected enough information, optimize */ irg_walk_graph(irg, NULL, do_reassociation, &env); /* Handle graph state */ if (env.changes) { - if (get_irg_outs_state(current_ir_graph) == outs_consistent) - set_irg_outs_inconsistent(current_ir_graph); + if (get_irg_outs_state(irg) == outs_consistent) + set_irg_outs_inconsistent(irg); + set_irg_loopinfo_inconsistent(irg); } } -/* initialise the reassociation by adding operations to some opcodes */ +/* initialize the reassociation by adding operations to some opcodes */ void firm_init_reassociation(void) { #define INIT(a) op_##a->reassociate = reassoc_##a; INIT(Mul); INIT(Add); INIT(Sub); -#undef CASE + INIT(And); + INIT(Or); + INIT(Eor); +#undef INIT + + dbg = firm_dbg_register("firm.opt.reassoc"); }
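
In short, this patch changes the reassociate callbacks to take ir_node ** so the walker can continue on the replacement node, registers the rule for And, Or and Eor in addition to Mul/Add/Sub, treats loop-invariant operands as constant expressions (hence the extra block parameter of get_const_class() and the construct_cf_backedges() call in optimize_reassociation()), reconciles mismatched integer modes of the two constants with Conv nodes, and replaces the printf() tracing with DBG() output plus hook_reassociate() calls.

The rewrite rules themselves are unchanged: x - c becomes (-c) + x, and c1 .OP. (c2 .OP. x) becomes (c1 .OP. c2) .OP. x, so that the two constants end up next to each other and constant folding can combine them. The standalone C sketch below illustrates that second rule on a toy expression tree. It is only a model for illustration: the node type and helpers are invented here and are not the libfirm ir_node API, and the real pass additionally handles modes, loop-invariant (CONST_EXPR) operands and Bad nodes, as the hunks above show.

/* Toy model of the commutative reassociation rule; NOT libfirm code.
 * node_t, cnst(), var(), add() and reassoc_add() are made up for this
 * example only. */
#include <stdio.h>
#include <stdlib.h>

typedef enum { CONST, VAR, ADD } kind_t;

typedef struct node {
	kind_t      kind;
	long        value;        /* used when kind == CONST */
	const char *name;         /* used when kind == VAR   */
	struct node *left, *right;
} node_t;

static node_t *mk(kind_t k, long v, const char *n, node_t *l, node_t *r)
{
	node_t *p = malloc(sizeof(*p));
	p->kind = k; p->value = v; p->name = n; p->left = l; p->right = r;
	return p;
}
static node_t *cnst(long v)              { return mk(CONST, v, NULL, NULL, NULL); }
static node_t *var(const char *n)        { return mk(VAR, 0, n, NULL, NULL); }
static node_t *add(node_t *l, node_t *r) { return mk(ADD, 0, NULL, l, r); }

/* c1 + (c2 + x)  =>  (c1 + c2) + x : pull the two constants together so
 * that constant folding can merge them into a single constant. */
static node_t *reassoc_add(node_t *n)
{
	if (n->kind == ADD && n->left->kind == CONST &&
	    n->right->kind == ADD && n->right->left->kind == CONST) {
		node_t *folded = cnst(n->left->value + n->right->left->value);
		return add(folded, n->right->right);
	}
	return n;
}

static void dump(const node_t *n)
{
	switch (n->kind) {
	case CONST: printf("%ld", n->value); break;
	case VAR:   printf("%s", n->name);   break;
	case ADD:   printf("("); dump(n->left); printf(" + "); dump(n->right); printf(")"); break;
	}
}

int main(void)
{
	/* memory is intentionally not freed in this throwaway demo */
	node_t *e = add(cnst(3), add(cnst(4), var("x")));  /* 3 + (4 + x) */
	dump(e); printf("  =>  ");
	dump(reassoc_add(e)); printf("\n");                /* (7 + x)     */
	return 0;
}

Compiled with any C99 compiler, the demo prints "(3 + (4 + x))  =>  (7 + x)", which corresponds to the shape of graph that reassoc_commutative() builds before optimize_node() folds the two Const operands into one. The Sub rule plays the supporting role of turning x - c into (-c) + x so that this Add rule and constant folding can then apply.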