X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Freassoc.c;h=8cfcd8c2ecfed349f15eb90771454c6c2b84fe34;hb=bbc6291b6a4ed4fe5bd8d1c2ddd57f4e6a0c8a74;hp=1890716b72ad9355c619bb214d3057f8ab241c8a;hpb=f210fb5c29c0d72050c8150db3b814b624389faa;p=libfirm

diff --git a/ir/opt/reassoc.c b/ir/opt/reassoc.c
index 1890716b7..8cfcd8c2e 100644
--- a/ir/opt/reassoc.c
+++ b/ir/opt/reassoc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
  *
  * This file is part of libFirm.
  *
@@ -168,12 +168,13 @@ static int reassoc_Sub(ir_node **in)
         /* already constant, nothing to do */
         return 0;
     }
+    mode = get_irn_mode(n);
     dbi = get_irn_dbg_info(n);
 
     /* Beware of SubP(P, Is) */
     irn = new_rd_Minus(dbi, current_ir_graph, block, right, rmode);
-    irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, get_irn_mode(n));
+    irn = new_rd_Add(dbi, current_ir_graph, block, left, irn, mode);
 
     DBG((dbg, LEVEL_5, "Applied: %n - %n => %n + (-%n)\n",
             get_Sub_left(n), right, get_Sub_left(n), right));
@@ -549,7 +550,7 @@ static int reverse_rule_distributive(ir_node **node) {
         x = get_Shl_right(left);
 
         if (x == get_Shl_right(right)) {
-            /* (a << x) +/- (b << x) */
+            /* (a << x) +/- (b << x) ==> (a +/- b) << x */
             a = get_Shl_left(left);
             b = get_Shl_left(right);
             goto transform;
@@ -558,12 +559,12 @@ static int reverse_rule_distributive(ir_node **node) {
         x = get_Mul_left(left);
 
         if (x == get_Mul_left(right)) {
-            /* (x * a) +/- (x * b) */
+            /* (x * a) +/- (x * b) ==> (a +/- b) * x */
             a = get_Mul_right(left);
             b = get_Mul_right(right);
             goto transform;
         } else if (x == get_Mul_right(right)) {
-            /* (x * a) +/- (b * x) */
+            /* (x * a) +/- (b * x) ==> (a +/- b) * x */
             a = get_Mul_right(left);
             b = get_Mul_left(right);
             goto transform;
@@ -572,12 +573,12 @@ static int reverse_rule_distributive(ir_node **node) {
         x = get_Mul_right(left);
 
         if (x == get_Mul_right(right)) {
-            /* (a * x) +/- (b * x) */
+            /* (a * x) +/- (b * x) ==> (a +/- b) * x */
             a = get_Mul_left(left);
             b = get_Mul_left(right);
             goto transform;
         } else if (x == get_Mul_left(right)) {
-            /* (a * x) +/- (x * b) */
+            /* (a * x) +/- (x * b) ==> (a +/- b) * x */
             a = get_Mul_left(left);
             b = get_Mul_right(right);
             goto transform;
@@ -630,16 +631,19 @@ static int move_consts_up(ir_node **node) {
     dbg = get_irn_dbg_info(n);
     op = get_irn_op(n);
     if (get_irn_op(l) == op) {
+        /* (a .op. b) .op. r */
         a = get_binop_left(l);
         b = get_binop_right(l);
 
         if (is_constant_expr(a)) {
+            /* (C .op. b) .op. r ==> (r .op. b) .op. C */
             c = a;
             a = r;
             blk = get_nodes_block(l);
             dbg = dbg == get_irn_dbg_info(l) ? dbg : NULL;
             goto transform;
         } else if (is_constant_expr(b)) {
+            /* (a .op. C) .op. r ==> (a .op. r) .op. C */
             c = b;
             b = r;
             blk = get_nodes_block(l);
@@ -647,16 +651,19 @@ static int move_consts_up(ir_node **node) {
             goto transform;
         }
     } else if (get_irn_op(r) == op) {
+        /* l .op. (a .op. b) */
         a = get_binop_left(r);
         b = get_binop_right(r);
 
         if (is_constant_expr(a)) {
+            /* l .op. (C .op. b) ==> (l .op. b) .op. C */
             c = a;
             a = l;
             blk = get_nodes_block(r);
             dbg = dbg == get_irn_dbg_info(r) ? dbg : NULL;
             goto transform;
         } else if (is_constant_expr(b)) {
+            /* l .op. (a .op. C) ==> (a .op. l) .op. C */
             c = b;
             b = l;
             blk = get_nodes_block(r);
@@ -678,7 +685,7 @@ transform:
     if (ma != mb && mode_is_int(ma) && mode_is_int(mb))
         return 0;
 
-    /* check if a+b can be calculated in the same block is the old instruction */
+    /* check if (a .op. b) can be calculated in the same block as the old instruction */
     if (! block_dominates(get_nodes_block(a), blk))
         return 0;
     if (! block_dominates(get_nodes_block(b), blk))
@@ -688,9 +695,10 @@ transform:
     in[1] = b;
 
     mode = get_mode_from_ops(a, b);
-    in[0] = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in));
+    in[0] = irn = optimize_node(new_ir_node(dbg, current_ir_graph, blk, op, mode, 2, in));
 
-    if (op == op_Add || op == op_Sub) {
+    /* beware: optimize_node might have changed the opcode, check again */
+    if (is_Add(irn) || is_Sub(irn)) {
         reverse_rule_distributive(&in[0]);
     }
     in[1] = c;
@@ -722,7 +730,8 @@ static void reverse_rules(ir_node *node, void *env) {
         if (is_op_commutative(op)) {
             wenv->changes |= res = move_consts_up(&node);
         }
-        if (op == op_Add || op == op_Sub) {
+        /* beware: move_consts_up might have changed the opcode, check again */
+        if (is_Add(node) || is_Sub(node)) {
             wenv->changes |= res = reverse_rule_distributive(&node);
         }
     } while (res);
@@ -735,14 +744,14 @@ void optimize_reassociation(ir_graph *irg)
 {
     walker_t env;
     irg_loopinfo_state state;
+    ir_graph *rem;
 
     assert(get_irg_phase_state(irg) != phase_building);
     assert(get_irg_pinned(irg) != op_pin_state_floats &&
         "Reassociation needs pinned graph to work properly");
 
-    /* reassociation needs constant folding */
-    if (!get_opt_reassociation() || !get_opt_constant_folding())
-        return;
+    rem = current_ir_graph;
+    current_ir_graph = irg;
 
     /* we use dominance to detect dead blocks */
     assure_doms(irg);
@@ -762,7 +771,7 @@ void optimize_reassociation(ir_graph *irg)
     env.wq = new_waitq();
 
     /* disable some optimizations while reassoc is running to prevent endless loops */
-    set_opt_reassoc_running(1);
+    set_reassoc_running(1);
     {
         /* now we have collected enough information, optimize */
         irg_walk_graph(irg, NULL, wq_walker, &env);
@@ -771,7 +780,7 @@ void optimize_reassociation(ir_graph *irg)
         /* reverse those rules that do not result in collapsed constants */
         irg_walk_graph(irg, NULL, reverse_rules, &env);
     }
-    set_opt_reassoc_running(0);
+    set_reassoc_running(0);
 
     /* Handle graph state */
     if (env.changes) {
@@ -780,6 +789,7 @@ void optimize_reassociation(ir_graph *irg)
     }
 
     del_waitq(env.wq);
+    current_ir_graph = rem;
 }  /* optimize_reassociation */
 
 /* Sets the default reassociation operation for an ir_op_ops. */
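
The rewrite rules spelled out in the new comments are ordinary arithmetic identities, so they can be sanity-checked outside the compiler. Below is a minimal standalone C sketch; it is not libFirm code and not part of this commit, and all variable values in it are arbitrary illustrations. It checks, on sample integers, the reverse-distributive rules and the move_consts_up shapes documented in the comments above.

/* Standalone illustration only -- not part of libFirm or of this commit.
 * Checks on sample ints that the reassociation identities named in the
 * diff's comments preserve the computed value (ignoring overflow). */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    int a = 5, b = 7, x = 3, C = 11, r = 2, l = 4;

    /* reverse distributivity: (x * a) + (x * b) ==> (a + b) * x */
    assert(x * a + x * b == (a + b) * x);
    /* the Shl form: (b << x) - (a << x) ==> (b - a) << x */
    assert((b << x) - (a << x) == (b - a) << x);

    /* move_consts_up with .op. = + (commutative):
     * (C .op. b) .op. r ==> (r .op. b) .op. C */
    assert((C + b) + r == (r + b) + C);
    /* l .op. (a .op. C) ==> (a .op. l) .op. C */
    assert(l + (a + C) == (a + l) + C);

    puts("reassociation identities hold for the sample values");
    return 0;
}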