X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=ce862ea90ff4aa276e3498e6c9b1e89dd8a8e5da;hb=8f355cb9b3d20c10f71d1b1e17cbf59a51ced83b;hp=428fe2d2db53536f81fbb7ec92a12cb37229ee9c;hpb=b71da3f004e5d59dbef2381ece98d67dd93cf57e;p=libfirm

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 428fe2d2d..ce862ea90 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
 *
 * This file is part of libFirm.
 *
@@ -1577,34 +1577,27 @@ static ir_node *equivalent_node_CopyB(ir_node *n) {
 * Optimize Bounds(idx, idx, upper) into idx.
 */
static ir_node *equivalent_node_Bound(ir_node *n) {
-	ir_node *idx   = get_Bound_index(n);
-	ir_node *lower = get_Bound_lower(n);
+	ir_node *idx  = get_Bound_index(n);
+	ir_node *pred = skip_Proj(idx);
	int ret_tuple = 0;

-	/* By definition lower < upper, so if idx == lower -->
-	   lower <= idx && idx < upper */
-	if (idx == lower) {
-		/* Turn Bound into a tuple (mem, jmp, bad, idx) */
-		ret_tuple = 1;
-	} else {
-		ir_node *pred = skip_Proj(idx);
-
-		if (get_irn_op(pred) == op_Bound) {
+	if (is_Bound(pred)) {
+		/*
+		 * idx was bounds-checked in the same MacroBlock previously;
+		 * it is still valid if lower <= pred_lower && pred_upper <= upper.
+		 */
+		ir_node *lower = get_Bound_lower(n);
+		ir_node *upper = get_Bound_upper(n);
+		if (get_Bound_lower(pred) == lower &&
+		    get_Bound_upper(pred) == upper &&
+		    get_irn_MacroBlock(n) == get_irn_MacroBlock(pred)) {
			/*
-			 * idx was Bounds_check previously, it is still valid if
-			 * lower <= pred_lower && pred_upper <= upper.
+			 * One could expect that we simply return the previous
+			 * Bound here. However, this would be wrong, as we could
+			 * add an exception Proj to a new location then.
+			 * So, we must turn it into a tuple.
			 */
-			ir_node *upper = get_Bound_upper(n);
-			if (get_Bound_lower(pred) == lower &&
-			    get_Bound_upper(pred) == upper) {
-				/*
-				 * One could expect that we simply return the previous
-				 * Bound here. However, this would be wrong, as we could
-				 * add an exception Proj to a new location then.
-				 * So, we must turn in into a tuple.
-				 */
-				ret_tuple = 1;
-			}
+			ret_tuple = 1;
		}
	}
	if (ret_tuple) {
@@ -1756,6 +1749,50 @@ static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)(
	return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_binop_on_phi */

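The helper introduced next generalizes apply_binop_on_phi from Op(Phi, Const) to Op(Phi, Phi): if both Phis have only Const predecessors and live in the same block, the binop is evaluated once per predecessor and the results feed a new Phi, e.g. Add(Phi(2, 3), Phi(10, 20)) becomes Phi(12, 23). A minimal standalone sketch of that element-wise fold, with plain ints standing in for tarvals and -1 for tarval_bad (fold2 and eval_add are illustrative names, not libfirm API):

#include <stdio.h>

/* Fold Op(Phi(a[0..n)), Phi(b[0..n))) element-wise into res. */
static int fold2(const int *a, const int *b, int *res, int n,
                 int (*eval)(int, int))
{
	int i;
	for (i = 0; i < n; ++i) {
		res[i] = eval(a[i], b[i]);
		if (res[i] == -1)
			return 0; /* folding failed, keep the original node */
	}
	return 1;
}

static int eval_add(int l, int r) { return l + r; }

int main(void)
{
	int a[2] = { 2, 3 }, b[2] = { 10, 20 }, r[2];
	if (fold2(a, b, r, 2, eval_add))
		printf("Add(Phi(2,3), Phi(10,20)) -> Phi(%d,%d)\n", r[0], r[1]);
	return 0;
}
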
+/**
+ * Apply an evaluator on a binop with two constant Phis.
+ *
+ * @param a      the left Phi node
+ * @param b      the right Phi node
+ * @param eval   an evaluator function
+ * @param mode   the mode of the result, may be different from the mode of the Phi!
+ *
+ * @return a new Phi node if the conversion was successful, NULL otherwise
+ */
+static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, tarval *(*eval)(tarval *, tarval *), ir_mode *mode) {
+	tarval *tv_l, *tv_r, *tv;
+	void **res;
+	ir_node *pred;
+	ir_graph *irg;
+	int i, n;
+
+	if (get_nodes_block(a) != get_nodes_block(b))
+		return NULL;
+
+	n = get_irn_arity(a);
+	NEW_ARR_A(void *, res, n);
+
+	for (i = 0; i < n; ++i) {
+		pred = get_irn_n(a, i);
+		tv_l = get_Const_tarval(pred);
+		pred = get_irn_n(b, i);
+		tv_r = get_Const_tarval(pred);
+		tv   = eval(tv_l, tv_r);
+
+		if (tv == tarval_bad) {
+			/* folding failed, bad */
+			return NULL;
+		}
+		res[i] = tv;
+	}
+	irg = current_ir_graph;
+	for (i = 0; i < n; ++i) {
+		pred   = get_irn_n(a, i);
+		res[i] = new_r_Const_type(irg, get_irg_start_block(irg), mode, res[i], get_Const_type(pred));
+	}
+	return new_r_Phi(irg, get_nodes_block(a), n, (ir_node **)res, mode);
+} /* apply_binop_on_2_phis */
+
 /**
  * Apply an evaluator on a unop with a constant operator (a Phi).
  *
@@ -1838,13 +1875,13 @@ static ir_node *transform_node_AddSub(ir_node *n) {
	ir_mode *mode = get_irn_mode(n);

	if (mode_is_reference(mode)) {
-		ir_node *left  = get_binop_left(n);
-		ir_node *right = get_binop_right(n);
-		int ref_bits   = get_mode_size_bits(mode);
+		ir_node *left     = get_binop_left(n);
+		ir_node *right    = get_binop_right(n);
+		unsigned ref_bits = get_mode_size_bits(mode);

		if (is_Conv(left)) {
			ir_mode *mode = get_irn_mode(left);
-			int bits      = get_mode_size_bits(mode);
+			unsigned bits = get_mode_size_bits(mode);

			if (ref_bits == bits &&
			    mode_is_int(mode) &&
@@ -1867,7 +1904,7 @@ static ir_node *transform_node_AddSub(ir_node *n) {

		if (is_Conv(right)) {
			ir_mode *mode = get_irn_mode(right);
-			int bits      = get_mode_size_bits(mode);
+			unsigned bits = get_mode_size_bits(mode);

			if (ref_bits == bits &&
			    mode_is_int(mode) &&
@@ -1901,6 +1938,10 @@ static ir_node *transform_node_AddSub(ir_node *n) {
		/* check for Op(Const, Phi) */                                    \
		c = apply_binop_on_phi(b, get_Const_tarval(a), eval, mode, 1);\
	}                                                                 \
+	else if (is_const_Phi(a) && is_const_Phi(b)) {                    \
+		/* check for Op(Phi, Phi) */                                  \
+		c = apply_binop_on_2_phis(a, b, eval, mode);                  \
+	}                                                                 \
	if (c) {                                                          \
		DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI);                   \
		return c;                                                     \
	}                                                                 \
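The backslash-continued lines above extend a dispatch macro (in libfirm this is presumably HANDLE_BINOP_PHI; its name and full definition lie outside this hunk) so that constant folding over Phis now also covers Op(Phi, Phi). A hedged sketch of how a binop transformer would invoke it, assuming the parameter order (eval, a, b, c, mode) implied by the body shown here:

static ir_node *transform_node_Add_sketch(ir_node *n) {
	ir_node *c, *oldn = n;
	ir_node *a    = get_Add_left(n);
	ir_node *b    = get_Add_right(n);
	ir_mode *mode = get_irn_mode(n);

	/* tries Op(Phi, Const), Op(Const, Phi) and now Op(Phi, Phi);
	 * on success the macro returns the folded Phi from this function */
	HANDLE_BINOP_PHI(tarval_add, a, b, c, mode);

	/* ... Add-specific rules continue here ... */
	return n;
}
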
@@ -1979,74 +2020,6 @@ static ir_node *transform_node_Add(ir_node *n) {
		DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
		return n;
	}
-	if (! is_reassoc_running()) {
-		/* do NOT execute this code if reassociation is enabled, it does the inverse! */
-		if (is_Mul(a)) {
-			ir_node *ma = get_Mul_left(a);
-			ir_node *mb = get_Mul_right(a);
-
-			if (b == ma) {
-				ir_node *blk = get_irn_n(n, -1);
-				n = new_rd_Mul(
-						get_irn_dbg_info(n), current_ir_graph, blk,
-						ma,
-						new_rd_Add(
-							get_irn_dbg_info(n), current_ir_graph, blk,
-							mb,
-							new_r_Const_long(current_ir_graph, blk, mode, 1),
-							mode),
-						mode);
-				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
-				return n;
-			} else if (b == mb) {
-				ir_node *blk = get_irn_n(n, -1);
-				n = new_rd_Mul(
-						get_irn_dbg_info(n), current_ir_graph, blk,
-						mb,
-						new_rd_Add(
-							get_irn_dbg_info(n), current_ir_graph, blk,
-							ma,
-							new_r_Const_long(current_ir_graph, blk, mode, 1),
-							mode),
-						mode);
-				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
-				return n;
-			}
-		}
-		if (is_Mul(b)) {
-			ir_node *ma = get_Mul_left(b);
-			ir_node *mb = get_Mul_right(b);
-
-			if (a == ma) {
-				ir_node *blk = get_irn_n(n, -1);
-				n = new_rd_Mul(
-						get_irn_dbg_info(n), current_ir_graph, blk,
-						ma,
-						new_rd_Add(
-							get_irn_dbg_info(n), current_ir_graph, blk,
-							mb,
-							new_r_Const_long(current_ir_graph, blk, mode, 1),
-							mode),
-						mode);
-				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
-				return n;
-			}
-			if (a == mb) {
-				ir_node *blk = get_irn_n(n, -1);
-				n = new_rd_Mul(
-						get_irn_dbg_info(n), current_ir_graph, blk,
-						mb,
-						new_rd_Add(
-							get_irn_dbg_info(n), current_ir_graph, blk,
-							ma,
-							new_r_Const_long(current_ir_graph, blk, mode, 1),
-							mode),
-						mode);
-				DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
-				return n;
-			}
-		}
-	}
	if (get_mode_arithmetic(mode) == irma_twos_complement) {
		/* Here we rely on constants being on the RIGHT side */
		if (is_Not(a)) {
@@ -2263,16 +2236,16 @@ restart:

		if (mode == get_irn_mode(b)) {
			ir_mode *ma, *mb;
-
-			a = get_Conv_op(a);
-			b = get_Conv_op(b);
+			ir_node *op_a = get_Conv_op(a);
+			ir_node *op_b = get_Conv_op(b);

			/* check if it's allowed to skip the conv */
-			ma = get_irn_mode(a);
-			mb = get_irn_mode(b);
+			ma = get_irn_mode(op_a);
+			mb = get_irn_mode(op_b);

			if (mode_is_reference(ma) && mode_is_reference(mb)) {
				/* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
+				a = op_a; b = op_b;
				set_Sub_left(n, a);
				set_Sub_right(n, b);
@@ -2381,12 +2354,14 @@ static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode) {
	ir_mode *smode = get_irn_mode(a);

	if (ta == get_mode_one(smode)) {
+		/* (L)1 * (L)b = (L)b */
		ir_node *blk = get_irn_n(n, -1);
		n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, b, mode);
		DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
		return n;
	} else if (ta == get_mode_minus_one(smode)) {
+		/* (L)-1 * (L)b = (L)-b */
		ir_node *blk = get_irn_n(n, -1);
		n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, b, smode);
		n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
@@ -2394,12 +2369,14 @@ static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode) {
		return n;
	}
	if (tb == get_mode_one(smode)) {
+		/* (L)a * (L)1 = (L)a */
		ir_node *blk = get_irn_n(a, -1);
		n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, a, mode);
		DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
		return n;
	} else if (tb == get_mode_minus_one(smode)) {
+		/* (L)a * (L)-1 = (L)-a */
		ir_node *blk = get_irn_n(n, -1);
		n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, a, smode);
		n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
@@ -2485,7 +2462,8 @@ static ir_node *transform_node_Mul(ir_node *n) {
		if (is_Const(a)) {
			tarval *tv = get_Const_tarval(a);
			if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
-				n = new_rd_Add(get_irn_dbg_info(n), 
current_ir_graph, get_irn_n(n, -1), b, b, mode); + /* 2.0 * b = b + b */ + n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), b, b, mode); DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A); return n; } @@ -2493,7 +2471,8 @@ static ir_node *transform_node_Mul(ir_node *n) { else if (is_Const(b)) { tarval *tv = get_Const_tarval(b); if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) { - n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, a, mode); + /* a * 2.0 = a + a */ + n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, a, mode); DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A); return n; } @@ -2506,10 +2485,39 @@ static ir_node *transform_node_Mul(ir_node *n) { * Transform a Div Node. */ static ir_node *transform_node_Div(ir_node *n) { - tarval *tv = value_of(n); ir_mode *mode = get_Div_resmode(n); - ir_node *value = n; + ir_node *a = get_Div_left(n); + ir_node *b = get_Div_right(n); + ir_node *value; + tarval *tv; + + if (is_Const(b) && is_const_Phi(a)) { + /* check for Div(Phi, Const) */ + value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0); + if (value) { + DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); + goto make_tuple; + } + } + else if (is_Const(a) && is_const_Phi(b)) { + /* check for Div(Const, Phi) */ + value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1); + if (value) { + DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); + goto make_tuple; + } + } + else if (is_const_Phi(a) && is_const_Phi(b)) { + /* check for Div(Phi, Phi) */ + value = apply_binop_on_2_phis(a, b, tarval_div, mode); + if (value) { + DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); + goto make_tuple; + } + } + value = n; + tv = value_of(n); if (tv != tarval_bad) { value = new_Const(get_tarval_mode(tv), tv); @@ -2565,10 +2573,39 @@ make_tuple: * Transform a Mod node. 
 */
static ir_node *transform_node_Mod(ir_node *n) {
-	tarval *tv = value_of(n);
	ir_mode *mode = get_Mod_resmode(n);
-	ir_node *value = n;
+	ir_node *a = get_Mod_left(n);
+	ir_node *b = get_Mod_right(n);
+	ir_node *value;
+	tarval *tv;
+
+	if (is_Const(b) && is_const_Phi(a)) {
+		/* check for Mod(Phi, Const) */
+		value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+		if (value) {
+			DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+	else if (is_Const(a) && is_const_Phi(b)) {
+		/* check for Mod(Const, Phi) */
+		value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+		if (value) {
+			DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+	else if (is_const_Phi(a) && is_const_Phi(b)) {
+		/* check for Mod(Phi, Phi) */
+		value = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+		if (value) {
+			DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+
+	value = n;
+	tv = value_of(n);
	if (tv != tarval_bad) {
		value = new_Const(get_tarval_mode(tv), tv);
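In every folded case the code jumps to make_tuple instead of exchanging the node directly: Mod, like Div and DivMod, carries memory and control-flow outputs, so the node is converted into a Tuple and its Projs are rerouted, exactly as the DivMod make_tuple code further down does with the pn_DivMod_* numbers. A hedged sketch of that step for Mod; the pn_Mod_* projection numbers and the turn_into_tuple/get_Mod_mem helpers are assumed by analogy and are not part of this hunk:

	ir_node *blk = get_irn_n(n, -1);
	ir_node *mem = get_Mod_mem(n); /* fetch mem before the node changes */

	turn_into_tuple(n, pn_Mod_max);
	set_Tuple_pred(n, pn_Mod_M,         mem);
	set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
	set_Tuple_pred(n, pn_Mod_X_except,  new_Bad()); /* folded: no exception */
	set_Tuple_pred(n, pn_Mod_res,       value);
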
@@ -2628,14 +2665,48 @@ static ir_node *transform_node_DivMod(ir_node *n) {
	ir_node *a = get_DivMod_left(n);
	ir_node *b = get_DivMod_right(n);
	ir_mode *mode = get_DivMod_resmode(n);
-	tarval *ta = value_of(a);
-	tarval *tb = value_of(b);
+	tarval *ta, *tb;
	int evaluated = 0;
+	ir_node *va, *vb;
+
+	if (is_Const(b) && is_const_Phi(a)) {
+		/* check for DivMod(Phi, Const) */
+		va = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0);
+		vb = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+		if (va && vb) {
+			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
+			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+	else if (is_Const(a) && is_const_Phi(b)) {
+		/* check for DivMod(Const, Phi) */
+		va = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1);
+		vb = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+		if (va && vb) {
+			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
+			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+	else if (is_const_Phi(a) && is_const_Phi(b)) {
+		/* check for DivMod(Phi, Phi) */
+		va = apply_binop_on_2_phis(a, b, tarval_div, mode);
+		vb = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+		if (va && vb) {
+			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
+			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+
+	ta = value_of(a);
+	tb = value_of(b);

	if (tb != tarval_bad) {
		if (tb == get_mode_one(get_tarval_mode(tb))) {
-			b = new_Const(mode, get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, b);
+			va = a;
+			vb = new_Const(mode, get_mode_null(mode));
+			DBG_OPT_CSTEVAL(n, vb);
			goto make_tuple;
		} else if (ta != tarval_bad) {
			tarval *resa, *resb;
@@ -2644,28 +2715,30 @@
			   Jmp for X result!? */
			resb = tarval_mod(ta, tb);
			if (resb == tarval_bad) return n; /* Causes exception! */
-			a = new_Const(mode, resa);
-			b = new_Const(mode, resb);
-			DBG_OPT_CSTEVAL(n, a);
-			DBG_OPT_CSTEVAL(n, b);
+			va = new_Const(mode, resa);
+			vb = new_Const(mode, resb);
+			DBG_OPT_CSTEVAL(n, va);
+			DBG_OPT_CSTEVAL(n, vb);
			goto make_tuple;
		} else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
-			a = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
-			b = new_Const(mode, get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, a);
-			DBG_OPT_CSTEVAL(n, b);
+			va = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+			vb = new_Const(mode, get_mode_null(mode));
+			DBG_OPT_CSTEVAL(n, va);
+			DBG_OPT_CSTEVAL(n, vb);
			goto make_tuple;
		} else { /* Try architecture dependent optimization */
-			arch_dep_replace_divmod_by_const(&a, &b, n);
-			evaluated = a != NULL;
+			va = a;
+			vb = b;
+			arch_dep_replace_divmod_by_const(&va, &vb, n);
+			evaluated = va != NULL;
		}
	} else if (a == b) {
		if (value_not_zero(a, &dummy)) {
			/* a/a && a != 0 */
-			a = new_Const(mode, get_mode_one(mode));
-			b = new_Const(mode, get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, a);
-			DBG_OPT_CSTEVAL(n, b);
+			va = new_Const(mode, get_mode_one(mode));
+			vb = new_Const(mode, get_mode_null(mode));
+			DBG_OPT_CSTEVAL(n, va);
+			DBG_OPT_CSTEVAL(n, vb);
			goto make_tuple;
		} else {
			/* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */
@@ -2673,7 +2746,7 @@
		}
	} else if (ta == get_mode_null(mode) && value_not_zero(b, &dummy)) {
		/* 0 / non-Const = 0 */
-		b = a;
+		vb = va = a;
		goto make_tuple;
	}
@@ -2691,8 +2764,8 @@ make_tuple:
		set_Tuple_pred(n, pn_DivMod_M, mem);
		set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
		set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
-		set_Tuple_pred(n, pn_DivMod_res_div, a);
-		set_Tuple_pred(n, pn_DivMod_res_mod, b);
+		set_Tuple_pred(n, pn_DivMod_res_div, va);
+		set_Tuple_pred(n, pn_DivMod_res_mod, vb);
	}

	return n;
@@ -2741,6 +2814,7 @@ static ir_node *transform_node_Quot(ir_node *n) {
 /**
  * Optimize Abs(x) into x if x is Confirmed >= 0
  * Optimize Abs(x) into -x if x is Confirmed <= 0
+ * Optimize Abs(-x) into Abs(x)
  */
static ir_node *transform_node_Abs(ir_node *n) {
	ir_node *c, *oldn = n;
@@ -2761,7 +2835,7 @@ static ir_node *transform_node_Abs(ir_node *n) {
	 * not run it in the equivalent_node() context.
*/ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, - get_irn_n(n, -1), a, mode); + get_nodes_block(n), a, mode); DBG_OPT_CONFIRM(oldn, n); return n; @@ -2772,8 +2846,17 @@ static ir_node *transform_node_Abs(ir_node *n) { DBG_OPT_CONFIRM(oldn, n); return n; default: + break; + } + if (is_Minus(a)) { + /* Abs(-x) = Abs(x) */ + mode = get_irn_mode(n); + n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, + get_nodes_block(n), get_Minus_op(a), mode); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ABS_MINUS_X); return n; } + return n; } /* transform_node_Abs */ /** @@ -2863,7 +2946,8 @@ static ir_node *transform_bitwise_distributive(ir_node *n, ir_node *b_left = get_binop_left(b); ir_node *b_right = get_binop_right(b); ir_node *c = NULL; - ir_node *op1, *op2; + ir_node *op1 = NULL; + ir_node *op2 = NULL; if (is_op_commutative(op)) { if (a_left == b_left) { @@ -3204,7 +3288,7 @@ static ir_node *transform_node_Minus(ir_node *n) { if (is_Const(c)) { tarval *tv = get_Const_tarval(c); - if (tarval_is_long(tv) && get_tarval_long(tv) == get_mode_size_bits(mode) - 1) { + if (tarval_is_long(tv) && get_tarval_long(tv) == (int) get_mode_size_bits(mode) - 1) { /* -(a >>u (size-1)) = a >>s (size-1) */ ir_node *v = get_Shr_left(a); @@ -3220,7 +3304,7 @@ static ir_node *transform_node_Minus(ir_node *n) { if (is_Const(c)) { tarval *tv = get_Const_tarval(c); - if (tarval_is_long(tv) && get_tarval_long(tv) == get_mode_size_bits(mode) - 1) { + if (tarval_is_long(tv) && get_tarval_long(tv) == (int) get_mode_size_bits(mode) - 1) { /* -(a >>s (size-1)) = a >>u (size-1) */ ir_node *v = get_Shrs_left(a); @@ -3273,8 +3357,8 @@ static ir_node *transform_node_Cast(ir_node *n) { get_Const_tarval(pred), tp); DBG_OPT_CSTEVAL(oldn, n); } else if (is_SymConst(pred) && get_SymConst_value_type(pred) != tp) { - n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_SymConst_symbol(pred), - get_SymConst_kind(pred), tp); + n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred), + get_SymConst_symbol(pred), get_SymConst_kind(pred), tp); DBG_OPT_CSTEVAL(oldn, n); } @@ -3487,18 +3571,44 @@ static ir_node *create_zero_const(ir_mode *mode) { return cnst; } +/* the order of the values is important! */ +typedef enum const_class { + const_const = 0, + const_like = 1, + const_other = 2 +} const_class; + +static const_class classify_const(const ir_node* n) +{ + if (is_Const(n)) return const_const; + if (is_irn_constlike(n)) return const_like; + return const_other; +} + +/** + * Determines whether r is more constlike or has a larger index (in that order) + * than l. + */ +static int operands_are_normalized(const ir_node *l, const ir_node *r) +{ + const const_class l_order = classify_const(l); + const const_class r_order = classify_const(r); + return + l_order > r_order || + (l_order == r_order && get_irn_idx(l) <= get_irn_idx(r)); +} + /** * Normalizes and optimizes Cmp nodes. 
*/ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { - ir_node *n = get_Proj_pred(proj); - ir_node *left = get_Cmp_left(n); - ir_node *right = get_Cmp_right(n); - ir_node *c = NULL; - tarval *tv = NULL; - int changed = 0; - ir_mode *mode = NULL; - long proj_nr = get_Proj_proj(proj); + ir_node *n = get_Proj_pred(proj); + ir_node *left = get_Cmp_left(n); + ir_node *right = get_Cmp_right(n); + tarval *tv = NULL; + int changed = 0; + ir_mode *mode = NULL; + long proj_nr = get_Proj_proj(proj); /* we can evaluate some cases directly */ switch (proj_nr) { @@ -3529,7 +3639,8 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { ir_mode *mode_left = get_irn_mode(op_left); ir_mode *mode_right = get_irn_mode(op_right); - if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)) { + if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode) + && mode_left != mode_b && mode_right != mode_b) { ir_graph *irg = current_ir_graph; ir_node *block = get_nodes_block(n); @@ -3697,30 +3808,12 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { } } - if (!get_opt_reassociation()) - return proj; - /* * First step: normalize the compare op * by placing the constant on the right side * or moving the lower address node to the left. - * We ignore the case that both are constants - * this case should be optimized away. */ - if (is_irn_constlike(right) && !(is_Const(left) && !is_Const(right))) { - if(is_Const(right)) { - c = right; - } - } else if (is_irn_constlike(left)) { - c = left; - left = right; - right = c; - if(!is_Const(c)) - c = NULL; - - proj_nr = get_inversed_pnc(proj_nr); - changed |= 1; - } else if (get_irn_idx(left) > get_irn_idx(right)) { + if (!operands_are_normalized(left, right)) { ir_node *t = left; left = right; @@ -3736,9 +3829,9 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { * later and may help to normalize more compares. * Of course this is only possible for integer values. */ - if (c) { - mode = get_irn_mode(c); - tv = get_Const_tarval(c); + if (is_Const(right)) { + mode = get_irn_mode(right); + tv = get_Const_tarval(right); /* TODO extend to arbitrary constants */ if (is_Conv(left) && tarval_is_null(tv)) { @@ -4338,7 +4431,7 @@ static ir_node *transform_node_Or_Rot(ir_node *or) { return or; if (get_tarval_long(tv1) + get_tarval_long(tv2) - != get_mode_size_bits(mode)) + != (int) get_mode_size_bits(mode)) return or; /* yet, condition met */ @@ -4363,7 +4456,7 @@ static ir_node *transform_node_Or_Rot(ir_node *or) { if (! tarval_is_long(tv1)) return or; - if (get_tarval_long(tv1) != get_mode_size_bits(mode)) + if (get_tarval_long(tv1) != (int) get_mode_size_bits(mode)) return or; /* yet, condition met */ @@ -4386,7 +4479,7 @@ static ir_node *transform_node_Or_Rot(ir_node *or) { if (! tarval_is_long(tv1)) return or; - if (get_tarval_long(tv1) != get_mode_size_bits(mode)) + if (get_tarval_long(tv1) != (int) get_mode_size_bits(mode)) return or; /* yet, condition met */ @@ -4582,6 +4675,12 @@ static ir_node *transform_node_Conv(ir_node *n) { return c; } } + + if (is_Unknown(a)) { /* Conv_A(Unknown_B) -> Unknown_A */ + ir_mode *mode = get_irn_mode(n); + return new_r_Unknown(current_ir_graph, mode); + } + return n; } /* transform_node_Conv */ @@ -5048,30 +5147,32 @@ static ir_op_ops *firm_set_default_node_cmp_attr(ir_opcode code, ir_op_ops *ops) } /* firm_set_default_node_cmp_attr */ /* - * Compare function for two nodes in the hash table. Gets two - * nodes as parameters. Returns 0 if the nodes are a cse. 
+ * Compare function for two nodes in the value table. Gets two
+ * nodes as parameters. Returns 0 if the nodes are a Common Sub Expression.
 */
int identities_cmp(const void *elt, const void *key) {
-	ir_node *a, *b;
+	const ir_node *a = elt;
+	const ir_node *b = key;
	int i, irn_arity_a;

-	a = (void *)elt;
-	b = (void *)key;
-
	if (a == b) return 0;

	if ((get_irn_op(a) != get_irn_op(b)) ||
	    (get_irn_mode(a) != get_irn_mode(b))) return 1;

	/* compare if a's in and b's in are of equal length */
-	irn_arity_a = get_irn_intra_arity (a);
+	irn_arity_a = get_irn_intra_arity(a);
	if (irn_arity_a != get_irn_intra_arity(b))
		return 1;

-	/* for block-local cse and op_pin_state_pinned nodes: */
-	if (!get_opt_global_cse() || (get_irn_pinned(a) == op_pin_state_pinned)) {
+	if (get_irn_pinned(a) == op_pin_state_pinned) {
+		/* for pinned nodes, the block inputs must be equal */
		if (get_irn_intra_n(a, -1) != get_irn_intra_n(b, -1))
			return 1;
+	} else if (! get_opt_global_cse()) {
+		/* for block-local CSE both nodes must be in the same MacroBlock */
+		if (get_irn_MacroBlock(a) != get_irn_MacroBlock(b))
+			return 1;
	}

	/* compare a->in[0..ins] with b->in[0..ins] */
@@ -5132,34 +5233,74 @@ void del_identities(pset *value_table) {
} /* del_identities */

 /**
- * Normalize a node by putting constants (and operands with smaller
- * node index) on the right
+ * Normalize a node by putting constants (and operands with larger
+ * node index) on the right (operator side).
  *
  * @param n The node to normalize
  */
static void normalize_node(ir_node *n) {
-	if (get_opt_reassociation()) {
-		if (is_op_commutative(get_irn_op(n))) {
-			ir_node *l = get_binop_left(n);
-			ir_node *r = get_binop_right(n);
-			int l_idx = get_irn_idx(l);
-			int r_idx = get_irn_idx(r);
-
-			/* For commutative operators perform a OP b == b OP a but keep
-			constants on the RIGHT side. This helps greatly in some optimizations.
-			Moreover we use the idx number to make the form deterministic. */
-			if (is_irn_constlike(l))
-				l_idx = -l_idx;
-			if (is_irn_constlike(r))
-				r_idx = -r_idx;
-			if (l_idx < r_idx) {
-				set_binop_left(n, r);
-				set_binop_right(n, l);
-			}
+	if (is_op_commutative(get_irn_op(n))) {
+		ir_node *l = get_binop_left(n);
+		ir_node *r = get_binop_right(n);
+
+		/* For commutative operators perform a OP b == b OP a but keep
+		 * constants on the RIGHT side. This helps greatly in some
+		 * optimizations. Moreover we use the idx number to make the form
+		 * deterministic. */
+		if (!operands_are_normalized(l, r)) {
+			set_binop_left(n, r);
+			set_binop_right(n, l);
		}
	}
} /* normalize_node */

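normalize_node now shares its ordering rule with transform_node_Proj_Cmp through operands_are_normalized (added earlier in this patch): constants and const-like nodes go to the right, and ties are broken by node index so the chosen form is deterministic. A small standalone model of that predicate, mirroring the const_class enum from this patch (the node struct and main are illustrative only):

#include <stdio.h>

/* the order of the values is important (mirrors the patch) */
typedef enum const_class {
	const_const = 0,
	const_like  = 1,
	const_other = 2
} const_class;

typedef struct node { const_class order; int idx; } node;

/* 1 iff l may stay on the left: l is less const-like than r,
 * or equally const-like with the smaller (or equal) index. */
static int operands_are_normalized(node l, node r)
{
	return l.order > r.order ||
	       (l.order == r.order && l.idx <= r.idx);
}

int main(void)
{
	node x = { const_other, 7 }, c = { const_const, 4 };
	printf("Add(c, x) normalized? %d\n", operands_are_normalized(c, x)); /* 0 -> swap */
	printf("Add(x, c) normalized? %d\n", operands_are_normalized(x, c)); /* 1 -> keep */
	return 0;
}
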
+/**
+ * Update the nodes after a match in the value table. If both nodes have
+ * the same MacroBlock but different Blocks, we must ensure that the node
+ * with the dominating Block (the node that is nearer to the MacroBlock
+ * header) is stored in the table.
+ * Because a MacroBlock has only one "non-exception" flow, we don't need
+ * dominance info here: we know that one block must dominate the other, and
+ * following the only block input will allow us to find it.
+ */
+static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node) {
+	ir_node *known_blk, *new_block, *block, *mbh;
+
+	if (get_opt_global_cse()) {
+		/* Block inputs are meaningless */
+		return;
+	}
+	known_blk = get_irn_n(known_irn, -1);
+	new_block = get_irn_n(new_ir_node, -1);
+	if (known_blk == new_block) {
+		/* already in the same block */
+		return;
+	}
+	/*
+	 * We expect the typical case when we built the graph. In that case, the
+	 * known_irn is already the upper one, so checking this should be faster.
+	 */
+	block = new_block;
+	mbh   = get_Block_MacroBlock(new_block);
+	for (;;) {
+		if (block == known_blk) {
+			/* ok, we have found it: known_block dominates new_block as expected */
+			return;
+		}
+		if (block == mbh) {
+			/*
+			 * We have reached the MacroBlock header without finding
+			 * the known_block. new_block must dominate known_block.
+			 * Update known_irn.
+			 */
+			set_irn_n(known_irn, -1, new_block);
+			return;
+		}
+		assert(get_Block_n_cfgpreds(block) == 1);
+		block = get_Block_cfgpred_block(block, 0);
+	}
+} /* update_known_irn */
+
 /**
  * Return the canonical node computing the same value as n.
  *
@@ -5180,8 +5321,10 @@ static INLINE ir_node *identify(pset *value_table, ir_node *n) {

	normalize_node(n);

	o = pset_find(value_table, n, ir_node_hash(n));
-	if (!o) return n;
+	if (o == NULL)
+		return n;

+	update_known_irn(o, n);
	DBG_OPT_CSE(n, o);

	return o;
@@ -5191,12 +5334,15 @@ static INLINE ir_node *identify(pset *value_table, ir_node *n) {
 * During construction we set the op_pin_state_pinned flag in the graph right when the
 * optimization is performed. The flag turning on procedure global cse could
 * be changed between two allocations. This way we are safe.
+ *
+ * @param value_table The value table
+ * @param n           The node to lookup
 */
static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
	ir_node *old = n;

	n = identify(value_table, n);
-	if (get_irn_n(old, -1) != get_irn_n(n, -1))
+	if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n))
		set_irg_pinned(current_ir_graph, op_pin_state_floats);
	return n;
} /* identify_cons */

@@ -5205,6 +5351,13 @@ static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
 * Return the canonical node computing the same value as n.
 * Looks up the node in a hash table, enters it in the table
 * if it isn't there yet.
+ *
+ * @param value_table the HashSet containing all nodes in the
+ *                    current IR graph
+ * @param n           the node to look up
+ *
+ * @return a node that computes the same value as n or n if no such
+ *         node could be found
 */
ir_node *identify_remember(pset *value_table, ir_node *n) {
	ir_node *o = NULL;

@@ -5216,6 +5369,7 @@ ir_node *identify_remember(pset *value_table, ir_node *n) {
	o = pset_insert(value_table, n, ir_node_hash(n));

	if (o != n) {
+		update_known_irn(o, n);
		DBG_OPT_CSE(n, o);
	}
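The walk in update_known_irn leans on the MacroBlock invariant that every block other than the header has exactly one "non-exception" control-flow predecessor, so the chain from new_block up to the header is unique and no dominance information is needed. A standalone model of that walk, with blocks reduced to a singly linked predecessor chain (names are illustrative):

#include <stdio.h>

typedef struct block { const char *name; struct block *pred; } block;

/* Returns 1 if known is met while walking up from fresh (known dominates
 * fresh); returns 0 if the MacroBlock header is reached first, in which
 * case fresh dominates known and the table entry should be moved up. */
static int known_dominates(const block *fresh, const block *known,
                           const block *mbh)
{
	const block *b = fresh;
	for (;;) {
		if (b == known) return 1;
		if (b == mbh)   return 0;
		b = b->pred; /* unique predecessor inside a MacroBlock */
	}
}

int main(void)
{
	block mbh = { "mbh", NULL };
	block b1  = { "b1", &mbh };
	block b2  = { "b2", &b1 };
	printf("%d\n", known_dominates(&b2, &b1, &mbh)); /* 1: keep known node */
	printf("%d\n", known_dominates(&b1, &b2, &mbh)); /* 0: hoist known node */
	return 0;
}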