X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=4fc0169aa03e8ecfbbd4038809d7cbc981c15fbd;hb=203717b8dd44597fb10c126b33a28528a2432e9c;hp=6984b87372f31900de5e46fca62801d9f3c8c87c;hpb=ebe2d199a9067f39af06ad6a13494b445a138019;p=libfirm diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 6984b8737..4fc0169aa 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -178,7 +178,7 @@ static tarval *computed_value_Minus(ir_node *n) { ir_node *a = get_Minus_op(n); tarval *ta = value_of(a); - if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a))) + if (ta != tarval_bad) return tarval_neg(ta); return tarval_bad; @@ -1159,7 +1159,6 @@ static ir_node *equivalent_node_And(ir_node *n) { static ir_node *equivalent_node_Conv(ir_node *n) { ir_node *oldn = n; ir_node *a = get_Conv_op(n); - ir_node *b; ir_mode *n_mode = get_irn_mode(n); ir_mode *a_mode = get_irn_mode(a); @@ -1174,26 +1173,30 @@ static ir_node *equivalent_node_Conv(ir_node *n) { return n; } /* else both are strict conv, second is superflous */ - } else { - /* leave strict floating point Conv's */ - return n; + } else if(is_Proj(a)) { + ir_node *pred = get_Proj_pred(a); + if(is_Load(pred)) { + /* loads always return with the exact precision of n_mode */ + assert(get_Load_mode(pred) == n_mode); + return a; + } } + + /* leave strict floating point Conv's */ + return n; } n = a; DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV); } else if (is_Conv(a)) { /* Conv(Conv(b)) */ - ir_mode *b_mode; - - b = get_Conv_op(a); - n_mode = get_irn_mode(n); - b_mode = get_irn_mode(b); + ir_node *b = get_Conv_op(a); + ir_mode *b_mode = get_irn_mode(b); if (n_mode == b_mode) { if (n_mode == mode_b) { n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV); } else if (mode_is_int(n_mode)) { - if (smaller_mode(b_mode, a_mode)){ + if (get_mode_size_bits(b_mode) <= get_mode_size_bits(a_mode)) { n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV); } @@ -1692,7 +1695,7 @@ static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *op static int is_const_Phi(ir_node *n) { int i; - if (! is_Phi(n)) + if (! is_Phi(n) || get_irn_arity(n) == 0) return 0; for (i = get_irn_arity(n) - 1; i >= 0; --i) if (! is_Const(get_irn_n(n, i))) @@ -1706,15 +1709,15 @@ static int is_const_Phi(ir_node *n) { * @param phi the Phi node * @param other the other operand * @param eval an evaluator function + * @param mode the mode of the result, may be different from the mode of the Phi! * @param left if non-zero, other is the left operand, else the right * * @return a new Phi node if the conversion was successful, NULL else */ -static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)(tarval *, tarval *), int left) { +static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)(tarval *, tarval *), ir_mode *mode, int left) { tarval *tv; void **res; ir_node *pred; - ir_mode *mode; ir_graph *irg; int i, n = get_irn_arity(phi); @@ -1744,7 +1747,6 @@ static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)( res[i] = tv; } } - mode = get_irn_mode(phi); irg = current_ir_graph; for (i = 0; i < n; ++i) { pred = get_irn_n(phi, i); @@ -1754,6 +1756,50 @@ static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)( return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode); } /* apply_binop_on_phi */ +/** + * Apply an evaluator on a binop with two constant Phi. 
+ * + * @param a the left Phi node + * @param b the right Phi node + * @param eval an evaluator function + * @param mode the mode of the result, may be different from the mode of the Phi! + * + * @return a new Phi node if the conversion was successful, NULL else + */ +static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, tarval *(*eval)(tarval *, tarval *), ir_mode *mode) { + tarval *tv_l, *tv_r, *tv; + void **res; + ir_node *pred; + ir_graph *irg; + int i, n; + + if (get_nodes_block(a) != get_nodes_block(b)) + return NULL; + + n = get_irn_arity(a); + NEW_ARR_A(void *, res, n); + + for (i = 0; i < n; ++i) { + pred = get_irn_n(a, i); + tv_l = get_Const_tarval(pred); + pred = get_irn_n(b, i); + tv_r = get_Const_tarval(pred); + tv = eval(tv_l, tv_r); + + if (tv == tarval_bad) { + /* folding failed, bad */ + return NULL; + } + res[i] = tv; + } + irg = current_ir_graph; + for (i = 0; i < n; ++i) { + pred = get_irn_n(a, i); + res[i] = new_r_Const_type(irg, get_irg_start_block(irg), mode, res[i], get_Const_type(pred)); + } + return new_r_Phi(irg, get_nodes_block(a), n, (ir_node **)res, mode); +} /* apply_binop_on_2_phis */ + /** * Apply an evaluator on a unop with a constant operator (a Phi). * @@ -1792,6 +1838,41 @@ static ir_node *apply_unop_on_phi(ir_node *phi, tarval *(*eval)(tarval *)) { return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode); } /* apply_unop_on_phi */ +/** + * Apply a conversion on a constant operator (a Phi). + * + * @param phi the Phi node + * + * @return a new Phi node if the conversion was successful, NULL else + */ +static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode) { + tarval *tv; + void **res; + ir_node *pred; + ir_graph *irg; + int i, n = get_irn_arity(phi); + + NEW_ARR_A(void *, res, n); + for (i = 0; i < n; ++i) { + pred = get_irn_n(phi, i); + tv = get_Const_tarval(pred); + tv = tarval_convert_to(tv, mode); + + if (tv == tarval_bad) { + /* folding failed, bad */ + return 0; + } + res[i] = tv; + } + irg = current_ir_graph; + for (i = 0; i < n; ++i) { + pred = get_irn_n(phi, i); + res[i] = new_r_Const_type(irg, get_irg_start_block(irg), + mode, res[i], get_Const_type(pred)); + } + return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode); +} /* apply_conv_on_phi */ + /** * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)). 
@@ -1854,26 +1935,30 @@ static ir_node *transform_node_AddSub(ir_node *n) { return n; } /* transform_node_AddSub */ -#define HANDLE_BINOP_PHI(op,a,b,c) \ - c = NULL; \ - if (is_Const(b) && is_const_Phi(a)) { \ - /* check for Op(Phi, Const) */ \ - c = apply_binop_on_phi(a, get_Const_tarval(b), op, 0); \ - } \ - else if (is_Const(a) && is_const_Phi(b)) { \ - /* check for Op(Const, Phi) */ \ - c = apply_binop_on_phi(b, get_Const_tarval(a), op, 1); \ - } \ - if (c) { \ - DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \ - return c; \ +#define HANDLE_BINOP_PHI(eval, a, b, c, mode) \ + c = NULL; \ + if (is_Const(b) && is_const_Phi(a)) { \ + /* check for Op(Phi, Const) */ \ + c = apply_binop_on_phi(a, get_Const_tarval(b), eval, mode, 0);\ + } \ + else if (is_Const(a) && is_const_Phi(b)) { \ + /* check for Op(Const, Phi) */ \ + c = apply_binop_on_phi(b, get_Const_tarval(a), eval, mode, 1);\ + } \ + else if (is_const_Phi(a) && is_const_Phi(b)) { \ + /* check for Op(Phi, Phi) */ \ + c = apply_binop_on_2_phis(a, b, eval, mode); \ + } \ + if (c) { \ + DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \ + return c; \ } -#define HANDLE_UNOP_PHI(op,a,c) \ +#define HANDLE_UNOP_PHI(eval, a, c) \ c = NULL; \ if (is_const_Phi(a)) { \ /* check for Op(Phi) */ \ - c = apply_unop_on_phi(a, op); \ + c = apply_unop_on_phi(a, eval); \ if (c) { \ DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \ return c; \ @@ -1898,9 +1983,8 @@ static ir_node *transform_node_Add(ir_node *n) { a = get_Add_left(n); b = get_Add_right(n); - HANDLE_BINOP_PHI(tarval_add, a,b,c); - mode = get_irn_mode(n); + HANDLE_BINOP_PHI(tarval_add, a, b, c, mode); /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */ if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)) @@ -1943,74 +2027,6 @@ static ir_node *transform_node_Add(ir_node *n) { DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B); return n; } - if (! is_reassoc_running()) { - /* do NOT execute this code if reassociation is enabled, it does the inverse! 
*/ - if (is_Mul(a)) { - ir_node *ma = get_Mul_left(a); - ir_node *mb = get_Mul_right(a); - - if (b == ma) { - ir_node *blk = get_irn_n(n, -1); - n = new_rd_Mul( - get_irn_dbg_info(n), current_ir_graph, blk, - ma, - new_rd_Add( - get_irn_dbg_info(n), current_ir_graph, blk, - mb, - new_r_Const_long(current_ir_graph, blk, mode, 1), - mode), - mode); - DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A); - return n; - } else if (b == mb) { - ir_node *blk = get_irn_n(n, -1); - n = new_rd_Mul( - get_irn_dbg_info(n), current_ir_graph, blk, - mb, - new_rd_Add( - get_irn_dbg_info(n), current_ir_graph, blk, - ma, - new_r_Const_long(current_ir_graph, blk, mode, 1), - mode), - mode); - DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A); - return n; - } - } - if (is_Mul(b)) { - ir_node *ma = get_Mul_left(b); - ir_node *mb = get_Mul_right(b); - - if (a == ma) { - ir_node *blk = get_irn_n(n, -1); - n = new_rd_Mul( - get_irn_dbg_info(n), current_ir_graph, blk, - ma, - new_rd_Add( - get_irn_dbg_info(n), current_ir_graph, blk, - mb, - new_r_Const_long(current_ir_graph, blk, mode, 1), - mode), - mode); - DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A); - return n; - } - if (a == mb) { - ir_node *blk = get_irn_n(n, -1); - n = new_rd_Mul( - get_irn_dbg_info(n), current_ir_graph, blk, - mb, - new_rd_Add( - get_irn_dbg_info(n), current_ir_graph, blk, - ma, - new_r_Const_long(current_ir_graph, blk, mode, 1), - mode), - mode); - DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A); - return n; - } - } - } if (get_mode_arithmetic(mode) == irma_twos_complement) { /* Here we rely on constants be on the RIGHT side */ if (is_Not(a)) { @@ -2047,7 +2063,9 @@ static ir_node *transform_node_Add(ir_node *n) { return n; } /* transform_node_Add */ -/* returns -cnst */ +/** + * returns -cnst or NULL if impossible + */ static ir_node *const_negate(ir_node *cnst) { tarval *tv = tarval_neg(get_Const_tarval(cnst)); dbg_info *dbgi = get_irn_dbg_info(cnst); @@ -2081,7 +2099,7 @@ static ir_node *transform_node_Sub(ir_node *n) { mode = get_irn_mode(n); restart: - HANDLE_BINOP_PHI(tarval_sub, a,b,c); + HANDLE_BINOP_PHI(tarval_sub, a, b, c, mode); /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */ if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)) @@ -2106,7 +2124,6 @@ restart: dbg_info *dbg = get_irn_dbg_info(n); ir_node *block = get_nodes_block(n); ir_node *left = get_Minus_op(a); - ir_mode *mode = get_irn_mode(n); ir_node *add = new_rd_Add(dbg, irg, block, left, b, mode); n = new_rd_Minus(dbg, irg, block, add, mode); @@ -2117,7 +2134,6 @@ restart: dbg_info *dbg = get_irn_dbg_info(n); ir_node *block = get_nodes_block(n); ir_node *right = get_Minus_op(b); - ir_mode *mode = get_irn_mode(n); n = new_rd_Add(dbg, irg, block, a, right, mode); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MINUS); @@ -2132,9 +2148,8 @@ restart: ir_node *sub = new_rd_Sub(s_dbg, irg, s_block, s_left, s_right, s_mode); dbg_info *a_dbg = get_irn_dbg_info(n); ir_node *a_block = get_nodes_block(n); - ir_mode *a_mode = get_irn_mode(n); - n = new_rd_Add(a_dbg, irg, a_block, a, sub, a_mode); + n = new_rd_Add(a_dbg, irg, a_block, a, sub, mode); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD); return n; } else if (is_Mul(b)) { /* a - (b * C) -> a + (b * -C) */ @@ -2150,9 +2165,8 @@ restart: ir_node *mul = new_rd_Mul(m_dbg, irg, m_block, m_left, cnst2, m_mode); dbg_info *a_dbg = get_irn_dbg_info(n); ir_node *a_block = get_nodes_block(n); - ir_mode *a_mode = get_irn_mode(n); - n = new_rd_Add(a_dbg, irg, a_block, a, mul, a_mode); + n 
= new_rd_Add(a_dbg, irg, a_block, a, mul, mode); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD); return n; } @@ -2229,16 +2243,16 @@ restart: if (mode == get_irn_mode(b)) { ir_mode *ma, *mb; - - a = get_Conv_op(a); - b = get_Conv_op(b); + ir_node *op_a = get_Conv_op(a); + ir_node *op_b = get_Conv_op(b); /* check if it's allowed to skip the conv */ - ma = get_irn_mode(a); - mb = get_irn_mode(b); + ma = get_irn_mode(op_a); + mb = get_irn_mode(op_b); if (mode_is_reference(ma) && mode_is_reference(mb)) { /* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */ + a = op_a; b = op_b; set_Sub_left(n, a); set_Sub_right(n, b); @@ -2392,7 +2406,7 @@ static ir_node *transform_node_Mul(ir_node *n) { if (mode != get_irn_mode(a)) return transform_node_Mul2n(n, mode); - HANDLE_BINOP_PHI(tarval_mul, a,b,c); + HANDLE_BINOP_PHI(tarval_mul, a, b, c, mode); if (mode_is_signed(mode)) { ir_node *r = NULL; @@ -2472,10 +2486,39 @@ static ir_node *transform_node_Mul(ir_node *n) { * Transform a Div Node. */ static ir_node *transform_node_Div(ir_node *n) { - tarval *tv = value_of(n); ir_mode *mode = get_Div_resmode(n); - ir_node *value = n; + ir_node *a = get_Div_left(n); + ir_node *b = get_Div_right(n); + ir_node *value; + tarval *tv; + + if (is_Const(b) && is_const_Phi(a)) { + /* check for Div(Phi, Const) */ + value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0); + if (value) { + DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); + goto make_tuple; + } + } + else if (is_Const(a) && is_const_Phi(b)) { + /* check for Div(Const, Phi) */ + value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1); + if (value) { + DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); + goto make_tuple; + } + } + else if (is_const_Phi(a) && is_const_Phi(b)) { + /* check for Div(Phi, Phi) */ + value = apply_binop_on_2_phis(a, b, tarval_div, mode); + if (value) { + DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); + goto make_tuple; + } + } + value = n; + tv = value_of(n); if (tv != tarval_bad) { value = new_Const(get_tarval_mode(tv), tv); @@ -2531,10 +2574,39 @@ make_tuple: * Transform a Mod node. 
 */
 static ir_node *transform_node_Mod(ir_node *n) {
-	tarval *tv = value_of(n);
 	ir_mode *mode = get_Mod_resmode(n);
-	ir_node *value = n;
+	ir_node *a = get_Mod_left(n);
+	ir_node *b = get_Mod_right(n);
+	ir_node *value;
+	tarval *tv;
+
+	if (is_Const(b) && is_const_Phi(a)) {
+		/* check for Mod(Phi, Const) */
+		value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+		if (value) {
+			DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+	else if (is_Const(a) && is_const_Phi(b)) {
+		/* check for Mod(Const, Phi) */
+		value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+		if (value) {
+			DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+	else if (is_const_Phi(a) && is_const_Phi(b)) {
+		/* check for Mod(Phi, Phi) */
+		value = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+		if (value) {
+			DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+
+	value = n;
+	tv = value_of(n);
 	if (tv != tarval_bad) {
 		value = new_Const(get_tarval_mode(tv), tv);
@@ -2594,14 +2666,48 @@ static ir_node *transform_node_DivMod(ir_node *n) {
 	ir_node *a = get_DivMod_left(n);
 	ir_node *b = get_DivMod_right(n);
 	ir_mode *mode = get_DivMod_resmode(n);
-	tarval *ta = value_of(a);
-	tarval *tb = value_of(b);
+	tarval *ta, *tb;
 	int evaluated = 0;
+	ir_node *va, *vb;
+
+	if (is_Const(b) && is_const_Phi(a)) {
+		/* check for DivMod(Phi, Const) */
+		va = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0);
+		vb = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+		if (va && vb) {
+			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
+			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+	else if (is_Const(a) && is_const_Phi(b)) {
+		/* check for DivMod(Const, Phi) */
+		va = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1);
+		vb = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+		if (va && vb) {
+			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
+			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+	else if (is_const_Phi(a) && is_const_Phi(b)) {
+		/* check for DivMod(Phi, Phi) */
+		va = apply_binop_on_2_phis(a, b, tarval_div, mode);
+		vb = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+		if (va && vb) {
+			DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
+			DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
+			goto make_tuple;
+		}
+	}
+
+	ta = value_of(a);
+	tb = value_of(b);
 	if (tb != tarval_bad) {
 		if (tb == get_mode_one(get_tarval_mode(tb))) {
-			b = new_Const(mode, get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, b);
+			va = a;
+			vb = new_Const(mode, get_mode_null(mode));
+			DBG_OPT_CSTEVAL(n, vb);
 			goto make_tuple;
 		} else if (ta != tarval_bad) {
 			tarval *resa, *resb;
@@ -2610,28 +2716,30 @@ static ir_node *transform_node_DivMod(ir_node *n) {
 			   Jmp for X result!? */
 			resb = tarval_mod(ta, tb);
 			if (resb == tarval_bad) return n; /* Causes exception! */
-			a = new_Const(mode, resa);
-			b = new_Const(mode, resb);
-			DBG_OPT_CSTEVAL(n, a);
-			DBG_OPT_CSTEVAL(n, b);
+			va = new_Const(mode, resa);
+			vb = new_Const(mode, resb);
+			DBG_OPT_CSTEVAL(n, va);
+			DBG_OPT_CSTEVAL(n, vb);
 			goto make_tuple;
 		} else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
-			a = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
-			b = new_Const(mode, get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, a);
-			DBG_OPT_CSTEVAL(n, b);
+			va = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+			vb = new_Const(mode, get_mode_null(mode));
+			DBG_OPT_CSTEVAL(n, va);
+			DBG_OPT_CSTEVAL(n, vb);
 			goto make_tuple;
 		} else { /* Try architecture dependent optimization */
-			arch_dep_replace_divmod_by_const(&a, &b, n);
-			evaluated = a != NULL;
+			va = a;
+			vb = b;
+			arch_dep_replace_divmod_by_const(&va, &vb, n);
+			evaluated = va != NULL;
 		}
 	} else if (a == b) {
 		if (value_not_zero(a, &dummy)) {
 			/* a/a && a != 0 */
-			a = new_Const(mode, get_mode_one(mode));
-			b = new_Const(mode, get_mode_null(mode));
-			DBG_OPT_CSTEVAL(n, a);
-			DBG_OPT_CSTEVAL(n, b);
+			va = new_Const(mode, get_mode_one(mode));
+			vb = new_Const(mode, get_mode_null(mode));
+			DBG_OPT_CSTEVAL(n, va);
+			DBG_OPT_CSTEVAL(n, vb);
 			goto make_tuple;
 		} else {
 			/* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */
@@ -2639,7 +2747,7 @@
 		}
 	} else if (ta == get_mode_null(mode) && value_not_zero(b, &dummy)) {
 		/* 0 / non-Const = 0 */
-		b = a;
+		vb = va = a;
 		goto make_tuple;
 	}
 
@@ -2657,8 +2765,8 @@ make_tuple:
 		set_Tuple_pred(n, pn_DivMod_M, mem);
 		set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
 		set_Tuple_pred(n, pn_DivMod_X_except, new_Bad());  /* no exception */
-		set_Tuple_pred(n, pn_DivMod_res_div, a);
-		set_Tuple_pred(n, pn_DivMod_res_mod, b);
+		set_Tuple_pred(n, pn_DivMod_res_div, va);
+		set_Tuple_pred(n, pn_DivMod_res_mod, vb);
 	}
 
 	return n;
@@ -2709,12 +2817,15 @@ static ir_node *transform_node_Quot(ir_node *n) {
  * Optimize Abs(x) into -x if x is Confirmed <= 0
  */
 static ir_node *transform_node_Abs(ir_node *n) {
-	ir_node *oldn = n;
-	ir_node *a = get_Abs_op(n);
-	value_classify_sign sign = classify_value_sign(a);
+	ir_node *c, *oldn = n;
+	ir_node *a = get_Abs_op(n);
+	ir_mode *mode;
 
-	if (sign == value_classified_negative) {
-		ir_mode *mode = get_irn_mode(n);
+	HANDLE_UNOP_PHI(tarval_abs, a, c);
+
+	switch (classify_value_sign(a)) {
+	case value_classified_negative:
+		mode = get_irn_mode(n);
 
 		/*
 		 * We can replace the Abs by -x here.
@@ -2727,14 +2838,16 @@ static ir_node *transform_node_Abs(ir_node *n) { get_irn_n(n, -1), a, mode); DBG_OPT_CONFIRM(oldn, n); - } else if (sign == value_classified_positive) { + return n; + case value_classified_positive: /* n is positive, Abs is not needed */ n = a; DBG_OPT_CONFIRM(oldn, n); + return n; + default: + return n; } - - return n; } /* transform_node_Abs */ /** @@ -2824,7 +2937,8 @@ static ir_node *transform_bitwise_distributive(ir_node *n, ir_node *b_left = get_binop_left(b); ir_node *b_right = get_binop_right(b); ir_node *c = NULL; - ir_node *op1, *op2; + ir_node *op1 = NULL; + ir_node *op2 = NULL; if (is_op_commutative(op)) { if (a_left == b_left) { @@ -2868,6 +2982,7 @@ static ir_node *transform_bitwise_distributive(ir_node *n, set_irn_n(n, -1, blk); set_binop_left(n, new_n); set_binop_right(n, c); + add_identities(current_ir_graph->value_table, n); } DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_SHIFT_AND); @@ -2887,9 +3002,8 @@ static ir_node *transform_node_And(ir_node *n) { ir_node *b = get_And_right(n); ir_mode *mode; - HANDLE_BINOP_PHI(tarval_and, a,b,c); - mode = get_irn_mode(n); + HANDLE_BINOP_PHI(tarval_and, a, b, c, mode); /* we can evaluate 2 Projs of the same Cmp */ if (mode == mode_b && is_Proj(a) && is_Proj(b)) { @@ -3021,7 +3135,7 @@ static ir_node *transform_node_Eor(ir_node *n) { ir_node *b = get_Eor_right(n); ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_eor, a,b,c); + HANDLE_BINOP_PHI(tarval_eor, a, b, c, mode); /* we can evaluate 2 Projs of the same Cmp */ if (mode == mode_b && is_Proj(a) && is_Proj(b)) { @@ -3135,6 +3249,9 @@ static ir_node *transform_node_Not(ir_node *n) { * Optimize: * -(~x) = x + 1 * -(a-b) = b - a + * -(a >>u (size-1)) = a >>s (size-1) + * -(a >>s (size-1)) = a >>u (size-1) + * -(a * const) -> a * -const */ static ir_node *transform_node_Minus(ir_node *n) { ir_node *c, *oldn = n; @@ -3210,6 +3327,7 @@ static ir_node *transform_node_Minus(ir_node *n) { ir_graph *irg = current_ir_graph; ir_node *block = get_nodes_block(a); n = new_rd_Mul(dbg, irg, block, mul_l, cnst, mode); + DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_MUL_C); return n; } } @@ -3434,33 +3552,75 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj) { return proj; } /* transform_node_Proj_Cond */ +/** + * Create a 0 constant of given mode. + */ +static ir_node *create_zero_const(ir_mode *mode) { + tarval *tv = get_mode_null(mode); + ir_node *cnst = new_Const(mode, tv); + + return cnst; +} + +/* the order of the values is important! */ +typedef enum const_class { + const_const = 0, + const_like = 1, + const_other = 2 +} const_class; + +static const_class classify_const(const ir_node* n) +{ + if (is_Const(n)) return const_const; + if (is_irn_constlike(n)) return const_like; + return const_other; +} + +/** + * Determines whether r is more constlike or has a larger index (in that order) + * than l. + */ +static int operands_are_normalized(const ir_node *l, const ir_node *r) +{ + const const_class l_order = classify_const(l); + const const_class r_order = classify_const(r); + return + l_order > r_order || + (l_order == r_order && get_irn_idx(l) <= get_irn_idx(r)); +} + /** * Normalizes and optimizes Cmp nodes. 
*/ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { - ir_node *n = get_Proj_pred(proj); - ir_node *left = get_Cmp_left(n); - ir_node *right = get_Cmp_right(n); - ir_node *c = NULL; - tarval *tv = NULL; - int changed = 0; - ir_mode *mode = NULL; - long proj_nr = get_Proj_proj(proj); - - /* we can evaluate this direct */ - switch(proj_nr) { + ir_node *n = get_Proj_pred(proj); + ir_node *left = get_Cmp_left(n); + ir_node *right = get_Cmp_right(n); + tarval *tv = NULL; + int changed = 0; + ir_mode *mode = NULL; + long proj_nr = get_Proj_proj(proj); + + /* we can evaluate some cases directly */ + switch (proj_nr) { case pn_Cmp_False: return new_Const(mode_b, get_tarval_b_false()); case pn_Cmp_True: return new_Const(mode_b, get_tarval_b_true()); case pn_Cmp_Leg: - if(!mode_is_float(get_irn_mode(left))) + if (!mode_is_float(get_irn_mode(left))) return new_Const(mode_b, get_tarval_b_true()); break; default: break; } + /* remove Casts */ + if (is_Cast(left)) + left = get_Cast_op(left); + if (is_Cast(right)) + right = get_Cast_op(right); + /* Remove unnecessary conversions */ /* TODO handle constants */ if (is_Conv(left) && is_Conv(right)) { @@ -3478,126 +3638,163 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { left = op_left; right = op_right; changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV); } else if (smaller_mode(mode_left, mode_right)) { left = new_r_Conv(irg, block, op_left, mode_right); right = op_right; changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV); } else if (smaller_mode(mode_right, mode_left)) { left = op_left; right = new_r_Conv(irg, block, op_right, mode_left); changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV); } } } - /* TODO extend to arbitrary constants */ - if (is_Conv(left) && is_Const(right) && is_Const_null(right)) { - ir_mode* mode = get_irn_mode(left); - ir_node* op = get_Conv_op(left); - ir_mode* op_mode = get_irn_mode(op); - - if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) && - (mode_is_signed(mode) || !mode_is_signed(op_mode))) { - ir_node *null = new_Const(op_mode, get_mode_null(op_mode)); - set_Cmp_left( n, op); - set_Cmp_right(n, null); - return proj; - } - } - - /* remove Casts */ - if (is_Cast(left)) - left = get_Cast_op(left); - if (is_Cast(right)) - right = get_Cast_op(right); - /* remove operation of both sides if possible */ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) { - ir_opcode lop = get_irn_opcode(left); - - if (lop == get_irn_opcode(right)) { - ir_node *ll, *lr, *rl, *rr; - - /* same operation on both sides, try to remove */ - switch (lop) { - case iro_Not: - case iro_Minus: - /* ~a CMP ~b => a CMP b, -a CMP -b ==> a CMP b */ - left = get_unop_op(left); - right = get_unop_op(right); - changed |= 1; - break; - case iro_Add: - ll = get_Add_left(left); - lr = get_Add_right(left); - rl = get_Add_left(right); - rr = get_Add_right(right); - - if (ll == rl) { - /* X + a CMP X + b ==> a CMP b */ - left = lr; - right = rr; - changed |= 1; - } else if (ll == rr) { - /* X + a CMP b + X ==> a CMP b */ - left = lr; - right = rl; - changed |= 1; - } else if (lr == rl) { - /* a + X CMP X + b ==> a CMP b */ - left = ll; - right = rr; - changed |= 1; - } else if (lr == rr) { - /* a + X CMP b + X ==> a CMP b */ - left = ll; - right = rl; + /* + * The following operations are NOT safe for floating point operations, for instance + * 1.0 + inf == 2.0 + inf, =/=> x == y + */ + if (mode_is_int(get_irn_mode(left))) { + unsigned lop = get_irn_opcode(left); + + if (lop == get_irn_opcode(right)) { + ir_node *ll, *lr, *rl, 
*rr; + + /* same operation on both sides, try to remove */ + switch (lop) { + case iro_Not: + case iro_Minus: + /* ~a CMP ~b => a CMP b, -a CMP -b ==> a CMP b */ + left = get_unop_op(left); + right = get_unop_op(right); changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); + break; + case iro_Add: + ll = get_Add_left(left); + lr = get_Add_right(left); + rl = get_Add_left(right); + rr = get_Add_right(right); + + if (ll == rl) { + /* X + a CMP X + b ==> a CMP b */ + left = lr; + right = rr; + changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); + } else if (ll == rr) { + /* X + a CMP b + X ==> a CMP b */ + left = lr; + right = rl; + changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); + } else if (lr == rl) { + /* a + X CMP X + b ==> a CMP b */ + left = ll; + right = rr; + changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); + } else if (lr == rr) { + /* a + X CMP b + X ==> a CMP b */ + left = ll; + right = rl; + changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); + } + break; + case iro_Sub: + ll = get_Sub_left(left); + lr = get_Sub_right(left); + rl = get_Sub_left(right); + rr = get_Sub_right(right); + + if (ll == rl) { + /* X - a CMP X - b ==> a CMP b */ + left = lr; + right = rr; + changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); + } else if (lr == rr) { + /* a - X CMP b - X ==> a CMP b */ + left = ll; + right = rl; + changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); + } + break; + case iro_Rot: + if (get_Rot_right(left) == get_Rot_right(right)) { + /* a ROT X CMP b ROT X ==> a CMP b */ + left = get_Rot_left(left); + right = get_Rot_left(right); + changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); + } + break; + default: + break; } - break; - case iro_Sub: - ll = get_Sub_left(left); - lr = get_Sub_right(left); - rl = get_Sub_left(right); - rr = get_Sub_right(right); - - if (ll == rl) { - /* X - a CMP X - b ==> a CMP b */ - left = lr; - right = rr; - changed |= 1; - } else if (lr == rr) { - /* a - X CMP b - X ==> a CMP b */ - left = ll; - right = rl; + } + + /* X+A == A, A+X == A, A-X == A -> X == 0 */ + if (is_Add(left) || is_Sub(left)) { + ir_node *ll = get_binop_left(left); + ir_node *lr = get_binop_right(left); + + if (lr == right && is_Add(left)) { + ir_node *tmp = ll; + ll = lr; + lr = tmp; + } + if (ll == right) { + left = lr; + right = create_zero_const(get_irn_mode(left)); changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); } - break; - case iro_Rot: - if (get_Rot_right(left) == get_Rot_right(right)) { - /* a ROT X CMP b ROT X */ - left = get_Rot_left(left); - right = get_Rot_left(right); + } + if (is_Add(right) || is_Sub(right)) { + ir_node *rl = get_binop_left(right); + ir_node *rr = get_binop_right(right); + + if (rr == left && is_Add(right)) { + ir_node *tmp = rl; + rl = rr; + rr = tmp; + } + if (rl == left) { + left = rr; + right = create_zero_const(get_irn_mode(left)); changed |= 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); } - break; - default: - break; } - } - } + } /* mode_is_int(...) 
*/ + } /* proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg */ + /* replace mode_b compares with ands/ors */ if (get_irn_mode(left) == mode_b) { ir_graph *irg = current_ir_graph; ir_node *block = get_nodes_block(n); + ir_node *bres; switch (proj_nr) { - case pn_Cmp_Le: return new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); - case pn_Cmp_Lt: return new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); - case pn_Cmp_Ge: return new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); - case pn_Cmp_Gt: return new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); - case pn_Cmp_Lg: return new_r_Eor(irg, block, left, right, mode_b); - case pn_Cmp_Eq: return new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b); + case pn_Cmp_Le: bres = new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); break; + case pn_Cmp_Lt: bres = new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); break; + case pn_Cmp_Ge: bres = new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); break; + case pn_Cmp_Gt: bres = new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); break; + case pn_Cmp_Lg: bres = new_r_Eor(irg, block, left, right, mode_b); break; + case pn_Cmp_Eq: bres = new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b); break; + default: bres = NULL; + } + if (bres) { + DBG_OPT_ALGSIM0(n, bres, FS_OPT_CMP_TO_BOOL); + return bres; } } @@ -3608,19 +3805,8 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { * First step: normalize the compare op * by placing the constant on the right side * or moving the lower address node to the left. - * We ignore the case that both are constants - * this case should be optimized away. */ - if (is_Const(right)) { - c = right; - } else if (is_Const(left)) { - c = left; - left = right; - right = c; - - proj_nr = get_inversed_pnc(proj_nr); - changed |= 1; - } else if (get_irn_idx(left) > get_irn_idx(right)) { + if (!operands_are_normalized(left, right)) { ir_node *t = left; left = right; @@ -3636,9 +3822,28 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { * later and may help to normalize more compares. * Of course this is only possible for integer values. 
*/ - if (c) { - mode = get_irn_mode(c); - tv = get_Const_tarval(c); + if (is_Const(right)) { + mode = get_irn_mode(right); + tv = get_Const_tarval(right); + + /* TODO extend to arbitrary constants */ + if (is_Conv(left) && tarval_is_null(tv)) { + ir_node *op = get_Conv_op(left); + ir_mode *op_mode = get_irn_mode(op); + + /* + * UpConv(x) REL 0 ==> x REL 0 + */ + if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) && + ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) || + mode_is_signed(mode) || !mode_is_signed(op_mode))) { + tv = get_mode_null(op_mode); + left = op; + mode = op_mode; + changed |= 2; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV); + } + } if (tv != tarval_bad) { /* the following optimization is possible on modes without Overflow @@ -3658,13 +3863,16 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { left = get_Minus_op(left); proj_nr = get_inversed_pnc(proj_nr); changed |= 2; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C); } } else if (is_Not(left) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)) { + /* Not(a) ==/!= c ==> a ==/!= Not(c) */ tv = tarval_not(tv); if (tv != tarval_bad) { left = get_Not_op(left); changed |= 2; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C); } } @@ -3685,6 +3893,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { if (tv != tarval_bad) { proj_nr ^= pn_Cmp_Eq; changed |= 2; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN); } } /* c < 0 : a > c ==> a >= (c+1) a <= c ==> a < (c+1) */ @@ -3695,12 +3904,14 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { if (tv != tarval_bad) { proj_nr ^= pn_Cmp_Eq; changed |= 2; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN); } } /* the following reassociations work only for == and != */ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) { +#if 0 /* Might be not that good in general */ /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */ if (tarval_is_null(tv) && is_Sub(left)) { right = get_Sub_right(left); @@ -3708,7 +3919,9 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { tv = value_of(right); changed = 1; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C); } +#endif if (tv != tarval_bad) { /* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */ @@ -3723,6 +3936,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { left = get_Sub_left(left); tv = tv2; changed |= 2; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C); } } } @@ -3748,6 +3962,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { left = a; tv = tv2; changed |= 2; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C); } } } @@ -3759,6 +3974,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { left = get_Minus_op(left); tv = tv2; changed |= 2; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C); } } } @@ -3773,28 +3989,161 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { } } /* mode_is_int */ - /* - * optimization for AND: - * Optimize: - * And(x, C) == C ==> And(x, C) != 0 - * And(x, C) != C ==> And(X, C) == 0 - * - * if C is a single Bit constant. - */ - if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_And(left)) { - if (tarval_is_single_bit(tv)) { - /* check for Constant's match. 
We have check hare the tarvals,
-			   because our const might be changed */
-			ir_node *la = get_And_left(left);
-			ir_node *ra = get_And_right(left);
-			if ((is_Const(la) && get_Const_tarval(la) == tv) ||
-			    (is_Const(ra) && get_Const_tarval(ra) == tv)) {
-				/* fine: do the transformation */
-				tv = get_mode_null(get_tarval_mode(tv));
-				proj_nr ^= pn_Cmp_Leg;
-				changed |= 2;
+	if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+		switch (get_irn_opcode(left)) {
+		ir_node *c1;
+
+		case iro_And:
+			c1 = get_And_right(left);
+			if (is_Const(c1)) {
+				/*
+				 * And(x, C1) == C2 ==> FALSE if C2 & C1 != C2
+				 * And(x, C1) != C2 ==> TRUE  if C2 & C1 != C2
+				 */
+				tarval *mask = tarval_and(get_Const_tarval(c1), tv);
+				if (mask != tv) {
+					/* TODO: move to constant evaluation */
+					tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+					c1 = new_Const(mode_b, tv);
+					DBG_OPT_CSTEVAL(proj, c1);
+					return c1;
+				}
+
+				if (tarval_is_single_bit(tv)) {
+					/*
+					 * optimization for AND:
+					 * Optimize:
+					 *   And(x, C) == C  ==>  And(x, C) != 0
+					 *   And(x, C) != C  ==>  And(X, C) == 0
+					 *
+					 * if C is a single Bit constant.
+					 */
+
+					/* check for Constant's match. We have to check the tarvals here,
+					   because our const might be changed */
+					if (get_Const_tarval(c1) == tv) {
+						/* fine: do the transformation */
+						tv = get_mode_null(get_tarval_mode(tv));
+						proj_nr ^= pn_Cmp_Leg;
+						changed |= 2;
+						DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN);
+					}
+				}
+			}
+			break;
+		case iro_Or:
+			c1 = get_Or_right(left);
+			if (is_Const(c1) && tarval_is_null(tv)) {
+				/*
+				 * Or(x, C) == 0  && C != 0 ==> FALSE
+				 * Or(x, C) != 0  && C != 0 ==> TRUE
+				 */
+				if (! tarval_is_null(get_Const_tarval(c1))) {
+					/* TODO: move to constant evaluation */
+					tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+					c1 = new_Const(mode_b, tv);
+					DBG_OPT_CSTEVAL(proj, c1);
+					return c1;
+				}
+			}
+			break;
+		case iro_Shl:
+			/*
+			 * optimize x << c1 == c into x & (-1 >>u c1) == c >> c1  if  c & (-1 << c1) == c
+			 *                          FALSE                         else
+			 * optimize x << c1 != c into x & (-1 >>u c1) != c >> c1  if  c & (-1 << c1) == c
+			 *                          TRUE                          else
+			 */
+			c1 = get_Shl_right(left);
+			if (is_Const(c1)) {
+				tarval  *tv1    = get_Const_tarval(c1);
+				ir_mode *mode   = get_irn_mode(left);
+				tarval  *minus1 = get_mode_all_one(mode);
+				tarval  *amask  = tarval_shr(minus1, tv1);
+				tarval  *cmask  = tarval_shl(minus1, tv1);
+				ir_node *sl, *blk;
+
+				if (tarval_and(tv, cmask) != tv) {
+					/* condition not met */
+					tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+					c1 = new_Const(mode_b, tv);
+					DBG_OPT_CSTEVAL(proj, c1);
+					return c1;
+				}
+				sl   = get_Shl_left(left);
+				blk  = get_nodes_block(n);
+				left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+				tv   = tarval_shr(tv, tv1);
+				changed |= 2;
+				DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
+			}
+			break;
+		case iro_Shr:
+			/*
+			 * optimize x >>u c1 == c into x & (-1 << c1) == c << c1  if  c & (-1 >>u c1) == c
+			 *                          FALSE                         else
+			 * optimize x >>u c1 != c into x & (-1 << c1) != c << c1  if  c & (-1 >>u c1) == c
+			 *                          TRUE                          else
+			 */
+			c1 = get_Shr_right(left);
+			if (is_Const(c1)) {
+				tarval  *tv1    = get_Const_tarval(c1);
+				ir_mode *mode   = get_irn_mode(left);
+				tarval  *minus1 = get_mode_all_one(mode);
+				tarval  *amask  = tarval_shl(minus1, tv1);
+				tarval  *cmask  = tarval_shr(minus1, tv1);
+				ir_node *sl, *blk;
+
+				if (tarval_and(tv, cmask) != tv) {
+					/* condition not met */
+					tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+					c1 = new_Const(mode_b, tv);
+					DBG_OPT_CSTEVAL(proj, c1);
+					return c1;
+				}
+				sl   = get_Shr_left(left);
+				blk  = get_nodes_block(n);
+				left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+				tv   = tarval_shl(tv, tv1);
+				changed |= 2;
+				DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
+			}
+			break;
+		case iro_Shrs:
+			/*
+			 * optimize x >>s c1 == c into x & (-1 << c1) == c << c1  if  (c >>s (BITS - c1)) \in {0,-1}
+			 *                          FALSE                         else
+			 * optimize x >>s c1 != c into x & (-1 << c1) != c << c1  if  (c >>s (BITS - c1)) \in {0,-1}
+			 *                          TRUE                          else
+			 */
+			c1 = get_Shrs_right(left);
+			if (is_Const(c1)) {
+				tarval  *tv1    = get_Const_tarval(c1);
+				ir_mode *mode   = get_irn_mode(left);
+				tarval  *minus1 = get_mode_all_one(mode);
+				tarval  *amask  = tarval_shl(minus1, tv1);
+				tarval  *cond   = new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(tv1));
+				ir_node *sl, *blk;
+
+				cond = tarval_sub(cond, tv1);
+				cond = tarval_shrs(tv, cond);
+
+				if (!tarval_is_all_one(cond) && !tarval_is_null(cond)) {
+					/* condition not met */
+					tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+					c1 = new_Const(mode_b, tv);
+					DBG_OPT_CSTEVAL(proj, c1);
+					return c1;
+				}
+				sl   = get_Shrs_left(left);
+				blk  = get_nodes_block(n);
+				left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+				tv   = tarval_shl(tv, tv1);
+				changed |= 2;
+				DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
+			}
+			break;
+		} /* switch */
 		}
 	} /* tarval != bad */
 }
 
@@ -3821,6 +4170,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
 				tv = tarval_sub(tv, get_mode_one(mode));
 				left = new_rd_And(get_irn_dbg_info(op), current_ir_graph, blk, v, new_Const(mode, tv), mode);
 				changed |= 1;
+				DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_MOD_TO_AND);
 			}
 		}
 	}
@@ -3831,9 +4181,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
 
 		/* create a new compare */
 		n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block, left, right);
-
-		set_Proj_pred(proj, n);
-		set_Proj_proj(proj, proj_nr);
+		proj = new_rd_Proj(get_irn_dbg_info(proj), current_ir_graph, block, n, get_irn_mode(proj), proj_nr);
 	}
 
 	return proj;
@@ -4147,12 +4495,13 @@ static ir_node *transform_node_Or(ir_node *n) {
 	ir_node *c, *oldn = n;
 	ir_node *a = get_Or_left(n);
 	ir_node *b = get_Or_right(n);
+	ir_mode *mode;
 
 	if (is_Not(a) && is_Not(b)) {
 		/* ~a | ~b = ~(a&b) */
 		ir_node *block = get_nodes_block(n);
-		ir_mode *mode = get_irn_mode(n);
 
+		mode = get_irn_mode(n);
 		a = get_Not_op(a);
 		b = get_Not_op(b);
 		n = new_rd_And(get_irn_dbg_info(n), current_ir_graph, block, a, b, mode);
@@ -4178,7 +4527,8 @@ static ir_node *transform_node_Or(ir_node *n) {
 		}
 	}
 
-	HANDLE_BINOP_PHI(tarval_or, a,b,c);
+	mode = get_irn_mode(n);
+	HANDLE_BINOP_PHI(tarval_or, a, b, c, mode);
 
 	n = transform_node_Or_bf_store(n);
 	n = transform_node_Or_Rot(n);
@@ -4195,7 +4545,7 @@ static ir_node *transform_node(ir_node *n);
 
 /**
- * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl.
+ * Optimize (a >> c1) >> c2, works for Shr, Shrs, Shl, Rot.
  *
  * Should be moved to reassociation?
*/ @@ -4257,10 +4607,11 @@ static ir_node *transform_node_shift(ir_node *n) { */ static ir_node *transform_node_Shr(ir_node *n) { ir_node *c, *oldn = n; - ir_node *a = get_Shr_left(n); - ir_node *b = get_Shr_right(n); + ir_node *a = get_Shr_left(n); + ir_node *b = get_Shr_right(n); + ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_shr, a, b, c); + HANDLE_BINOP_PHI(tarval_shr, a, b, c, mode); return transform_node_shift(n); } /* transform_node_Shr */ @@ -4269,10 +4620,11 @@ static ir_node *transform_node_Shr(ir_node *n) { */ static ir_node *transform_node_Shrs(ir_node *n) { ir_node *c, *oldn = n; - ir_node *a = get_Shrs_left(n); - ir_node *b = get_Shrs_right(n); + ir_node *a = get_Shrs_left(n); + ir_node *b = get_Shrs_right(n); + ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_shrs, a, b, c); + HANDLE_BINOP_PHI(tarval_shrs, a, b, c, mode); return transform_node_shift(n); } /* transform_node_Shrs */ @@ -4281,13 +4633,50 @@ static ir_node *transform_node_Shrs(ir_node *n) { */ static ir_node *transform_node_Shl(ir_node *n) { ir_node *c, *oldn = n; - ir_node *a = get_Shl_left(n); - ir_node *b = get_Shl_right(n); + ir_node *a = get_Shl_left(n); + ir_node *b = get_Shl_right(n); + ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_shl, a, b, c); + HANDLE_BINOP_PHI(tarval_shl, a, b, c, mode); return transform_node_shift(n); } /* transform_node_Shl */ +/** + * Transform a Rot. + */ +static ir_node *transform_node_Rot(ir_node *n) { + ir_node *c, *oldn = n; + ir_node *a = get_Rot_left(n); + ir_node *b = get_Rot_right(n); + ir_mode *mode = get_irn_mode(n); + + HANDLE_BINOP_PHI(tarval_rot, a, b, c, mode); + return transform_node_shift(n); +} /* transform_node_Rot */ + +/** + * Transform a Conv. + */ +static ir_node *transform_node_Conv(ir_node *n) { + ir_node *c, *oldn = n; + ir_node *a = get_Conv_op(n); + + if (is_const_Phi(a)) { + c = apply_conv_on_phi(a, get_irn_mode(n)); + if (c) { + DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); + return c; + } + } + + if (is_Unknown(a)) { /* Conv_A(Unknown_B) -> Unknown_A */ + ir_mode *mode = get_irn_mode(n); + return new_r_Unknown(current_ir_graph, mode); + } + + return n; +} /* transform_node_Conv */ + /** * Remove dead blocks and nodes in dead blocks * in keep alive list. We do not generate a new End node. @@ -4424,53 +4813,6 @@ static ir_node *transform_node_Mux(ir_node *n) { return n; } } - - if (mode_is_int(mode) && mode_is_signed(mode) && - get_mode_arithmetic(mode) == irma_twos_complement) { - ir_node *x = get_Cmp_left(cmp); - - /* the following optimization works only with signed integer two-complement mode */ - - if (mode == get_irn_mode(x)) { - /* - * FIXME: this restriction is two rigid, as it would still - * work if mode(x) = Hs and mode == Is, but at least it removes - * all wrong cases. - */ - if ((pn == pn_Cmp_Lt || pn == pn_Cmp_Le) && - is_Const(t) && is_Const_all_one(t) && - is_Const(f) && is_Const_null(f)) { - /* - * Mux(x:T Shrs(x, sizeof_bits(T) - 1) - * Conditions: - * T must be signed. - */ - n = new_rd_Shrs(get_irn_dbg_info(n), - current_ir_graph, block, x, - new_r_Const_long(current_ir_graph, block, mode_Iu, - get_mode_size_bits(mode) - 1), - mode); - DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR); - return n; - } else if ((pn == pn_Cmp_Gt || pn == pn_Cmp_Ge) && - is_Const(t) && is_Const_one(t) && - is_Const(f) && is_Const_null(f)) { - /* - * Mux(x:T >/>= 0, 0, 1) -> Shr(-x, sizeof_bits(T) - 1) - * Conditions: - * T must be signed. 
- */ - n = new_rd_Shr(get_irn_dbg_info(n), - current_ir_graph, block, - new_r_Minus(current_ir_graph, block, x, mode), - new_r_Const_long(current_ir_graph, block, mode_Iu, - get_mode_size_bits(mode) - 1), - mode); - DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR); - return n; - } - } - } } } } @@ -4487,6 +4829,42 @@ static ir_node *transform_node_Psi(ir_node *n) { return n; } /* transform_node_Psi */ +/** + * optimize sync nodes that have other syncs as input we simply add the inputs + * of the other sync to our own inputs + */ +static ir_node *transform_node_Sync(ir_node *n) { + int i, arity; + + arity = get_irn_arity(n); + for(i = 0; i < get_irn_arity(n); /*empty*/) { + int i2, arity2; + ir_node *in = get_irn_n(n, i); + if(!is_Sync(in)) { + ++i; + continue; + } + + /* set sync input 0 instead of the sync */ + set_irn_n(n, i, get_irn_n(in, 0)); + /* so we check this input again for syncs */ + + /* append all other inputs of the sync to our sync */ + arity2 = get_irn_arity(in); + for(i2 = 1; i2 < arity2; ++i2) { + ir_node *in_in = get_irn_n(in, i2); + add_irn_n(n, in_in); + /* increase arity so we also check the new inputs for syncs */ + arity++; + } + } + + /* rehash the sync node */ + add_identities(current_ir_graph->value_table, n); + + return n; +} + /** * Tries several [inplace] [optimizing] transformations and returns an * equivalent node. The difference to equivalent_node() is that these @@ -4548,9 +4926,12 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops CASE(Shr); CASE(Shrs); CASE(Shl); + CASE(Rot); + CASE(Conv); CASE(End); CASE(Mux); CASE(Psi); + CASE(Sync); default: /* leave NULL */; } @@ -4843,7 +5224,7 @@ void del_identities(pset *value_table) { } /* del_identities */ /** - * Normalize a node by putting constants (and operands with smaller + * Normalize a node by putting constants (and operands with larger * node index) on the right * * @param n The node to normalize @@ -4853,17 +5234,12 @@ static void normalize_node(ir_node *n) { if (is_op_commutative(get_irn_op(n))) { ir_node *l = get_binop_left(n); ir_node *r = get_binop_right(n); - int l_idx = get_irn_idx(l); - int r_idx = get_irn_idx(r); /* For commutative operators perform a OP b == b OP a but keep - constants on the RIGHT side. This helps greatly in some optimizations. - Moreover we use the idx number to make the form deterministic. */ - if (is_irn_constlike(l)) - l_idx = -l_idx; - if (is_irn_constlike(r)) - r_idx = -r_idx; - if (l_idx < r_idx) { + * constants on the RIGHT side. This helps greatly in some + * optimizations. Moreover we use the idx number to make the form + * deterministic. */ + if (!operands_are_normalized(l, r)) { set_binop_left(n, r); set_binop_right(n, l); }