X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=2112fe04cf2d84277ab00d36ae9efca14b93a0aa;hb=6b124543aff56817fcfe6d5b5ff181ac5c790e73;hp=89d4cc5a17ab2da849fae3261565f16949ba546a;hpb=bc35052031be38b7243401b37fe1d791728c7df2;p=libfirm diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 89d4cc5a1..2112fe04c 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -47,6 +47,8 @@ #include "irtools.h" #include "irhooks.h" #include "array_t.h" +#include "vrp.h" +#include "firm_types.h" /* Make types visible to allow most efficient access */ #include "entity_t.h" @@ -452,106 +454,93 @@ static tarval *computed_value_Confirm(const ir_node *n) { * There are several case where we can evaluate a Cmp node, see later. */ static tarval *computed_value_Proj_Cmp(const ir_node *n) { - ir_node *a = get_Proj_pred(n); - ir_node *aa = get_Cmp_left(a); - ir_node *ab = get_Cmp_right(a); - long proj_nr = get_Proj_proj(n); + ir_node *cmp = get_Proj_pred(n); + ir_node *left = get_Cmp_left(cmp); + ir_node *right = get_Cmp_right(cmp); + long pn_cmp = get_Proj_proj(n); + ir_mode *mode = get_irn_mode(left); + tarval *tv_l, *tv_r; /* * BEWARE: a == a is NOT always True for floating Point values, as * NaN != NaN is defined, so we must check this here. */ - if (aa == ab && ( - !mode_is_float(get_irn_mode(aa)) || proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Gt) - ) { /* 1.: */ - + if (left == right && (!mode_is_float(mode) || pn_cmp == pn_Cmp_Lt || pn_cmp == pn_Cmp_Gt)) { /* This is a trick with the bits used for encoding the Cmp Proj numbers, the following statement is not the same: - return new_tarval_from_long (proj_nr == pn_Cmp_Eq, mode_b) */ - return new_tarval_from_long (proj_nr & pn_Cmp_Eq, mode_b); + return new_tarval_from_long(pn_cmp == pn_Cmp_Eq, mode_b) */ + return new_tarval_from_long(pn_cmp & pn_Cmp_Eq, mode_b); } - else { - tarval *taa = value_of(aa); - tarval *tab = value_of(ab); - ir_mode *mode = get_irn_mode(aa); + tv_l = value_of(left); + tv_r = value_of(right); + if ((tv_l != tarval_bad) && (tv_r != tarval_bad)) { /* * The predecessors of Cmp are target values. We can evaluate * the Cmp. */ - if ((taa != tarval_bad) && (tab != tarval_bad)) { - /* strange checks... */ - pn_Cmp flags = tarval_cmp(taa, tab); - if (flags != pn_Cmp_False) { - return new_tarval_from_long (proj_nr & flags, mode_b); - } + pn_Cmp flags = tarval_cmp(tv_l, tv_r); + if (flags != pn_Cmp_False) { + return new_tarval_from_long (pn_cmp & flags, mode_b); } + } else if (mode_is_int(mode)) { /* for integer values, we can check against MIN/MAX */ - else if (mode_is_int(mode)) { + pn_Cmp cmp_result; + + if (tv_l == get_mode_min(mode)) { /* MIN <=/> x. This results in true/false. */ - if (taa == get_mode_min(mode)) { - /* a compare with the MIN value */ - if (proj_nr == pn_Cmp_Le) - return get_tarval_b_true(); - else if (proj_nr == pn_Cmp_Gt) - return get_tarval_b_false(); - } + if (pn_cmp == pn_Cmp_Le) + return tarval_b_true; + else if (pn_cmp == pn_Cmp_Gt) + return tarval_b_false; + } else if (tv_r == get_mode_min(mode)) { /* x >=/< MIN. This results in true/false. */ - else - if (tab == get_mode_min(mode)) { - /* a compare with the MIN value */ - if (proj_nr == pn_Cmp_Ge) - return get_tarval_b_true(); - else if (proj_nr == pn_Cmp_Lt) - return get_tarval_b_false(); - } - /* MAX >=/< x. This results in true/false. */ - else if (taa == get_mode_max(mode)) { - if (proj_nr == pn_Cmp_Ge) - return get_tarval_b_true(); - else if (proj_nr == pn_Cmp_Lt) - return get_tarval_b_false(); - } - /* x <=/> MAX. This results in true/false. 
*/ - else if (tab == get_mode_max(mode)) { - if (proj_nr == pn_Cmp_Le) - return get_tarval_b_true(); - else if (proj_nr == pn_Cmp_Gt) - return get_tarval_b_false(); + if (pn_cmp == pn_Cmp_Ge) + return tarval_b_true; + else if (pn_cmp == pn_Cmp_Lt) + return tarval_b_false; + } else if (tv_l == get_mode_max(mode)) { + /* MAX >=/< x. This results in true/false. */ + if (pn_cmp == pn_Cmp_Ge) + return tarval_b_true; + else if (pn_cmp == pn_Cmp_Lt) + return tarval_b_false; + } else if (tv_r == get_mode_max(mode)) { + /* x <=/> MAX. This results in true/false. */ + if (pn_cmp == pn_Cmp_Le) + return tarval_b_true; + else if (pn_cmp == pn_Cmp_Gt) + return tarval_b_false; + } + + cmp_result = vrp_cmp(left, right); + if (cmp_result != pn_Cmp_False) { + if (cmp_result == pn_Cmp_Lg) { + if (pn_cmp == pn_Cmp_Eq) { + return tarval_b_false; + } else if (pn_cmp == pn_Cmp_Lg) { + return tarval_b_true; } + } else { + return new_tarval_from_long(cmp_result & pn_cmp, mode_b); + } } - /* - * The predecessors are Allocs or (void*)(0) constants. Allocs never - * return NULL, they raise an exception. Therefore we can predict - * the Cmp result. - */ - else { - ir_node *aaa = skip_Proj(aa); - ir_node *aba = skip_Proj(ab); - - if ( ( (/* aa is ProjP and aaa is Alloc */ - is_Proj(aa) - && mode_is_reference(get_irn_mode(aa)) - && is_Alloc(aaa)) - && ( (/* ab is NULL */ - mode_is_reference(get_irn_mode(ab)) - && tarval_is_null(tab)) - || (/* ab is other Alloc */ - is_Proj(ab) - && mode_is_reference(get_irn_mode(ab)) - && is_Alloc(aba) - && (aaa != aba)))) - || (/* aa is NULL and aba is Alloc */ - mode_is_reference(get_irn_mode(aa)) - && tarval_is_null(taa) - && is_Proj(ab) - && mode_is_reference(get_irn_mode(ab)) - && is_Alloc(aba))) - /* 3.: */ - return new_tarval_from_long(proj_nr & pn_Cmp_Lg, mode_b); - } - } - return computed_value_Cmp_Confirm(a, aa, ab, proj_nr); + } else if (mode_is_reference(mode)) { + /* pointer compare */ + ir_node *s_l = skip_Proj(left); + ir_node *s_r = skip_Proj(right); + + if ((is_Alloc(s_l) && tarval_is_null(tv_r)) || + (tarval_is_null(tv_l) && is_Alloc(s_r))) { + /* + * The predecessors are Allocs and (void*)(0) constants. In Firm Allocs never + * return NULL, they raise an exception. Therefore we can predict + * the Cmp result. + */ + return new_tarval_from_long(pn_cmp & pn_Cmp_Lg, mode_b); + } + } + return computed_value_Cmp_Confirm(cmp, left, right, pn_cmp); } /* computed_value_Proj_Cmp */ /** @@ -675,6 +664,10 @@ static tarval *computed_value_Proj(const ir_node *proj) { * @param n The node this should be evaluated */ tarval *computed_value(const ir_node *n) { + if(mode_is_int(get_irn_mode(n)) && n->vrp.valid && tarval_is_all_one( + tarval_or(n->vrp.bits_set, n->vrp.bits_not_set))) { + return n->vrp.bits_set; + } if (n->op->ops.computed_value) return n->op->ops.computed_value(n); return tarval_bad; @@ -1955,11 +1948,18 @@ static int is_const_Phi(ir_node *n) { typedef tarval *(*tarval_sub_type)(tarval *a, tarval *b, ir_mode *mode); typedef tarval *(*tarval_binop_type)(tarval *a, tarval *b); +/** + * in reality eval_func should be tarval (*eval_func)() but incomplete + * declarations are bad style and generate noisy warnings + */ +typedef void (*eval_func)(void); + /** * Wrapper for the tarval binop evaluation, tarval_sub has one more parameter. 
*/ -static tarval *do_eval(tarval *(*eval)(), tarval *a, tarval *b, ir_mode *mode) { - if (eval == tarval_sub) { +static tarval *do_eval(eval_func eval, tarval *a, tarval *b, ir_mode *mode) +{ + if (eval == (eval_func) tarval_sub) { tarval_sub_type func = (tarval_sub_type)eval; return func(a, b, mode); @@ -1981,7 +1981,7 @@ static tarval *do_eval(tarval *(*eval)(), tarval *a, tarval *b, ir_mode *mode) { * * @return a new Phi node if the conversion was successful, NULL else */ -static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)(), ir_mode *mode, int left) { +static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, eval_func eval, ir_mode *mode, int left) { tarval *tv; void **res; ir_node *pred; @@ -2032,7 +2032,7 @@ static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)( * * @return a new Phi node if the conversion was successful, NULL else */ -static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, tarval *(*eval)(), ir_mode *mode) { +static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode) { tarval *tv_l, *tv_r, *tv; void **res; ir_node *pred; @@ -2273,7 +2273,7 @@ static ir_node *transform_node_Add(ir_node *n) { } } - HANDLE_BINOP_PHI(tarval_add, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_add, a, b, c, mode); /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */ if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)) @@ -2345,6 +2345,18 @@ static ir_node *transform_node_Add(ir_node *n) { } } } + if (mode_is_int(mode) && a->vrp.valid && b->vrp.valid) { + tarval *c = tarval_and( + tarval_not(a->vrp.bits_not_set), + tarval_not(b->vrp.bits_not_set) + ); + + if(tarval_is_null(c)) { + dbg_info *dbgi = get_irn_dbg_info(n); + return new_rd_Or(dbgi, get_nodes_block(n), + a, b, mode); + } + } return n; } /* transform_node_Add */ @@ -2405,7 +2417,7 @@ static ir_node *transform_node_Sub(ir_node *n) { } restart: - HANDLE_BINOP_PHI(tarval_sub, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_sub, a, b, c, mode); /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */ if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)) @@ -2722,7 +2734,7 @@ static ir_node *transform_node_Mul(ir_node *n) { if (mode != get_irn_mode(a)) return transform_node_Mul2n(n, mode); - HANDLE_BINOP_PHI(tarval_mul, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_mul, a, b, c, mode); if (mode_is_signed(mode)) { ir_node *r = NULL; @@ -2834,7 +2846,7 @@ static ir_node *transform_node_Div(ir_node *n) { if (is_Const(b) && is_const_Phi(a)) { /* check for Div(Phi, Const) */ - value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0); + value = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0); if (value) { DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); goto make_tuple; @@ -2842,7 +2854,7 @@ static ir_node *transform_node_Div(ir_node *n) { } else if (is_Const(a) && is_const_Phi(b)) { /* check for Div(Const, Phi) */ - value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1); + value = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1); if (value) { DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); goto make_tuple; @@ -2850,7 +2862,7 @@ static ir_node *transform_node_Div(ir_node *n) { } else if (is_const_Phi(a) && is_const_Phi(b)) { /* check for Div(Phi, Phi) */ - value = apply_binop_on_2_phis(a, b, tarval_div, mode); + 
value = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode); if (value) { DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); goto make_tuple; @@ -2910,7 +2922,7 @@ static ir_node *transform_node_Mod(ir_node *n) { if (is_Const(b) && is_const_Phi(a)) { /* check for Div(Phi, Const) */ - value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0); + value = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0); if (value) { DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); goto make_tuple; @@ -2918,7 +2930,7 @@ static ir_node *transform_node_Mod(ir_node *n) { } else if (is_Const(a) && is_const_Phi(b)) { /* check for Div(Const, Phi) */ - value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1); + value = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1); if (value) { DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); goto make_tuple; @@ -2926,7 +2938,7 @@ static ir_node *transform_node_Mod(ir_node *n) { } else if (is_const_Phi(a) && is_const_Phi(b)) { /* check for Div(Phi, Phi) */ - value = apply_binop_on_2_phis(a, b, tarval_mod, mode); + value = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode); if (value) { DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI); goto make_tuple; @@ -2999,8 +3011,8 @@ static ir_node *transform_node_DivMod(ir_node *n) { if (is_Const(b) && is_const_Phi(a)) { /* check for Div(Phi, Const) */ - va = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0); - vb = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0); + va = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0); + vb = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0); if (va && vb) { DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI); DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI); @@ -3009,8 +3021,8 @@ static ir_node *transform_node_DivMod(ir_node *n) { } else if (is_Const(a) && is_const_Phi(b)) { /* check for Div(Const, Phi) */ - va = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1); - vb = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1); + va = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1); + vb = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1); if (va && vb) { DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI); DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI); @@ -3019,8 +3031,8 @@ static ir_node *transform_node_DivMod(ir_node *n) { } else if (is_const_Phi(a) && is_const_Phi(b)) { /* check for Div(Phi, Phi) */ - va = apply_binop_on_2_phis(a, b, tarval_div, mode); - vb = apply_binop_on_2_phis(a, b, tarval_mod, mode); + va = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode); + vb = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode); if (va && vb) { DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI); DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI); @@ -3376,7 +3388,7 @@ static ir_node *transform_node_And(ir_node *n) { ir_mode *mode; mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_and, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_and, a, b, c, mode); /* we can evaluate 2 Projs of the same Cmp */ if (mode == mode_b && is_Proj(a) && is_Proj(b)) { @@ -3494,6 +3506,20 @@ static ir_node *transform_node_And(ir_node *n) { return n; } + if (is_Const(a) && b->vrp.valid && (tarval_is_all_one(tarval_or(get_Const_tarval(a), + b->vrp.bits_not_set)))) { + return new_rd_Id(get_irn_dbg_info(n), get_nodes_block(n), + b, get_irn_mode(n)); + + } + + if (is_Const(b) && a->vrp.valid && 
(tarval_is_all_one(tarval_or(get_Const_tarval(b), + a->vrp.bits_not_set)))) { + return new_rd_Id(get_irn_dbg_info(n), get_nodes_block(n), + a, get_irn_mode(n)); + + } + n = transform_bitwise_distributive(n, transform_node_And); return n; @@ -3508,7 +3534,7 @@ static ir_node *transform_node_Eor(ir_node *n) { ir_node *b = get_Eor_right(n); ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_eor, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_eor, a, b, c, mode); /* we can evaluate 2 Projs of the same Cmp */ if (mode == mode_b && is_Proj(a) && is_Proj(b)) { @@ -3531,15 +3557,6 @@ static ir_node *transform_node_Eor(ir_node *n) { n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_mode_null(mode)); DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A); - } else if (mode == mode_b && - is_Proj(a) && - is_Const(b) && is_Const_one(b) && - is_Cmp(get_Proj_pred(a))) { - /* The Eor negates a Cmp. The Cmp has the negated result anyways! */ - n = new_r_Proj(get_nodes_block(n), get_Proj_pred(a), - mode_b, get_negated_pnc(get_Proj_proj(a), mode)); - - DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL); } else if (is_Const(b)) { if (is_Not(a)) { /* ~x ^ const -> x ^ ~const */ ir_node *cnst = new_Const(tarval_not(get_Const_tarval(b))); @@ -3978,6 +3995,50 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj) { /* this case will NEVER be taken, kill it */ return get_irg_bad(current_ir_graph); } + } + } else { + long num = get_Proj_proj(proj); + if (num != get_Cond_default_proj(n) && b->vrp.valid) { + /* Try handling with vrp data. We only remove dead parts. */ + tarval *tp = new_tarval_from_long(num, get_irn_mode(b)); + + if (b->vrp.range_type == VRP_RANGE) { + pn_Cmp cmp_result = tarval_cmp(b->vrp.range_bottom, tp); + pn_Cmp cmp_result2 = tarval_cmp(b->vrp.range_top, tp); + + if ((cmp_result & pn_Cmp_Lt) == cmp_result && (cmp_result2 + & pn_Cmp_Gt) == cmp_result2) { + return get_irg_bad(current_ir_graph); + } + } else if (b->vrp.range_type == VRP_ANTIRANGE) { + pn_Cmp cmp_result = tarval_cmp(b->vrp.range_bottom, tp); + pn_Cmp cmp_result2 = tarval_cmp(b->vrp.range_top, tp); + + if ((cmp_result & pn_Cmp_Ge) == cmp_result && (cmp_result2 + & pn_Cmp_Le) == cmp_result2) { + return get_irg_bad(current_ir_graph); + } + } + + if (!(tarval_cmp( + tarval_and( b->vrp.bits_set, tp), + b->vrp.bits_set + ) == pn_Cmp_Eq)) { + + return get_irg_bad(current_ir_graph); + } + + if (!(tarval_cmp( + tarval_and( + tarval_not(tp), + b->vrp.bits_not_set), + b->vrp.bits_not_set) + == pn_Cmp_Eq)) { + + return get_irg_bad(current_ir_graph); + } + + } } } @@ -4284,10 +4345,13 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { /* * UpConv(x) REL 0 ==> x REL 0 + * Don't do this for float values as it's unclear whether it is a + * win. 
(on the other side it makes detection/creation of fabs hard) */ if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) && ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) || - mode_is_signed(mode) || !mode_is_signed(op_mode))) { + mode_is_signed(mode) || !mode_is_signed(op_mode)) && + !mode_is_float(mode)) { tv = get_mode_null(op_mode); left = op; mode = op_mode; @@ -4818,7 +4882,7 @@ static ir_node *transform_node_Or_bf_store(ir_node *or) { ir_node *new_and, *new_const, *block; ir_mode *mode = get_irn_mode(or); - tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2; + tarval *tv1, *tv2, *tv3, *tv4, *tv; while (1) { get_comm_Binop_Ops(or, &and, &c1); @@ -4867,14 +4931,12 @@ static ir_node *transform_node_Or_bf_store(ir_node *or) { return or; } - n_tv4 = tarval_not(tv4); - if (tv3 != tarval_and(tv3, n_tv4)) { + if (tv3 != tarval_andnot(tv3, tv4)) { /* bit in the or_mask is outside the and_mask */ return or; } - n_tv2 = tarval_not(tv2); - if (tv1 != tarval_and(tv1, n_tv2)) { + if (tv1 != tarval_andnot(tv1, tv2)) { /* bit in the or_mask is outside the and_mask */ return or; } @@ -5020,7 +5082,7 @@ static ir_node *transform_node_Or(ir_node *n) { } mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_or, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_or, a, b, c, mode); n = transform_node_Or_bf_store(n); n = transform_node_Or_Rotl(n); @@ -5297,7 +5359,7 @@ static ir_node *transform_node_Shr(ir_node *n) { ir_node *right = get_Shr_right(n); ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_shr, left, right, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_shr, left, right, c, mode); n = transform_node_shift(n); if (is_Shr(n)) @@ -5317,7 +5379,7 @@ static ir_node *transform_node_Shrs(ir_node *n) { ir_node *b = get_Shrs_right(n); ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_shrs, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_shrs, a, b, c, mode); n = transform_node_shift(n); if (is_Shrs(n)) @@ -5335,7 +5397,7 @@ static ir_node *transform_node_Shl(ir_node *n) { ir_node *b = get_Shl_right(n); ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_shl, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_shl, a, b, c, mode); n = transform_node_shift(n); if (is_Shl(n)) @@ -5355,7 +5417,7 @@ static ir_node *transform_node_Rotl(ir_node *n) { ir_node *b = get_Rotl_right(n); ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_rotl, a, b, c, mode); + HANDLE_BINOP_PHI((eval_func) tarval_rotl, a, b, c, mode); n = transform_node_shift(n); if (is_Rotl(n)) @@ -5446,12 +5508,12 @@ static ir_node *transform_node_End(ir_node *n) { return n; } /* transform_node_End */ -/** returns 1 if a == -b */ -static int is_negated_value(ir_node *a, ir_node *b) { +bool is_negated_value(ir_node *a, ir_node *b) +{ if (is_Minus(a) && get_Minus_op(a) == b) - return 1; + return true; if (is_Minus(b) && get_Minus_op(b) == a) - return 1; + return true; if (is_Sub(a) && is_Sub(b)) { ir_node *a_left = get_Sub_left(a); ir_node *a_right = get_Sub_right(a); @@ -5459,10 +5521,10 @@ static int is_negated_value(ir_node *a, ir_node *b) { ir_node *b_right = get_Sub_right(b); if (a_left == b_right && a_right == b_left) - return 1; + return true; } - return 0; + return false; } /** @@ -5653,15 +5715,18 @@ static ir_node *transform_node_Mux(ir_node *n) { if (!mode_honor_signed_zeros(mode) && is_negated_value(f, t)) { /* f = -t */ - if ( (cmp_l == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)) - || (cmp_l == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))) + /* NaN's work fine with abs, so it is ok to remove Uo 
*/ + long pnc = pn & ~pn_Cmp_Uo; + + if ( (cmp_l == t && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt)) + || (cmp_l == f && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt))) { /* Mux(a >/>= 0, a, -a) = Mux(a Abs(a) */ n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode); DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS); return n; - } else if ((cmp_l == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)) - || (cmp_l == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))) + } else if ((cmp_l == t && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt)) + || (cmp_l == f && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt))) { /* Mux(a />= 0, -a, a) ==> -Abs(a) */ n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode); @@ -5789,7 +5854,7 @@ static ir_node *transform_node_Call(ir_node *call) { ir_node *callee = get_Call_ptr(call); ir_node *adr, *mem, *res, *bl, **in; ir_type *ctp, *mtp, *tp; - ident *id; + type_dbg_info *tdb; dbg_info *db; int i, n_res, n_param; ir_variadicity var; @@ -5811,13 +5876,11 @@ static ir_node *transform_node_Call(ir_node *call) { /* build a new call type */ mtp = get_Call_type(call); - id = get_type_ident(mtp); - id = id_mangle(new_id_from_chars("T_", 2), id); - db = get_type_dbg_info(mtp); + tdb = get_type_dbg_info(mtp); n_res = get_method_n_ress(mtp); n_param = get_method_n_params(mtp); - ctp = new_d_type_method(id, n_param + 1, n_res, db); + ctp = new_d_type_method(n_param + 1, n_res, tdb); for (i = 0; i < n_res; ++i) set_method_res_type(ctp, i, get_method_res_type(mtp, i)); @@ -5826,8 +5889,7 @@ static ir_node *transform_node_Call(ir_node *call) { /* FIXME: we don't need a new pointer type in every step */ tp = get_irg_frame_type(current_ir_graph); - id = id_mangle(get_type_ident(tp), new_id_from_chars("_ptr", 4)); - tp = new_type_pointer(id, tp, mode_P_data); + tp = new_type_pointer(tp); set_method_param_type(ctp, 0, tp); in[0] = get_Builtin_param(callee, 2); @@ -6252,9 +6314,15 @@ int identities_cmp(const void *elt, const void *key) { } /* compare a->in[0..ins] with b->in[0..ins] */ - for (i = 0; i < irn_arity_a; i++) - if (get_irn_intra_n(a, i) != get_irn_intra_n(b, i)) - return 1; + for (i = 0; i < irn_arity_a; ++i) { + ir_node *pred_a = get_irn_intra_n(a, i); + ir_node *pred_b = get_irn_intra_n(b, i); + if (pred_a != pred_b) { + /* if both predecessors are CSE neutral they might be different */ + if (!is_irn_cse_neutral(pred_a) || !is_irn_cse_neutral(pred_b)) + return 1; + } + } /* * here, we already now that the nodes are identical except their @@ -6363,19 +6431,22 @@ static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node) { * node could be found */ ir_node *identify_remember(pset *value_table, ir_node *n) { - ir_node *o = NULL; + ir_node *nn = NULL; if (!value_table) return n; ir_normalize_node(n); /* lookup or insert in hash table with given hash key. */ - o = pset_insert(value_table, n, ir_node_hash(n)); + nn = pset_insert(value_table, n, ir_node_hash(n)); + + if (nn != n) { + update_known_irn(nn, n); - if (o != n) { - update_known_irn(o, n); + /* n is reachable again */ + edges_node_revival(nn, get_irn_irg(nn)); } - return o; + return nn; } /* identify_remember */ /**
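
The recurring `(eval_func)` casts in the hunks above lean on a standard C rule: a function pointer may be converted to another function-pointer type and back, as long as it is converted back to a compatible type before the call. The patch's own comment explains that `typedef void (*eval_func)(void)` is used instead of an incomplete declaration to avoid noisy warnings. Below is a minimal, self-contained sketch of that dispatch pattern, analogous to `do_eval` distinguishing `tarval_sub` (three parameters) from the two-parameter tarval ops; `my_add`, `my_sub`, `binop_type` and `subop_type` are hypothetical stand-ins, not libfirm APIs.

```c
#include <stdio.h>

/* Generic callback type used only for passing function pointers around;
 * it is never called through directly (mirrors the eval_func idea). */
typedef void (*eval_func)(void);

/* Hypothetical stand-ins for the two tarval callback shapes. */
typedef int (*binop_type)(int a, int b);           /* e.g. tarval_add-like */
typedef int (*subop_type)(int a, int b, int mode); /* e.g. tarval_sub-like */

static int my_add(int a, int b)           { return a + b; }
static int my_sub(int a, int b, int mode) { (void)mode; return a - b; }

/* Dispatcher: compare against the known odd-one-out, then cast back to the
 * precise type before calling, so the call itself is well defined. */
static int do_eval(eval_func eval, int a, int b, int mode)
{
    if (eval == (eval_func) my_sub) {
        subop_type func = (subop_type) eval;
        return func(a, b, mode);
    } else {
        binop_type func = (binop_type) eval;
        return func(a, b);
    }
}

int main(void)
{
    printf("%d\n", do_eval((eval_func) my_add, 7, 5, 0)); /* prints 12 */
    printf("%d\n", do_eval((eval_func) my_sub, 7, 5, 0)); /* prints 2  */
    return 0;
}
```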