X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=824181077dc6ff56f33bdbe18adf38abccc01ed1;hb=57298884a0b02e67f3f3e31636ca3382b74abef8;hp=53c4d9b320e3915243783048a48e3eb1653c0352;hpb=a69648ef59fd9cec0d1cca717008ac4110a6599e;p=libfirm diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 53c4d9b32..824181077 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -53,6 +53,25 @@ /* Make types visible to allow most efficient access */ #include "entity_t.h" +/** + * Returns the tarval of a Const node or tarval_bad for all other nodes. + */ +static tarval *default_value_of(const ir_node *n) { + if (is_Const(n)) + return get_Const_tarval(n); /* might return tarval_bad */ + else + return tarval_bad; +} + +value_of_func value_of_ptr = default_value_of; + +void set_value_of_func(value_of_func func) { + if (func != NULL) + value_of_ptr = func; + else + value_of_ptr = default_value_of; +} + /** * Return the value of a Constant. */ @@ -418,20 +437,20 @@ static tarval *computed_value_Shrs(ir_node *n) { } /* computed_value_Shrs */ /** - * Return the value of a Rot. + * Return the value of a Rotl. */ -static tarval *computed_value_Rot(ir_node *n) { - ir_node *a = get_Rot_left(n); - ir_node *b = get_Rot_right(n); +static tarval *computed_value_Rotl(ir_node *n) { + ir_node *a = get_Rotl_left(n); + ir_node *b = get_Rotl_right(n); tarval *ta = value_of(a); tarval *tb = value_of(b); if ((ta != tarval_bad) && (tb != tarval_bad)) { - return tarval_rot (ta, tb); + return tarval_rotl(ta, tb); } return tarval_bad; -} /* computed_value_Rot */ +} /* computed_value_Rotl */ /** * Return the value of a Conv. @@ -637,8 +656,16 @@ static tarval *computed_value_Psi(ir_node *n) { * if it has the form Confirm(x, '=', Const). */ static tarval *computed_value_Confirm(ir_node *n) { - return get_Confirm_cmp(n) == pn_Cmp_Eq ? - value_of(get_Confirm_bound(n)) : tarval_bad; + /* + * Beware: we might produce Phi(Confirm(x == true), Confirm(x == false)). + * Do NOT optimize them away (CondEval wants them), so wait until + * remove_confirm is activated. + */ + if (get_opt_remove_confirm()) { + return get_Confirm_cmp(n) == pn_Cmp_Eq ? + value_of(get_Confirm_bound(n)) : tarval_bad; + } + return tarval_bad; } /* computed_value_Confirm */ /** @@ -687,7 +714,7 @@ static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops CASE(Shl); CASE(Shr); CASE(Shrs); - CASE(Rot); + CASE(Rotl); CASE(Carry); CASE(Borrow); CASE(Conv); @@ -722,10 +749,16 @@ static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops static ir_node *equivalent_node_Block(ir_node *n) { ir_node *oldn = n; - int n_preds = get_Block_n_cfgpreds(n); + int n_preds; - /* The Block constructor does not call optimize, but mature_immBlock - calls the optimization. */ + /* don't optimize dead blocks */ + if (is_Block_dead(n)) + return n; + + n_preds = get_Block_n_cfgpreds(n); + + /* The Block constructor does not call optimize, but mature_immBlock() + calls the optimization. */ assert(get_Block_matured(n)); /* Straightening: a single entry Block following a single exit Block @@ -862,7 +895,53 @@ static ir_node *equivalent_node_neutral_zero(ir_node *n) /** * Eor is commutative and has neutral 0. 
*/ -#define equivalent_node_Eor equivalent_node_neutral_zero +static ir_node *equivalent_node_Eor(ir_node *n) +{ + ir_node *oldn = n; + ir_node *a; + ir_node *b; + + n = equivalent_node_neutral_zero(n); + if (n != oldn) return n; + + a = get_Eor_left(n); + b = get_Eor_right(n); + + if (is_Eor(a)) { + ir_node *aa = get_Eor_left(a); + ir_node *ab = get_Eor_right(a); + + if (aa == b) { + /* (a ^ b) ^ a -> b */ + n = ab; + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A); + return n; + } else if (ab == b) { + /* (a ^ b) ^ b -> a */ + n = aa; + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A); + return n; + } + } + if (is_Eor(b)) { + ir_node *ba = get_Eor_left(b); + ir_node *bb = get_Eor_right(b); + + if (ba == a) { + /* a ^ (a ^ b) -> b */ + n = bb; + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A); + return n; + } else if (bb == a) { + /* a ^ (b ^ a) -> b */ + n = ba; + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A); + return n; + } + } + + return n; +} /* * Optimize a - 0 and (a - x) + x (for modes with wrap-around). @@ -878,14 +957,14 @@ static ir_node *equivalent_node_Add(ir_node *n) { ir_node *left, *right; ir_mode *mode = get_irn_mode(n); - /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */ - if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)) - return n; - n = equivalent_node_neutral_zero(n); if (n != oldn) return n; + /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */ + if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic)) + return n; + left = get_Add_left(n); right = get_Add_right(n); @@ -935,7 +1014,7 @@ static ir_node *equivalent_node_left_zero(ir_node *n) { #define equivalent_node_Shl equivalent_node_left_zero #define equivalent_node_Shr equivalent_node_left_zero #define equivalent_node_Shrs equivalent_node_left_zero -#define equivalent_node_Rot equivalent_node_left_zero +#define equivalent_node_Rotl equivalent_node_left_zero /** * Optimize a - 0 and (a + x) - x (for modes with wrap-around). @@ -971,7 +1050,7 @@ static ir_node *equivalent_node_Sub(ir_node *n) { /** - * Optimize an "idempotent unary op", ie op(op(n)) = n. + * Optimize an "self-inverse unary op", ie op(op(n)) = n. * * @todo * -(-a) == a, but might overflow two times. @@ -1163,19 +1242,22 @@ static ir_node *equivalent_node_Conv(ir_node *n) { ir_mode *n_mode = get_irn_mode(n); ir_mode *a_mode = get_irn_mode(a); +restart: if (n_mode == a_mode) { /* No Conv necessary */ if (get_Conv_strict(n)) { /* special case: the predecessor might be a also a Conv */ if (is_Conv(a)) { if (! 
get_Conv_strict(a)) { /* first one is not strict, kick it */ - set_Conv_op(n, get_Conv_op(a)); - return n; + a = get_Conv_op(a); + a_mode = get_irn_mode(a); + set_Conv_op(n, a); + goto restart; } - /* else both are strict conv, second is superflous */ - } else if(is_Proj(a)) { + /* else both are strict conv, second is superfluous */ + } else if (is_Proj(a)) { ir_node *pred = get_Proj_pred(a); - if(is_Load(pred)) { + if (is_Load(pred)) { /* loads always return with the exact precision of n_mode */ assert(get_Load_mode(pred) == n_mode); return a; @@ -1191,13 +1273,32 @@ static ir_node *equivalent_node_Conv(ir_node *n) { ir_node *b = get_Conv_op(a); ir_mode *b_mode = get_irn_mode(b); + if (get_Conv_strict(n) && get_Conv_strict(a)) { + /* both are strict conv */ + if (smaller_mode(a_mode, n_mode)) { + /* both are strict, but the first is smaller, so + the second cannot remove more precision, remove the + strict bit */ + set_Conv_strict(n, 0); + } + } if (n_mode == b_mode) { - if (n_mode == mode_b) { - n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ - DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV); - } else if (mode_is_int(n_mode)) { - if (get_mode_size_bits(b_mode) <= get_mode_size_bits(a_mode)) { - n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ + if (! get_Conv_strict(n) && ! get_Conv_strict(a)) { + if (n_mode == mode_b) { + n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV); + } else if (get_mode_arithmetic(n_mode) == get_mode_arithmetic(a_mode)) { + if (smaller_mode(b_mode, a_mode)) { + n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV); + } + } + } + if (is_Conv(b)) { + if (smaller_mode(b_mode, a_mode)) { + if (get_Conv_strict(n)) + set_Conv_strict(b, 1); + n = b; /* ConvA(ConvB(ConvA(...))) == ConvA(...) */ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV); } } @@ -1232,7 +1333,7 @@ static ir_node *equivalent_node_Phi(ir_node *n) { int i, n_preds; ir_node *oldn = n; - ir_node *block = NULL; /* to shutup gcc */ + ir_node *block; ir_node *first_val = NULL; /* to shutup gcc */ if (!get_opt_normalize()) return n; @@ -1240,8 +1341,6 @@ static ir_node *equivalent_node_Phi(ir_node *n) { n_preds = get_Phi_n_preds(n); block = get_nodes_block(n); - /* @@@ fliegt 'raus, sollte aber doch immer wahr sein!!! - assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */ if ((is_Block_dead(block)) || /* Control dead */ (block == get_irg_start_block(current_ir_graph))) /* There should be no Phi nodes */ return new_Bad(); /* in the Start Block. */ @@ -1257,7 +1356,15 @@ static ir_node *equivalent_node_Phi(ir_node *n) { for (i = 0; i < n_preds; ++i) { first_val = get_Phi_pred(n, i); if ( (first_val != n) /* not self pointer */ -#if 1 +#if 0 + /* BEWARE: when the if is changed to 1, Phi's will ignore it's Bad + * predecessors. Then, Phi nodes in dead code might be removed, causing + * nodes pointing to themself (Add's for instance). + * This is really bad and causes endless recursions in several + * code pathes, so we do NOT optimize such a code. + * This is not that bad as it sounds, optimize_cf() removes bad control flow + * (and bad Phi predecessors), so live code is optimized later. + */ && (! is_Bad(first_val)) #endif ) { /* value not dead */ @@ -1276,7 +1383,8 @@ static ir_node *equivalent_node_Phi(ir_node *n) { ir_node *scnd_val = get_Phi_pred(n, i); if ( (scnd_val != n) && (scnd_val != first_val) -#if 1 +#if 0 + /* see above */ && (! 
is_Bad(scnd_val)) #endif ) { @@ -1299,45 +1407,36 @@ static ir_node *equivalent_node_Phi(ir_node *n) { * themselves. */ static ir_node *equivalent_node_Sync(ir_node *n) { - int i, n_preds; - - ir_node *oldn = n; - ir_node *first_val = NULL; /* to shutup gcc */ - - if (!get_opt_normalize()) return n; + int arity = get_Sync_n_preds(n); + int i; - n_preds = get_Sync_n_preds(n); + for (i = 0; i < arity;) { + ir_node *pred = get_Sync_pred(n, i); + int j; - /* Find first non-self-referencing input */ - for (i = 0; i < n_preds; ++i) { - first_val = get_Sync_pred(n, i); - if ((first_val != n) /* not self pointer */ && - (! is_Bad(first_val)) - ) { /* value not dead */ - break; /* then found first value. */ + /* Remove Bad predecessors */ + if (is_Bad(pred)) { + del_Sync_n(n, i); + --arity; + continue; } - } - - if (i >= n_preds) - /* A totally Bad or self-referencing Sync (we didn't break the above loop) */ - return new_Bad(); - /* search the rest of inputs, determine if any of these - are non-self-referencing */ - while (++i < n_preds) { - ir_node *scnd_val = get_Sync_pred(n, i); - if ((scnd_val != n) && - (scnd_val != first_val) && - (! is_Bad(scnd_val)) - ) - break; + /* Remove duplicate predecessors */ + for (j = 0;; ++j) { + if (j >= i) { + ++i; + break; + } + if (get_Sync_pred(n, j) == pred) { + del_Sync_n(n, i); + --arity; + break; + } + } } - if (i >= n_preds) { - /* Fold, if no multiple distinct non-self-referencing inputs */ - n = first_val; - DBG_OPT_SYNC(oldn, n); - } + if (arity == 0) return new_Bad(); + if (arity == 1) return get_Sync_pred(n, 0); return n; } /* equivalent_node_Sync */ @@ -1445,41 +1544,54 @@ static ir_node *equivalent_node_Mux(ir_node *n) else if (is_Proj(sel) && !mode_honor_signed_zeros(get_irn_mode(n))) { ir_node *cmp = get_Proj_pred(sel); long proj_nr = get_Proj_proj(sel); - ir_node *b = get_Mux_false(n); - ir_node *a = get_Mux_true(n); + ir_node *f = get_Mux_false(n); + ir_node *t = get_Mux_true(n); /* - * Note: normalization puts the constant on the right site, - * so we check only one case. - * * Note further that these optimization work even for floating point * with NaN's because -NaN == NaN. * However, if +0 and -0 is handled differently, we cannot use the first one. */ - if (is_Cmp(cmp) && get_Cmp_left(cmp) == a) { - ir_node *cmp_r = get_Cmp_right(cmp); - if (is_Const(cmp_r) && is_Const_null(cmp_r)) { - /* Mux(a CMP 0, X, a) */ - if (is_Minus(b) && get_Minus_op(b) == a) { - /* Mux(a CMP 0, -a, a) */ - if (proj_nr == pn_Cmp_Eq) { - /* Mux(a == 0, -a, a) ==> -a */ - n = b; + if (is_Cmp(cmp)) { + ir_node *const cmp_l = get_Cmp_left(cmp); + ir_node *const cmp_r = get_Cmp_right(cmp); + + switch (proj_nr) { + case pn_Cmp_Eq: + if ((cmp_l == t && cmp_r == f) || /* Psi(t == f, t, f) -> f */ + (cmp_l == f && cmp_r == t)) { /* Psi(f == t, t, f) -> f */ + n = f; DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM); - } else if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) { - /* Mux(a != 0, -a, a) ==> a */ - n = a; + return n; + } + break; + + case pn_Cmp_Lg: + case pn_Cmp_Ne: + if ((cmp_l == t && cmp_r == f) || /* Psi(t != f, t, f) -> t */ + (cmp_l == f && cmp_r == t)) { /* Psi(f != t, t, f) -> t */ + n = t; DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM); + return n; } - } else if (is_Const(b) && is_Const_null(b)) { - /* Mux(a CMP 0, 0, a) */ - if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) { - /* Mux(a != 0, 0, a) ==> a */ - n = a; + break; + } + + /* + * Note: normalization puts the constant on the right side, + * so we check only one case. 
+ */ + if (cmp_l == t && is_Const(cmp_r) && is_Const_null(cmp_r)) { + /* Mux(t CMP 0, X, t) */ + if (is_Minus(f) && get_Minus_op(f) == t) { + /* Mux(t CMP 0, -t, t) */ + if (proj_nr == pn_Cmp_Eq) { + /* Mux(t == 0, -t, t) ==> -t */ + n = f; DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM); - } else if (proj_nr == pn_Cmp_Eq) { - /* Mux(a == 0, 0, a) ==> 0 */ - n = b; + } else if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) { + /* Mux(t != 0, -t, t) ==> t */ + n = t; DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM); } } @@ -1538,19 +1650,9 @@ static ir_node *equivalent_node_Confirm(ir_node *n) { */ n = pred; } - if (pnc == pn_Cmp_Eq) { - ir_node *bound = get_Confirm_bound(n); - - /* - * Optimize a rare case: - * Confirm(x, '=', Constlike) ==> Constlike - */ - if (is_irn_constlike(bound)) { - DBG_OPT_CONFIRM(n, bound); - return bound; - } - } - return get_opt_remove_confirm() ? get_Confirm_value(n) : n; + if (get_opt_remove_confirm()) + return get_Confirm_value(n); + return n; } /** @@ -1653,7 +1755,7 @@ static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *op CASE(Shl); CASE(Shr); CASE(Shrs); - CASE(Rot); + CASE(Rotl); CASE(Not); CASE(Minus); CASE(Mul); @@ -1767,7 +1869,7 @@ static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, tarval *(*eval)(ta int i, n; if (get_nodes_block(a) != get_nodes_block(b)) - return NULL; + return NULL; n = get_irn_arity(a); NEW_ARR_A(void *, res, n); @@ -1880,12 +1982,12 @@ static ir_node *transform_node_AddSub(ir_node *n) { unsigned ref_bits = get_mode_size_bits(mode); if (is_Conv(left)) { - ir_mode *mode = get_irn_mode(left); - unsigned bits = get_mode_size_bits(mode); + ir_mode *lmode = get_irn_mode(left); + unsigned bits = get_mode_size_bits(lmode); if (ref_bits == bits && - mode_is_int(mode) && - get_mode_arithmetic(mode) == irma_twos_complement) { + mode_is_int(lmode) && + get_mode_arithmetic(lmode) == irma_twos_complement) { ir_node *pre = get_Conv_op(left); ir_mode *pre_mode = get_irn_mode(pre); @@ -1903,12 +2005,12 @@ static ir_node *transform_node_AddSub(ir_node *n) { } if (is_Conv(right)) { - ir_mode *mode = get_irn_mode(right); - unsigned bits = get_mode_size_bits(mode); + ir_mode *rmode = get_irn_mode(right); + unsigned bits = get_mode_size_bits(rmode); if (ref_bits == bits && - mode_is_int(mode) && - get_mode_arithmetic(mode) == irma_twos_complement) { + mode_is_int(rmode) && + get_mode_arithmetic(rmode) == irma_twos_complement) { ir_node *pre = get_Conv_op(right); ir_mode *pre_mode = get_irn_mode(pre); @@ -1924,7 +2026,21 @@ static ir_node *transform_node_AddSub(ir_node *n) { } } } + + /* let address arithmetic use unsigned modes */ + if (is_Const(right)) { + ir_mode *rmode = get_irn_mode(right); + + if (mode_is_signed(rmode) && get_mode_arithmetic(rmode) == irma_twos_complement) { + /* convert a AddP(P, *s) into AddP(P, *u) */ + ir_mode *nm = get_reference_mode_unsigned_eq(mode); + + ir_node *pre = new_r_Conv(current_ir_graph, get_nodes_block(n), right, nm); + set_binop_right(n, pre); + } + } } + return n; } /* transform_node_AddSub */ @@ -1977,6 +2093,17 @@ static ir_node *transform_node_Add(ir_node *n) { b = get_Add_right(n); mode = get_irn_mode(n); + + if (mode_is_reference(mode)) { + ir_mode *lmode = get_irn_mode(a); + + if (is_Const(b) && is_Const_null(b) && mode_is_int(lmode)) { + /* an Add(a, NULL) is a hidden Conv */ + dbg_info *dbg = get_irn_dbg_info(n); + return new_rd_Conv(dbg, current_ir_graph, get_nodes_block(n), a, mode); + } + } + HANDLE_BINOP_PHI(tarval_add, a, b, c, mode); /* for FP these 
optimizations are only allowed if fp_strict_algebraic is disabled */ @@ -1986,7 +2113,7 @@ static ir_node *transform_node_Add(ir_node *n) { if (mode_is_num(mode)) { /* the following code leads to endless recursion when Mul are replaced by a simple instruction chain */ if (!is_arch_dep_running() && a == b && mode_is_int(mode)) { - ir_node *block = get_irn_n(n, -1); + ir_node *block = get_nodes_block(n); n = new_rd_Mul( get_irn_dbg_info(n), @@ -2091,6 +2218,16 @@ static ir_node *transform_node_Sub(ir_node *n) { mode = get_irn_mode(n); + if (mode_is_int(mode)) { + ir_mode *lmode = get_irn_mode(a); + + if (is_Const(b) && is_Const_null(b) && mode_is_reference(lmode)) { + /* a Sub(a, NULL) is a hidden Conv */ + dbg_info *dbg = get_irn_dbg_info(n); + return new_rd_Conv(dbg, current_ir_graph, get_nodes_block(n), a, mode); + } + } + restart: HANDLE_BINOP_PHI(tarval_sub, a, b, c, mode); @@ -2783,8 +2920,17 @@ static ir_node *transform_node_Quot(ir_node *n) { if (is_Const(b)) { tarval *tv = get_Const_tarval(b); + int rem; + /* + * Floating point constant folding might be disabled here to + * prevent rounding. + * However, as we check for exact result, doing it is safe. + * Switch it on. + */ + rem = tarval_enable_fp_ops(1); tv = tarval_quo(get_mode_one(mode), tv); + (void)tarval_enable_fp_ops(rem); /* Do the transformation if the result is either exact or we are not using strict rules. */ @@ -2829,7 +2975,8 @@ static ir_node *transform_node_Abs(ir_node *n) { /* * We can replace the Abs by -x here. - * We even could add a new Confirm here. + * We even could add a new Confirm here + * (if not twos complement) * * Note that -x would create a new node, so we could * not run it in the equivalent_node() context. @@ -2880,7 +3027,8 @@ static ir_node *transform_node_Cond(ir_node *n) { (get_opt_unreachable_code())) { /* It's a boolean Cond, branching on a boolean constant. Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */ - jmp = new_r_Jmp(current_ir_graph, get_nodes_block(n)); + ir_node *blk = get_nodes_block(n); + jmp = new_r_Jmp(current_ir_graph, blk); turn_into_tuple(n, pn_Cond_max); if (ta == tarval_b_true) { set_Tuple_pred(n, pn_Cond_false, new_Bad()); @@ -2890,12 +3038,16 @@ static ir_node *transform_node_Cond(ir_node *n) { set_Tuple_pred(n, pn_Cond_true, new_Bad()); } /* We might generate an endless loop, so keep it alive. */ - add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n)); + add_End_keepalive(get_irg_end(current_ir_graph), blk); } return n; } /* transform_node_Cond */ -typedef ir_node* (*recursive_transform) (ir_node *n); +/** + * Prototype of a recursive transform function + * for bitwise distributive transformations. 
+ */ +typedef ir_node* (*recursive_transform)(ir_node *n); /** * makes use of distributive laws for and, or, eor @@ -2988,7 +3140,7 @@ static ir_node *transform_bitwise_distributive(ir_node *n, n = new_rd_And(dbgi, irg, blk, new_n, c, mode); } else { n = exact_copy(a); - set_irn_n(n, -1, blk); + set_nodes_block(n, blk); set_binop_left(n, new_n); set_binop_right(n, c); add_identities(current_ir_graph->value_table, n); @@ -3626,11 +3778,9 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { break; } - /* remove Casts */ - if (is_Cast(left)) - left = get_Cast_op(left); - if (is_Cast(right)) - right = get_Cast_op(right); + /* remove Casts of both sides */ + left = skip_Cast(left); + right = skip_Cast(right); /* Remove unnecessary conversions */ /* TODO handle constants */ @@ -3665,7 +3815,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { } } - /* remove operation of both sides if possible */ + /* remove operation on both sides if possible */ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) { /* * The following operations are NOT safe for floating point operations, for instance @@ -3739,11 +3889,11 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) { DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); } break; - case iro_Rot: - if (get_Rot_right(left) == get_Rot_right(right)) { - /* a ROT X CMP b ROT X ==> a CMP b */ - left = get_Rot_left(left); - right = get_Rot_left(right); + case iro_Rotl: + if (get_Rotl_right(left) == get_Rotl_right(right)) { + /* a ROTL X CMP b ROTL X ==> a CMP b */ + left = get_Rotl_left(left); + right = get_Rotl_left(right); changed |= 1; DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP); } @@ -4391,12 +4541,12 @@ static ir_node *transform_node_Or_bf_store(ir_node *or) { } /* transform_node_Or_bf_store */ /** - * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot + * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rotl */ -static ir_node *transform_node_Or_Rot(ir_node *or) { +static ir_node *transform_node_Or_Rotl(ir_node *or) { ir_mode *mode = get_irn_mode(or); ir_node *shl, *shr, *block; - ir_node *irn, *x, *c1, *c2, *v, *sub, *n; + ir_node *irn, *x, *c1, *c2, *v, *sub, *n, *rotval; tarval *tv1, *tv2; if (! mode_is_int(mode)) @@ -4436,66 +4586,47 @@ static ir_node *transform_node_Or_Rot(ir_node *or) { != (int) get_mode_size_bits(mode)) return or; - /* yet, condition met */ - block = get_irn_n(or, -1); - - n = new_r_Rot(current_ir_graph, block, x, c1, mode); - - DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROT); - return n; - } else if (is_Sub(c1)) { - v = c2; - sub = c1; - - if (get_Sub_right(sub) != v) - return or; - - c1 = get_Sub_left(sub); - if (!is_Const(c1)) - return or; - - tv1 = get_Const_tarval(c1); - if (! tarval_is_long(tv1)) - return or; - - if (get_tarval_long(tv1) != (int) get_mode_size_bits(mode)) - return or; - /* yet, condition met */ block = get_nodes_block(or); - /* a Rot right is not supported, so use a rot left */ - n = new_r_Rot(current_ir_graph, block, x, sub, mode); + n = new_r_Rotl(current_ir_graph, block, x, c1, mode); - DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT); + DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROTL); return n; - } else if (is_Sub(c2)) { - v = c1; - sub = c2; + } - c1 = get_Sub_left(sub); - if (!is_Const(c1)) - return or; + if (is_Sub(c1)) { + v = c2; + sub = c1; + rotval = sub; /* a Rot right is not supported, so use a rot left */ + } else if (is_Sub(c2)) { + v = c1; + sub = c2; + rotval = v; + } else return or; - tv1 = get_Const_tarval(c1); - if (! 
tarval_is_long(tv1)) - return or; + if (get_Sub_right(sub) != v) + return or; - if (get_tarval_long(tv1) != (int) get_mode_size_bits(mode)) - return or; + c1 = get_Sub_left(sub); + if (!is_Const(c1)) + return or; - /* yet, condition met */ - block = get_irn_n(or, -1); + tv1 = get_Const_tarval(c1); + if (! tarval_is_long(tv1)) + return or; - /* a Rot Left */ - n = new_r_Rot(current_ir_graph, block, x, v, mode); + if (get_tarval_long(tv1) != (int) get_mode_size_bits(mode)) + return or; - DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT); - return n; - } + /* yet, condition met */ + block = get_nodes_block(or); - return or; -} /* transform_node_Or_Rot */ + n = new_r_Rotl(current_ir_graph, block, x, rotval, mode); + + DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL); + return n; +} /* transform_node_Or_Rotl */ /** * Transform an Or. @@ -4540,7 +4671,7 @@ static ir_node *transform_node_Or(ir_node *n) { HANDLE_BINOP_PHI(tarval_or, a, b, c, mode); n = transform_node_Or_bf_store(n); - n = transform_node_Or_Rot(n); + n = transform_node_Or_Rotl(n); if (n != oldn) return n; @@ -4554,15 +4685,15 @@ static ir_node *transform_node_Or(ir_node *n) { static ir_node *transform_node(ir_node *n); /** - * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl, Rot. + * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl, Rotl. * * Should be moved to reassociation? */ static ir_node *transform_node_shift(ir_node *n) { ir_node *left, *right; - tarval *tv1, *tv2, *res; ir_mode *mode; - int modulo_shf, flag; + tarval *tv1, *tv2, *res; + ir_node *in[2], *irn, *block; left = get_binop_left(n); @@ -4579,49 +4710,249 @@ static ir_node *transform_node_shift(ir_node *n) { if (tv2 == tarval_bad) return n; - res = tarval_add(tv1, tv2); + res = tarval_add(tv1, tv2); + mode = get_irn_mode(n); /* beware: a simple replacement works only, if res < modulo shift */ + if (!is_Rotl(n)) { + int modulo_shf = get_mode_modulo_shift(mode); + assert(modulo_shf >= (int) get_mode_size_bits(mode)); + if (modulo_shf > 0) { + tarval *modulo = new_tarval_from_long(modulo_shf, + get_tarval_mode(res)); + + /* shifting too much */ + if (!(tarval_cmp(res, modulo) & pn_Cmp_Lt)) { + if (is_Shrs(n)) { + ir_graph *irg = get_irn_irg(n); + ir_node *block = get_nodes_block(n); + dbg_info *dbgi = get_irn_dbg_info(n); + ir_node *cnst = new_Const(mode_Iu, new_tarval_from_long(get_mode_size_bits(mode)-1, mode_Iu)); + return new_rd_Shrs(dbgi, irg, block, get_binop_left(left), + cnst, mode); + } + + return new_Const(mode, get_mode_null(mode)); + } + } + } else { + res = tarval_mod(res, new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(res))); + } + + /* ok, we can replace it */ + block = get_nodes_block(n); + + in[0] = get_binop_left(left); + in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res); + + irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in); + + DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT); + + return transform_node(irn); +} /* transform_node_shift */ + +/** + * normalisation: (x & c1) >> c2 to (x >> c2) & (c1 >> c2) + * (we can use: + * - and, or, xor instead of & + * - Shl, Shr, Shrs, rotl instead of >> + * (with a special case for Or/Xor + Shrs) + */ +static ir_node *transform_node_bitop_shift(ir_node *n) { + ir_node *left; + ir_node *right = get_binop_right(n); + ir_mode *mode = get_irn_mode(n); + ir_node *bitop_left; + ir_node *bitop_right; + ir_op *op_left; + ir_graph *irg; + ir_node *block; + dbg_info *dbgi; + ir_node *new_shift; + ir_node *new_bitop; + ir_node *new_const; + tarval *tv1; + 
tarval *tv2; + tarval *tv_shift; + + assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n)); + + if (!is_Const(right)) + return n; + + left = get_binop_left(n); + op_left = get_irn_op(left); + if (op_left != op_And && op_left != op_Or && op_left != op_Eor) + return n; + + /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */ + if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) { + /* TODO: test if sign bit is affectes */ + return n; + } + + bitop_right = get_binop_right(left); + if (!is_Const(bitop_right)) + return n; + + bitop_left = get_binop_left(left); + + irg = get_irn_irg(n); + block = get_nodes_block(n); + dbgi = get_irn_dbg_info(n); + tv1 = get_Const_tarval(bitop_right); + tv2 = get_Const_tarval(right); + + assert(get_tarval_mode(tv1) == mode); + + if (is_Shl(n)) { + new_shift = new_rd_Shl(dbgi, irg, block, bitop_left, right, mode); + tv_shift = tarval_shl(tv1, tv2); + } else if(is_Shr(n)) { + new_shift = new_rd_Shr(dbgi, irg, block, bitop_left, right, mode); + tv_shift = tarval_shr(tv1, tv2); + } else if(is_Shrs(n)) { + new_shift = new_rd_Shrs(dbgi, irg, block, bitop_left, right, mode); + tv_shift = tarval_shrs(tv1, tv2); + } else { + assert(is_Rotl(n)); + new_shift = new_rd_Rotl(dbgi, irg, block, bitop_left, right, mode); + tv_shift = tarval_rotl(tv1, tv2); + } + + assert(get_tarval_mode(tv_shift) == mode); + new_const = new_Const(mode, tv_shift); + + if (op_left == op_And) { + new_bitop = new_rd_And(dbgi, irg, block, new_shift, new_const, mode); + } else if(op_left == op_Or) { + new_bitop = new_rd_Or(dbgi, irg, block, new_shift, new_const, mode); + } else { + assert(op_left == op_Eor); + new_bitop = new_rd_Eor(dbgi, irg, block, new_shift, new_const, mode); + } + + return new_bitop; +} + +/** + * normalisation: + * (x << c1) >> c2 <=> x OP (c2-c1) & ((-1 << c1) >> c2) + * also: + * (x >> c1) << c2 <=> x OP (c2-c1) & ((-1 >> c1) << c2) + * (also with x >>s c1 when c1>=c2) + */ +static ir_node *transform_node_shl_shr(ir_node *n) { + ir_node *left; + ir_node *right = get_binop_right(n); + ir_node *x; + ir_graph *irg; + ir_node *block; + ir_mode *mode; + dbg_info *dbgi; + ir_node *new_const; + ir_node *new_shift; + ir_node *new_and; + tarval *tv_shl; + tarval *tv_shr; + tarval *tv_shift; + tarval *tv_mask; + pn_Cmp pnc; + int need_shrs = 0; + + assert(is_Shl(n) || is_Shr(n) || is_Shrs(n)); + + if (!is_Const(right)) + return n; + + left = get_binop_left(n); mode = get_irn_mode(n); + if (is_Shl(n) && (is_Shr(left) || is_Shrs(left))) { + ir_node *shr_right = get_binop_right(left); - flag = 0; + if (!is_Const(shr_right)) + return n; - modulo_shf = get_mode_modulo_shift(mode); - if (modulo_shf > 0) { - tarval *modulo = new_tarval_from_long(modulo_shf, get_tarval_mode(res)); + x = get_binop_left(left); + tv_shr = get_Const_tarval(shr_right); + tv_shl = get_Const_tarval(right); - if (tarval_cmp(res, modulo) & pn_Cmp_Lt) - flag = 1; - } else - flag = 1; + if (is_Shrs(left)) { + /* shrs variant only allowed if c1 >= c2 */ + if (! 
(tarval_cmp(tv_shl, tv_shr) & pn_Cmp_Ge)) + return n; - if (flag) { - /* ok, we can replace it */ - ir_node *in[2], *irn, *block = get_irn_n(n, -1); + tv_mask = tarval_shrs(get_mode_all_one(mode), tv_shr); + need_shrs = 1; + } else { + tv_mask = tarval_shr(get_mode_all_one(mode), tv_shr); + } + tv_mask = tarval_shl(tv_mask, tv_shl); + } else if(is_Shr(n) && is_Shl(left)) { + ir_node *shl_right = get_Shl_right(left); - in[0] = get_binop_left(left); - in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res); + if (!is_Const(shl_right)) + return n; - irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in); + x = get_Shl_left(left); + tv_shr = get_Const_tarval(right); + tv_shl = get_Const_tarval(shl_right); - DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT); + tv_mask = tarval_shl(get_mode_all_one(mode), tv_shl); + tv_mask = tarval_shr(tv_mask, tv_shr); + } else { + return n; + } - return transform_node(irn); + assert(get_tarval_mode(tv_shl) == get_tarval_mode(tv_shr)); + assert(tv_mask != tarval_bad); + assert(get_tarval_mode(tv_mask) == mode); + + irg = get_irn_irg(n); + block = get_nodes_block(n); + dbgi = get_irn_dbg_info(n); + + pnc = tarval_cmp(tv_shl, tv_shr); + if (pnc == pn_Cmp_Lt || pnc == pn_Cmp_Eq) { + tv_shift = tarval_sub(tv_shr, tv_shl); + new_const = new_Const(get_tarval_mode(tv_shift), tv_shift); + if (need_shrs) { + new_shift = new_rd_Shrs(dbgi, irg, block, x, new_const, mode); + } else { + new_shift = new_rd_Shr(dbgi, irg, block, x, new_const, mode); + } + } else { + assert(pnc == pn_Cmp_Gt); + tv_shift = tarval_sub(tv_shl, tv_shr); + new_const = new_Const(get_tarval_mode(tv_shift), tv_shift); + new_shift = new_rd_Shl(dbgi, irg, block, x, new_const, mode); } - return n; -} /* transform_node_shift */ + + new_const = new_Const(mode, tv_mask); + new_and = new_rd_And(dbgi, irg, block, new_shift, new_const, mode); + + return new_and; +} /** * Transform a Shr. */ static ir_node *transform_node_Shr(ir_node *n) { ir_node *c, *oldn = n; - ir_node *a = get_Shr_left(n); - ir_node *b = get_Shr_right(n); - ir_mode *mode = get_irn_mode(n); + ir_node *left = get_Shr_left(n); + ir_node *right = get_Shr_right(n); + ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_shr, a, b, c, mode); - return transform_node_shift(n); + HANDLE_BINOP_PHI(tarval_shr, left, right, c, mode); + n = transform_node_shift(n); + + if (is_Shr(n)) + n = transform_node_shl_shr(n); + if (is_Shr(n)) + n = transform_node_bitop_shift(n); + + return n; } /* transform_node_Shr */ /** @@ -4634,7 +4965,12 @@ static ir_node *transform_node_Shrs(ir_node *n) { ir_mode *mode = get_irn_mode(n); HANDLE_BINOP_PHI(tarval_shrs, a, b, c, mode); - return transform_node_shift(n); + n = transform_node_shift(n); + + if (is_Shrs(n)) + n = transform_node_bitop_shift(n); + + return n; } /* transform_node_Shrs */ /** @@ -4647,21 +4983,33 @@ static ir_node *transform_node_Shl(ir_node *n) { ir_mode *mode = get_irn_mode(n); HANDLE_BINOP_PHI(tarval_shl, a, b, c, mode); - return transform_node_shift(n); + n = transform_node_shift(n); + + if (is_Shl(n)) + n = transform_node_shl_shr(n); + if (is_Shl(n)) + n = transform_node_bitop_shift(n); + + return n; } /* transform_node_Shl */ /** - * Transform a Rot. + * Transform a Rotl. 
*/ -static ir_node *transform_node_Rot(ir_node *n) { +static ir_node *transform_node_Rotl(ir_node *n) { ir_node *c, *oldn = n; - ir_node *a = get_Rot_left(n); - ir_node *b = get_Rot_right(n); + ir_node *a = get_Rotl_left(n); + ir_node *b = get_Rotl_right(n); ir_mode *mode = get_irn_mode(n); - HANDLE_BINOP_PHI(tarval_rot, a, b, c, mode); - return transform_node_shift(n); -} /* transform_node_Rot */ + HANDLE_BINOP_PHI(tarval_rotl, a, b, c, mode); + n = transform_node_shift(n); + + if (is_Rotl(n)) + n = transform_node_bitop_shift(n); + + return n; +} /* transform_node_Rotl */ /** * Transform a Conv. @@ -4717,17 +5065,17 @@ static ir_node *transform_node_End(ir_node *n) { /** returns 1 if a == -b */ static int is_negated_value(ir_node *a, ir_node *b) { - if(is_Minus(a) && get_Minus_op(a) == b) + if (is_Minus(a) && get_Minus_op(a) == b) return 1; - if(is_Minus(b) && get_Minus_op(b) == a) + if (is_Minus(b) && get_Minus_op(b) == a) return 1; - if(is_Sub(a) && is_Sub(b)) { + if (is_Sub(a) && is_Sub(b)) { ir_node *a_left = get_Sub_left(a); ir_node *a_right = get_Sub_right(a); ir_node *b_left = get_Sub_left(b); ir_node *b_right = get_Sub_right(b); - if(a_left == b_right && a_right == b_left) + if (a_left == b_right && a_right == b_left) return 1; } @@ -4740,50 +5088,103 @@ static int is_negated_value(ir_node *a, ir_node *b) { static ir_node *transform_node_Mux(ir_node *n) { ir_node *oldn = n, *sel = get_Mux_sel(n); ir_mode *mode = get_irn_mode(n); + ir_node *t = get_Mux_true(n); + ir_node *f = get_Mux_false(n); + ir_graph *irg = current_ir_graph; + ir_node *conds[1], *vals[2]; + + /* first normalization step: move a possible zero to the false case */ + if (is_Proj(sel)) { + ir_node *cmp = get_Proj_pred(sel); + + if (is_Cmp(cmp)) { + if (is_Const(t) && is_Const_null(t)) { + /* Psi(x, 0, y) => Psi(x, y, 0) */ + pn_Cmp pnc = get_Proj_proj(sel); + sel = new_r_Proj(irg, get_nodes_block(cmp), cmp, mode_b, + get_negated_pnc(pnc, get_irn_mode(get_Cmp_left(cmp)))); + conds[0] = sel; + vals[0] = f; + vals[1] = t; + n = new_rd_Psi(get_irn_dbg_info(n), irg, get_nodes_block(n), 1, conds, vals, mode); + t = vals[0]; + f = vals[1]; + } + } + } + /* note: after normalization, false can only happen on default */ if (mode == mode_b) { - ir_node *t = get_Mux_true(n); - ir_node *f = get_Mux_false(n); dbg_info *dbg = get_irn_dbg_info(n); - ir_node *block = get_irn_n(n, -1); + ir_node *block = get_nodes_block(n); ir_graph *irg = current_ir_graph; if (is_Const(t)) { tarval *tv_t = get_Const_tarval(t); if (tv_t == tarval_b_true) { if (is_Const(f)) { + /* Muxb(sel, true, false) = sel */ assert(get_Const_tarval(f) == tarval_b_false); + DBG_OPT_ALGSIM0(oldn, sel, FS_OPT_MUX_BOOL); return sel; } else { - return new_rd_Or(dbg, irg, block, sel, f, mode_b); - } - } else { - ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b); - assert(tv_t == tarval_b_false); - if (is_Const(f)) { - assert(get_Const_tarval(f) == tarval_b_true); - return not_sel; - } else { - return new_rd_And(dbg, irg, block, not_sel, f, mode_b); + /* Muxb(sel, true, x) = Or(sel, x) */ + n = new_rd_Or(dbg, irg, block, sel, f, mode_b); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL); + return n; } } } else if (is_Const(f)) { tarval *tv_f = get_Const_tarval(f); if (tv_f == tarval_b_true) { + /* Muxb(sel, x, true) = Or(Not(sel), x) */ ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b); - return new_rd_Or(dbg, irg, block, not_sel, t, mode_b); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL); + n = new_rd_Or(dbg, irg, block, not_sel, t, mode_b); + 
return n; } else { + /* Muxb(sel, x, false) = And(sel, x) */ assert(tv_f == tarval_b_false); - return new_rd_And(dbg, irg, block, sel, t, mode_b); + n = new_rd_And(dbg, irg, block, sel, t, mode_b); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL); + return n; } } } - if (is_Proj(sel) && !mode_honor_signed_zeros(mode)) { + /* more normalization: try to normalize Mux(x, C1, C2) into Mux(x, +1/-1, 0) op C2 */ + if (is_Const(t) && is_Const(f) && mode_is_int(mode)) { + tarval *a = get_Const_tarval(t); + tarval *b = get_Const_tarval(f); + tarval *null = get_tarval_null(mode); + tarval *diff, *min; + + if (tarval_cmp(a, b) & pn_Cmp_Gt) { + diff = tarval_sub(a, b); + min = b; + } else { + diff = tarval_sub(b, a); + min = a; + } + + if (diff == get_tarval_one(mode) && min != null) { + dbg_info *dbg = get_irn_dbg_info(n); + ir_node *block = get_nodes_block(n); + ir_graph *irg = current_ir_graph; + + + conds[0] = sel; + vals[0] = new_Const(mode, tarval_sub(a, min)); + vals[1] = new_Const(mode, tarval_sub(b, min)); + n = new_rd_Psi(dbg, irg, block, 1, conds, vals, mode); + n = new_rd_Add(dbg, irg, block, n, new_Const(mode, min), mode); + return n; + } + } + + if (is_Proj(sel)) { ir_node *cmp = get_Proj_pred(sel); long pn = get_Proj_proj(sel); - ir_node *f = get_Mux_false(n); - ir_node *t = get_Mux_true(n); /* * Note: normalization puts the constant on the right side, @@ -4791,37 +5192,94 @@ static ir_node *transform_node_Mux(ir_node *n) { * * Note further that these optimization work even for floating point * with NaN's because -NaN == NaN. - * However, if +0 and -0 is handled differently, we cannot use the first - * one. + * However, if +0 and -0 is handled differently, we cannot use the Abs/-Abs + * transformations. */ if (is_Cmp(cmp)) { ir_node *cmp_r = get_Cmp_right(cmp); if (is_Const(cmp_r) && is_Const_null(cmp_r)) { - ir_node *block = get_irn_n(n, -1); + ir_node *block = get_nodes_block(n); + ir_node *cmp_l = get_Cmp_left(cmp); - if(is_negated_value(f, t)) { - ir_node *cmp_left = get_Cmp_left(cmp); + if (!mode_honor_signed_zeros(mode) && is_negated_value(f, t)) { + /* f = -t */ - /* Psi(a >= 0, a, -a) = Psi(a <= 0, -a, a) ==> Abs(a) */ - if ( (cmp_left == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)) - || (cmp_left == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))) + if ( (cmp_l == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)) + || (cmp_l == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))) { + /* Psi(a >/>= 0, a, -a) = Psi(a Abs(a) */ n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block, - cmp_left, mode); + cmp_l, mode); DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS); return n; - /* Psi(a <= 0, a, -a) = Psi(a >= 0, -a, a) ==> -Abs(a) */ - } else if ((cmp_left == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)) - || (cmp_left == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))) + } else if ((cmp_l == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)) + || (cmp_l == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))) { + /* Psi(a />= 0, -a, a) ==> -Abs(a) */ n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block, - cmp_left, mode); + cmp_l, mode); n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, block, n, mode); DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS); return n; } } + + if (mode_is_int(mode)) { + /* integer only */ + if ((pn == pn_Cmp_Lg || pn == pn_Cmp_Eq) && is_And(cmp_l)) { + /* Psi((a & b) != 0, c, 0) */ + ir_node *and_r = get_And_right(cmp_l); + ir_node *and_l; + + if (and_r == t && f == cmp_r) { + if (is_Const(t) && tarval_is_single_bit(get_Const_tarval(t))) { + if (pn == pn_Cmp_Lg) { + /* 
Psi((a & 2^C) != 0, 2^C, 0) */ + n = cmp_l; + } else { + /* Psi((a & 2^C) == 0, 2^C, 0) */ + n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, + block, cmp_l, t, mode); + } + return n; + } + } + if (is_Shl(and_r)) { + ir_node *shl_l = get_Shl_left(and_r); + if (is_Const(shl_l) && is_Const_one(shl_l)) { + if (and_r == t && f == cmp_r) { + if (pn == pn_Cmp_Lg) { + /* (a & (1 << n)) != 0, (1 << n), 0) */ + n = cmp_l; + } else { + /* (a & (1 << n)) == 0, (1 << n), 0) */ + n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, + block, cmp_l, t, mode); + } + return n; + } + } + } + and_l = get_And_left(cmp_l); + if (is_Shl(and_l)) { + ir_node *shl_l = get_Shl_left(and_l); + if (is_Const(shl_l) && is_Const_one(shl_l)) { + if (and_l == t && f == cmp_r) { + if (pn == pn_Cmp_Lg) { + /* ((1 << n) & a) != 0, (1 << n), 0) */ + n = cmp_l; + } else { + /* ((1 << n) & a) == 0, (1 << n), 0) */ + n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, + block, cmp_l, t, mode); + } + return n; + } + } + } + } + } } } } @@ -4843,28 +5301,35 @@ static ir_node *transform_node_Psi(ir_node *n) { * of the other sync to our own inputs */ static ir_node *transform_node_Sync(ir_node *n) { - int i, arity; + int arity = get_Sync_n_preds(n); + int i; + + for (i = 0; i < arity;) { + ir_node *pred = get_Sync_pred(n, i); + int pred_arity; + int j; - arity = get_irn_arity(n); - for(i = 0; i < get_irn_arity(n); /*empty*/) { - int i2, arity2; - ir_node *in = get_irn_n(n, i); - if(!is_Sync(in)) { + if (!is_Sync(pred)) { ++i; continue; } - /* set sync input 0 instead of the sync */ - set_irn_n(n, i, get_irn_n(in, 0)); - /* so we check this input again for syncs */ - - /* append all other inputs of the sync to our sync */ - arity2 = get_irn_arity(in); - for(i2 = 1; i2 < arity2; ++i2) { - ir_node *in_in = get_irn_n(in, i2); - add_irn_n(n, in_in); - /* increase arity so we also check the new inputs for syncs */ - arity++; + del_Sync_n(n, i); + --arity; + + pred_arity = get_Sync_n_preds(pred); + for (j = 0; j < pred_arity; ++j) { + ir_node *pred_pred = get_Sync_pred(pred, j); + int k; + + for (k = 0;; ++k) { + if (k >= arity) { + add_irn_n(n, pred_pred); + ++arity; + break; + } + if (get_Sync_pred(n, k) == pred_pred) break; + } } } @@ -4935,7 +5400,7 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops CASE(Shr); CASE(Shrs); CASE(Shl); - CASE(Rot); + CASE(Rotl); CASE(Conv); CASE(End); CASE(Mux); @@ -4990,14 +5455,14 @@ static int node_cmp_attr_Free(ir_node *a, ir_node *b) { static int node_cmp_attr_SymConst(ir_node *a, ir_node *b) { const symconst_attr *pa = get_irn_symconst_attr(a); const symconst_attr *pb = get_irn_symconst_attr(b); - return (pa->num != pb->num) + return (pa->kind != pb->kind) || (pa->sym.type_p != pb->sym.type_p) || (pa->tp != pb->tp); } /* node_cmp_attr_SymConst */ /** Compares the attributes of two Call nodes. */ static int node_cmp_attr_Call(ir_node *a, ir_node *b) { - return (get_irn_call_attr(a) != get_irn_call_attr(b)); + return get_irn_call_attr(a) != get_irn_call_attr(b); } /* node_cmp_attr_Call */ /** Compares the attributes of two Sel nodes. 
*/ @@ -5057,6 +5522,49 @@ static int node_cmp_attr_Store(ir_node *a, ir_node *b) { get_Store_volatility(b) == volatility_is_volatile); } /* node_cmp_attr_Store */ +/** Compares two exception attributes */ +static int node_cmp_exception(ir_node *a, ir_node *b) { + const except_attr *ea = get_irn_except_attr(a); + const except_attr *eb = get_irn_except_attr(b); + + return ea->pin_state != eb->pin_state; +} + +#define node_cmp_attr_Bound node_cmp_exception + +/** Compares the attributes of two Div nodes. */ +static int node_cmp_attr_Div(ir_node *a, ir_node *b) { + const divmod_attr *ma = get_irn_divmod_attr(a); + const divmod_attr *mb = get_irn_divmod_attr(b); + return ma->exc.pin_state != mb->exc.pin_state || + ma->res_mode != mb->res_mode || + ma->no_remainder != mb->no_remainder; +} /* node_cmp_attr_Div */ + +/** Compares the attributes of two DivMod nodes. */ +static int node_cmp_attr_DivMod(ir_node *a, ir_node *b) { + const divmod_attr *ma = get_irn_divmod_attr(a); + const divmod_attr *mb = get_irn_divmod_attr(b); + return ma->exc.pin_state != mb->exc.pin_state || + ma->res_mode != mb->res_mode; +} /* node_cmp_attr_DivMod */ + +/** Compares the attributes of two Mod nodes. */ +static int node_cmp_attr_Mod(ir_node *a, ir_node *b) { + const divmod_attr *ma = get_irn_divmod_attr(a); + const divmod_attr *mb = get_irn_divmod_attr(b); + return ma->exc.pin_state != mb->exc.pin_state || + ma->res_mode != mb->res_mode; +} /* node_cmp_attr_Mod */ + +/** Compares the attributes of two Quot nodes. */ +static int node_cmp_attr_Quot(ir_node *a, ir_node *b) { + const divmod_attr *ma = get_irn_divmod_attr(a); + const divmod_attr *mb = get_irn_divmod_attr(b); + return ma->exc.pin_state != mb->exc.pin_state || + ma->res_mode != mb->res_mode; +} /* node_cmp_attr_Quot */ + /** Compares the attributes of two Confirm nodes. */ static int node_cmp_attr_Confirm(ir_node *a, ir_node *b) { return (get_Confirm_cmp(a) != get_Confirm_cmp(b)); @@ -5140,6 +5648,12 @@ static ir_op_ops *firm_set_default_node_cmp_attr(ir_opcode code, ir_op_ops *ops) CASE(Store); CASE(Confirm); CASE(ASM); + CASE(Div); + CASE(DivMod); + CASE(Mod); + CASE(Quot); + CASE(Bound); + /* FIXME CopyB */ default: /* leave NULL */; } @@ -5195,7 +5709,7 @@ int identities_cmp(const void *elt, const void *key) { /* * Calculate a hash value of a node. */ -unsigned ir_node_hash(ir_node *node) { +unsigned ir_node_hash(const ir_node *node) { unsigned h; int i, irn_arity; @@ -5303,52 +5817,6 @@ static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node) { } } /* update_value_table */ -/** - * Return the canonical node computing the same value as n. - * - * @param value_table The value table - * @param n The node to lookup - * - * Looks up the node in a hash table. - * - * For Const nodes this is performed in the constructor, too. Const - * nodes are extremely time critical because of their frequent use in - * constant string arrays. - */ -static INLINE ir_node *identify(pset *value_table, ir_node *n) { - ir_node *o = NULL; - - if (!value_table) return n; - - normalize_node(n); - - o = pset_find(value_table, n, ir_node_hash(n)); - if (o == NULL) - return n; - - update_known_irn(o, n); - DBG_OPT_CSE(n, o); - - return o; -} /* identify */ - -/** - * During construction we set the op_pin_state_pinned flag in the graph right when the - * optimization is performed. The flag turning on procedure global cse could - * be changed between two allocations. This way we are safe. 
- * - * @param value_table The value table - * @param n The node to lookup - */ -static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) { - ir_node *old = n; - - n = identify(value_table, n); - if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n)) - set_irg_pinned(current_ir_graph, op_pin_state_floats); - return n; -} /* identify_cons */ - /* * Return the canonical node computing the same value as n. * Looks up the node in a hash table, enters it in the table @@ -5378,6 +5846,23 @@ ir_node *identify_remember(pset *value_table, ir_node *n) { return o; } /* identify_remember */ +/** + * During construction we set the op_pin_state_pinned flag in the graph right when the + * optimization is performed. The flag turning on procedure global cse could + * be changed between two allocations. This way we are safe. + * + * @param value_table The value table + * @param n The node to lookup + */ +static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) { + ir_node *old = n; + + n = identify_remember(value_table, n); + if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n)) + set_irg_pinned(current_ir_graph, op_pin_state_floats); + return n; +} /* identify_cons */ + /* Add a node to the identities value table. */ void add_identities(pset *value_table, ir_node *node) { if (get_opt_cse() && is_no_Block(node)) @@ -5554,7 +6039,7 @@ ir_node *optimize_node(ir_node *n) { } /* remove unnecessary nodes */ - if (get_opt_constant_folding() || + if (get_opt_algebraic_simplification() || (iro == iro_Phi) || /* always optimize these nodes. */ (iro == iro_Id) || (iro == iro_Proj) || @@ -5582,14 +6067,14 @@ ir_node *optimize_node(ir_node *n) { /* Some more constant expression evaluation that does not allow to free the node. */ iro = get_irn_opcode(n); - if (get_opt_constant_folding() || + if (get_opt_algebraic_simplification() || (iro == iro_Cond) || (iro == iro_Proj)) /* Flags tested local. */ n = transform_node(n); /* Remove nodes with dead (Bad) input. Run always for transformation induced Bads. */ - n = gigo (n); + n = gigo(n); /* Now we have a legal, useful node. Enter it in hash table for CSE */ if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) { @@ -5658,7 +6143,7 @@ ir_node *optimize_in_place_2(ir_node *n) { now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common subexpressions within a block. */ if (get_opt_cse()) { - n = identify(current_ir_graph->value_table, n); + n = identify_remember(current_ir_graph->value_table, n); } /* Some more constant expression evaluation. */
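
The Rot -> Rotl rework above (computed_value_Rotl, transform_node_Or_Rotl) rests on the identity (x << c) | (x >> (bits - c)) == rotl(x, c), valid for unsigned x with 0 < c < bits, which is why transform_node_Or_Rotl only fires when the two shift constants sum to exactly the mode's bit size. A standalone sketch in plain C, independent of the libfirm API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The pattern transform_node_Or_Rotl() recognizes: for unsigned x and
 * 0 < c < 32, an Or of complementary Shl/Shr is a rotate-left. */
static uint32_t rotl32_via_or(uint32_t x, unsigned c)
{
    assert(0 < c && c < 32);   /* mirrors the c1 + c2 == bits check */
    return (x << c) | (x >> (32 - c));
}

int main(void)
{
    uint32_t x = 0xDEADBEEFu;
    printf("0x%08X rotl 8 = 0x%08X\n", x, rotl32_via_or(x, 8)); /* 0xADBEEFDE */
    return 0;
}

The Sub branches of the same function cover the count computed as bits - c at runtime; since only a left rotate remains after this change, the right-rotate form is expressed as a Rotl whose count is that Sub.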
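
transform_node_shift() folds (a >> c1) >> c2 into a single shift by c1 + c2, and its "beware" comment names the catch: the fold is a plain replacement only while the summed count stays below the mode's modulo shift. Once it reaches the bit width, Shr and Shl yield 0, Shrs saturates at a shift by bits - 1, and Rotl simply wraps the count modulo the width (the tarval_mod branch). A plain-C illustration of the Shr case, with a hypothetical helper name, not libfirm API:

#include <assert.h>
#include <stdint.h>

/* Each individual count is kept below 32 so both shifts stay within
 * defined C behaviour; the interesting case is c1 + c2 >= 32. */
static uint32_t fold_shr_shr(uint32_t a, unsigned c1, unsigned c2)
{
    assert(c1 < 32 && c2 < 32);
    uint32_t stepwise = (a >> c1) >> c2;              /* original IR      */
    uint32_t folded   = c1 + c2 < 32 ? a >> (c1 + c2) /* folded shift     */
                                     : 0;             /* over-shift -> 0  */
    assert(stepwise == folded);
    return folded;
}

int main(void)
{
    return fold_shr_shr(0xFFFFFFFFu, 20, 20) == 0 ? 0 : 1;
}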
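
The identify_remember()/identify_cons() machinery near the end keeps a hash set (the value_table pset) of all nodes so that structurally identical nodes collapse to one representative, which is the CSE performed in optimize_node(). A minimal value-numbering sketch of the same hash-consing idea; every name here is illustrative, none of it is libfirm API:

#include <assert.h>
#include <stdio.h>

enum { VN_BUCKETS = 64, VN_MAX = 256 };

typedef struct vn_node {
    int             opcode;       /* operation kind                    */
    int             left, right;  /* value numbers of the operands     */
    int             value;        /* value number of this expression   */
    struct vn_node *next;         /* hash-bucket chain                 */
} vn_node;

static vn_node  vn_pool[VN_MAX];
static vn_node *vn_buckets[VN_BUCKETS];
static int      vn_count;

/* Like ir_node_hash(): mix operator and operands into a bucket index. */
static unsigned vn_hash(int opcode, int left, int right)
{
    return ((unsigned)opcode * 9u + (unsigned)left * 31u
            + (unsigned)right * 7u) % VN_BUCKETS;
}

/* Like identify_remember(): return the canonical value, inserting the
 * expression only if no equivalent entry exists yet. */
static int vn_identify(int opcode, int left, int right)
{
    unsigned  h = vn_hash(opcode, left, right);
    vn_node  *n;

    for (n = vn_buckets[h]; n != NULL; n = n->next)
        if (n->opcode == opcode && n->left == left && n->right == right)
            return n->value;                /* CSE hit: reuse           */

    assert(vn_count < VN_MAX);
    n         = &vn_pool[vn_count];
    n->opcode = opcode;
    n->left   = left;
    n->right  = right;
    n->value  = 100 + vn_count++;           /* fresh value number       */
    n->next   = vn_buckets[h];
    vn_buckets[h] = n;
    return n->value;
}

int main(void)
{
    int x = vn_identify('+', 1, 2);
    int y = vn_identify('+', 1, 2);         /* same expression again    */
    printf("x=%d y=%d -> %s\n", x, y, x == y ? "CSE hit" : "miss");
    return 0;
}

In libfirm the equality test is richer than this sketch: identities_cmp() compares opcode, mode and operands, and dispatches to the node_cmp_attr_* handlers (extended above to Div, DivMod, Mod, Quot and Bound) for the per-opcode attributes.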