X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=9f9f0e7d7d71990c5413ddd98c7adc718d3c30eb;hb=8ccfe04ca59ff56fc32b2323b80cee2a5194694d;hp=03eeb57addb6c0dc35f22cb3e03915d919476c0f;hpb=128d686096cf3f9945d31b6bc9d36bd96ff76955;p=libfirm diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 03eeb57ad..9f9f0e7d7 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -131,6 +131,12 @@ static ir_tarval *computed_value_Add(const ir_node *n) if ((ta != tarval_bad) && (tb != tarval_bad)) return tarval_add(ta, tb); + /* x+~x => -1 */ + if ((is_Not(a) && get_Not_op(a) == b) + || (is_Not(b) && get_Not_op(b) == a)) { + return get_mode_all_one(get_irn_mode(n)); + } + return tarval_bad; } /* computed_value_Add */ @@ -264,10 +270,17 @@ static ir_tarval *computed_value_And(const ir_node *n) if ((ta != tarval_bad) && (tb != tarval_bad)) { return tarval_and (ta, tb); - } else { - if (tarval_is_null(ta)) return ta; - if (tarval_is_null(tb)) return tb; } + + if (tarval_is_null(ta)) return ta; + if (tarval_is_null(tb)) return tb; + + /* x&~x => 0 */ + if ((is_Not(a) && get_Not_op(a) == b) + || (is_Not(b) && get_Not_op(b) == a)) { + return get_mode_null(get_irn_mode(n)); + } + return tarval_bad; } /* computed_value_And */ @@ -284,9 +297,15 @@ static ir_tarval *computed_value_Or(const ir_node *n) if ((ta != tarval_bad) && (tb != tarval_bad)) { return tarval_or (ta, tb); - } else { - if (tarval_is_all_one(ta)) return ta; - if (tarval_is_all_one(tb)) return tb; + } + + if (tarval_is_all_one(ta)) return ta; + if (tarval_is_all_one(tb)) return tb; + + /* x|~x => -1 */ + if ((is_Not(a) && get_Not_op(a) == b) + || (is_Not(b) && get_Not_op(b) == a)) { + return get_mode_all_one(get_irn_mode(n)); } return tarval_bad; } /* computed_value_Or */ @@ -303,6 +322,11 @@ static ir_tarval *computed_value_Eor(const ir_node *n) if (a == b) return get_mode_null(get_irn_mode(n)); + /* x^~x => -1 */ + if ((is_Not(a) && get_Not_op(a) == b) + || (is_Not(b) && get_Not_op(b) == a)) { + return get_mode_all_one(get_irn_mode(n)); + } ta = value_of(a); tb = value_of(b); @@ -327,6 +351,28 @@ static ir_tarval *computed_value_Not(const ir_node *n) return tarval_bad; } /* computed_value_Not */ +/** + * Tests whether a shift shifts more bits than available in the mode + */ +static bool is_oversize_shift(const ir_node *n) +{ + ir_node *count = get_binop_right(n); + ir_mode *mode = get_irn_mode(n); + ir_tarval *tv = value_of(count); + long modulo_shift; + long shiftval; + if (tv == tarval_bad) + return false; + if (!tarval_is_long(tv)) + return false; + shiftval = get_tarval_long(tv); + modulo_shift = get_mode_modulo_shift(mode); + if (shiftval < 0 || (modulo_shift > 0 && shiftval >= modulo_shift)) + return false; + + return shiftval >= (long)get_mode_size_bits(mode); +} + /** * Return the value of a Shl. 
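+ *
+ * With is_oversize_shift() above, a Shl by a constant count of at least the
+ * mode's bit size (and not remapped by a modulo shift) now folds to 0 even
+ * when the shifted value is unknown, e.g. x << 9 in an 8-bit mode.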
*/ @@ -341,6 +387,10 @@ static ir_tarval *computed_value_Shl(const ir_node *n) if ((ta != tarval_bad) && (tb != tarval_bad)) { return tarval_shl(ta, tb); } + + if (is_oversize_shift(n)) + return get_mode_null(get_irn_mode(n)); + return tarval_bad; } /* computed_value_Shl */ @@ -358,6 +408,9 @@ static ir_tarval *computed_value_Shr(const ir_node *n) if ((ta != tarval_bad) && (tb != tarval_bad)) { return tarval_shr(ta, tb); } + if (is_oversize_shift(n)) + return get_mode_null(get_irn_mode(n)); + return tarval_bad; } /* computed_value_Shr */ @@ -395,17 +448,52 @@ static ir_tarval *computed_value_Rotl(const ir_node *n) return tarval_bad; } /* computed_value_Rotl */ +bool ir_zero_when_converted(const ir_node *node, ir_mode *dest_mode) +{ + ir_mode *mode = get_irn_mode(node); + if (get_mode_arithmetic(mode) != irma_twos_complement + || get_mode_arithmetic(dest_mode) != irma_twos_complement) + return false; + + if (is_Shl(node)) { + ir_node *count = get_Shl_right(node); + if (is_Const(count)) { + ir_tarval *tv = get_Const_tarval(count); + if (tarval_is_long(tv)) { + long shiftval = get_tarval_long(tv); + long destbits = get_mode_size_bits(dest_mode); + if (shiftval >= destbits + && shiftval < (long)get_mode_modulo_shift(mode)) + return true; + } + } + } + if (is_And(node)) { + ir_node *right = get_And_right(node); + if (is_Const(right)) { + ir_tarval *tv = get_Const_tarval(right); + ir_tarval *conved = tarval_convert_to(tv, dest_mode); + return tarval_is_null(conved); + } + } + return false; +} + /** * Return the value of a Conv. */ static ir_tarval *computed_value_Conv(const ir_node *n) { - ir_node *a = get_Conv_op(n); - ir_tarval *ta = value_of(a); + ir_node *a = get_Conv_op(n); + ir_tarval *ta = value_of(a); + ir_mode *mode = get_irn_mode(n); if (ta != tarval_bad) return tarval_convert_to(ta, get_irn_mode(n)); + if (ir_zero_when_converted(a, mode)) + return get_mode_null(mode); + return tarval_bad; } /* computed_value_Conv */ @@ -661,141 +749,6 @@ static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops #undef CASE } /* firm_set_default_computed_value */ -/** - * Returns a equivalent block for another block. - * If the block has only one predecessor, this is - * the equivalent one. If the only predecessor of a block is - * the block itself, this is a dead block. - * - * If both predecessors of a block are the branches of a binary - * Cond, the equivalent block is Cond's block. - * - * If all predecessors of a block are bad or lies in a dead - * block, the current block is dead as well. - * - * Note, that blocks are NEVER turned into Bad's, instead - * the dead_block flag is set. So, never test for is_Bad(block), - * always use is_dead_Block(block). - */ -static ir_node *equivalent_node_Block(ir_node *n) -{ - ir_node *oldn = n; - int n_preds; - ir_graph *irg; - - /* don't optimize dead or labeled blocks */ - if (is_Block_dead(n) || has_Block_entity(n)) - return n; - - n_preds = get_Block_n_cfgpreds(n); - - /* The Block constructor does not call optimize, but mature_immBlock() - calls the optimization. */ - assert(get_Block_matured(n)); - - irg = get_irn_irg(n); - - /* Straightening: a single entry Block following a single exit Block - can be merged, if it is not the Start block. */ - /* !!! Beware, all Phi-nodes of n must have been optimized away. - This should be true, as the block is matured before optimize is called. - But what about Phi-cycles with the Phi0/Id that could not be resolved? - Remaining Phi nodes are just Ids. 
*/ - if (n_preds == 1) { - ir_node *pred = skip_Proj(get_Block_cfgpred(n, 0)); - - if (is_Jmp(pred)) { - ir_node *predblock = get_nodes_block(pred); - if (predblock == oldn) { - /* Jmp jumps into the block it is in -- deal self cycle. */ - n = set_Block_dead(n); - DBG_OPT_DEAD_BLOCK(oldn, n); - } else { - n = predblock; - DBG_OPT_STG(oldn, n); - } - } else if (is_Cond(pred)) { - ir_node *predblock = get_nodes_block(pred); - if (predblock == oldn) { - /* Jmp jumps into the block it is in -- deal self cycle. */ - n = set_Block_dead(n); - DBG_OPT_DEAD_BLOCK(oldn, n); - } - } - } else if (n_preds == 2) { - /* Test whether Cond jumps twice to this block - * The more general case which more than 2 predecessors is handles - * in optimize_cf(), we handle only this special case for speed here. - */ - ir_node *a = get_Block_cfgpred(n, 0); - ir_node *b = get_Block_cfgpred(n, 1); - - if (is_Proj(a) && is_Proj(b)) { - ir_node *cond = get_Proj_pred(a); - - if (cond == get_Proj_pred(b) && is_Cond(cond) && - get_irn_mode(get_Cond_selector(cond)) == mode_b) { - /* Also a single entry Block following a single exit Block. Phis have - twice the same operand and will be optimized away. */ - n = get_nodes_block(cond); - DBG_OPT_IFSIM1(oldn, a, b, n); - } - } - } else if (get_opt_unreachable_code() && - (n != get_irg_start_block(irg)) && - (n != get_irg_end_block(irg))) { - int i; - - /* If all inputs are dead, this block is dead too, except if it is - the start or end block. This is one step of unreachable code - elimination */ - for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) { - ir_node *pred = get_Block_cfgpred(n, i); - ir_node *pred_blk; - - if (is_Bad(pred)) continue; - pred_blk = get_nodes_block(skip_Proj(pred)); - - if (is_Block_dead(pred_blk)) continue; - - if (pred_blk != n) { - /* really found a living input */ - break; - } - } - if (i < 0) { - n = set_Block_dead(n); - DBG_OPT_DEAD_BLOCK(oldn, n); - } - } - - return n; -} /* equivalent_node_Block */ - -/** - * Returns a equivalent node for a Jmp, a Bad :-) - * Of course this only happens if the Block of the Jmp is dead. - */ -static ir_node *equivalent_node_Jmp(ir_node *n) -{ - ir_node *oldn = n; - - /* unreachable code elimination */ - if (is_Block_dead(get_nodes_block(n))) { - ir_graph *irg = get_irn_irg(n); - n = get_irg_bad(irg); - DBG_OPT_DEAD_BLOCK(oldn, n); - } - return n; -} /* equivalent_node_Jmp */ - -/** Raise is handled in the same way as Jmp. */ -#define equivalent_node_Raise equivalent_node_Jmp - - -/* We do not evaluate Cond here as we replace it by a new node, a Jmp. - See transform_node_Proj_Cond(). */ - /** * Optimize operations that are commutative and have neutral 0, * so a op 0 = 0 op a = a. @@ -1004,7 +957,7 @@ static ir_node *equivalent_node_Sub(ir_node *n) /** - * Optimize an "self-inverse unary op", ie op(op(n)) = n. + * Optimize an "self-inverse unary op", i.e. op(op(n)) = n. * * @todo * -(-a) == a, but might overflow two times. @@ -1299,7 +1252,6 @@ static ir_node *equivalent_node_Phi(ir_node *n) int i, n_preds; ir_node *oldn = n; - ir_node *block; ir_node *first_val = NULL; /* to shutup gcc */ if (!get_opt_optimize() && @@ -1308,52 +1260,25 @@ static ir_node *equivalent_node_Phi(ir_node *n) n_preds = get_Phi_n_preds(n); - block = get_nodes_block(n); - /* Control dead */ - if (is_Block_dead(block)) { - ir_graph *irg = get_irn_irg(n); - return get_irg_bad(irg); - } - - if (n_preds == 0) return n; /* Phi of dead Region without predecessors. */ + /* Phi of dead Region without predecessors. 
*/ + if (n_preds == 0) + return n; /* Find first non-self-referencing input */ for (i = 0; i < n_preds; ++i) { first_val = get_Phi_pred(n, i); - if ( (first_val != n) /* not self pointer */ -#if 0 - /* BEWARE: when the if is changed to 1, Phis will ignore their Bad - * predecessors. Then, Phi nodes in unreachable code might be removed, - * causing nodes pointing to themselev (Adds for instance). - * This is really bad and causes endless recursion on several - * code pathes, so we do NOT optimize such code. - * This is not that bad as it sounds, optimize_cf() removes bad control flow - * (and bad Phi predecessors), so live code is optimized later. - */ - && (! is_Bad(get_Block_cfgpred(block, i))) -#endif - ) { /* value not dead */ - break; /* then found first value. */ + /* not self pointer */ + if (first_val != n) { + /* then found first value. */ + break; } } - if (i >= n_preds) { - ir_graph *irg = get_irn_irg(n); - /* A totally Bad or self-referencing Phi (we didn't break the above loop) */ - return get_irg_bad(irg); - } - /* search for rest of inputs, determine if any of these are non-self-referencing */ while (++i < n_preds) { ir_node *scnd_val = get_Phi_pred(n, i); - if ( (scnd_val != n) - && (scnd_val != first_val) -#if 0 - /* see above */ - && (! is_Bad(get_Block_cfgpred(block, i))) -#endif - ) { + if (scnd_val != n && scnd_val != first_val) { break; } } @@ -1366,49 +1291,6 @@ static ir_node *equivalent_node_Phi(ir_node *n) return n; } /* equivalent_node_Phi */ -/** - * Several optimizations: - * - fold Sync-nodes, iff they have only one predecessor except - * themselves. - */ -static ir_node *equivalent_node_Sync(ir_node *n) -{ - int arity = get_Sync_n_preds(n); - int i; - - for (i = 0; i < arity;) { - ir_node *pred = get_Sync_pred(n, i); - int j; - - /* Remove Bad predecessors */ - if (is_Bad(pred)) { - del_Sync_n(n, i); - --arity; - continue; - } - - /* Remove duplicate predecessors */ - for (j = 0;; ++j) { - if (j >= i) { - ++i; - break; - } - if (get_Sync_pred(n, j) == pred) { - del_Sync_n(n, i); - --arity; - break; - } - } - } - - if (arity == 0) { - ir_graph *irg = get_irn_irg(n); - return get_irg_bad(irg); - } - if (arity == 1) return get_Sync_pred(n, 0); - return n; -} /* equivalent_node_Sync */ - /** * Optimize Proj(Tuple). */ @@ -1473,13 +1355,6 @@ static ir_node *equivalent_node_Proj_CopyB(ir_node *proj) proj = get_CopyB_mem(copyb); DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP); break; - - case pn_CopyB_X_except: { - ir_graph *irg = get_irn_irg(proj); - DBG_OPT_EXC_REM(proj); - proj = get_irg_bad(irg); - break; - } } } return proj; @@ -1523,12 +1398,6 @@ static ir_node *equivalent_node_Proj_Bound(ir_node *proj) DBG_OPT_EXC_REM(proj); proj = get_Bound_mem(bound); break; - case pn_Bound_X_except: { - ir_graph *irg = get_irn_irg(proj); - DBG_OPT_EXC_REM(proj); - proj = get_irg_bad(irg); - break; - } case pn_Bound_res: proj = idx; DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP); @@ -1541,56 +1410,6 @@ static ir_node *equivalent_node_Proj_Bound(ir_node *proj) return proj; } /* equivalent_node_Proj_Bound */ -/** - * Optimize an Exception Proj(Load) with a non-null address. 
- */ -static ir_node *equivalent_node_Proj_Load(ir_node *proj) -{ - if (get_opt_ldst_only_null_ptr_exceptions()) { - if (get_irn_mode(proj) == mode_X) { - ir_node *load = get_Proj_pred(proj); - - /* get the Load address */ - const ir_node *addr = get_Load_ptr(load); - const ir_node *confirm; - - if (value_not_null(addr, &confirm)) { - if (get_Proj_proj(proj) == pn_Load_X_except) { - ir_graph *irg = get_irn_irg(proj); - DBG_OPT_EXC_REM(proj); - return get_irg_bad(irg); - } - } - } - } - return proj; -} /* equivalent_node_Proj_Load */ - -/** - * Optimize an Exception Proj(Store) with a non-null address. - */ -static ir_node *equivalent_node_Proj_Store(ir_node *proj) -{ - if (get_opt_ldst_only_null_ptr_exceptions()) { - if (get_irn_mode(proj) == mode_X) { - ir_node *store = get_Proj_pred(proj); - - /* get the load/store address */ - const ir_node *addr = get_Store_ptr(store); - const ir_node *confirm; - - if (value_not_null(addr, &confirm)) { - if (get_Proj_proj(proj) == pn_Store_X_except) { - ir_graph *irg = get_irn_irg(proj); - DBG_OPT_EXC_REM(proj); - return get_irg_bad(irg); - } - } - } - } - return proj; -} /* equivalent_node_Proj_Store */ - /** * Does all optimizations on nodes that must be done on its Projs * because of creating new nodes. @@ -1598,14 +1417,6 @@ static ir_node *equivalent_node_Proj_Store(ir_node *proj) static ir_node *equivalent_node_Proj(ir_node *proj) { ir_node *n = get_Proj_pred(proj); - - if (get_irn_mode(proj) == mode_X) { - if (is_Block_dead(get_nodes_block(n))) { - /* Remove dead control flow -- early gigo(). */ - ir_graph *irg = get_irn_irg(proj); - return get_irg_bad(irg); - } - } if (n->op->ops.equivalent_node_Proj) return n->op->ops.equivalent_node_Proj(proj); return proj; @@ -1784,9 +1595,6 @@ static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *op break switch (code) { - CASE(Block); - CASE(Jmp); - CASE(Raise); CASE(Eor); CASE(Add); CASE(Shl); @@ -1801,13 +1609,10 @@ static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *op CASE(And); CASE(Conv); CASE(Phi); - CASE(Sync); CASE_PROJ(Tuple); CASE_PROJ(Div); CASE_PROJ(CopyB); CASE_PROJ(Bound); - CASE_PROJ(Load); - CASE_PROJ(Store); CASE(Proj); CASE(Id); CASE(Mux); @@ -2235,22 +2040,6 @@ static ir_node *transform_node_Add(ir_node *n) DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1); return n; } - if (op == b) { - /* ~x + x = -1 */ - n = new_r_Const(irg, get_mode_minus_one(mode)); - DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X); - return n; - } - } - if (is_Not(b)) { - ir_node *op = get_Not_op(b); - - if (op == a) { - /* x + ~x = -1 */ - n = new_r_Const(irg, get_mode_minus_one(mode)); - DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X); - return n; - } } } } @@ -2259,9 +2048,9 @@ static ir_node *transform_node_Add(ir_node *n) b_vrp = vrp_get_info(b); if (a_vrp && b_vrp) { - ir_tarval *c = tarval_and(a_vrp->bits_not_set, b_vrp->bits_not_set); + ir_tarval *vrp_val = tarval_and(a_vrp->bits_not_set, b_vrp->bits_not_set); - if (tarval_is_null(c)) { + if (tarval_is_null(vrp_val)) { dbg_info *dbgi = get_irn_dbg_info(n); return new_rd_Or(dbgi, get_nodes_block(n), a, b, mode); } @@ -2392,6 +2181,7 @@ restart: } DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD); return n; +#if 0 } else if (is_Mul(b)) { /* a - (b * C) -> a + (b * -C) */ ir_node *m_right = get_Mul_right(b); if (is_Const(m_right)) { @@ -2410,6 +2200,7 @@ restart: return n; } } +#endif } /* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... 
*/ @@ -2571,8 +2362,8 @@ restart: } if (get_mode_arithmetic(mode) == irma_twos_complement) { + /* c - ~X = X + (c+1) */ if (is_Const(a) && is_Not(b)) { - /* c - ~X = X + (c+1) */ ir_tarval *tv = get_Const_tarval(a); tv = tarval_add(tv, get_mode_one(mode)); @@ -2585,6 +2376,24 @@ restart: return n; } } + /* x-(x&y) = x & ~y */ + if (is_And(b)) { + ir_node *and_left = get_And_left(b); + ir_node *and_right = get_And_right(b); + if (and_right == a) { + ir_node *tmp = and_left; + and_left = and_right; + and_right = tmp; + } + if (and_left == a) { + dbg_info *dbgi = get_irn_dbg_info(n); + ir_node *block = get_nodes_block(n); + ir_mode *mode = get_irn_mode(n); + ir_node *notn = new_rd_Not(dbgi, block, and_right, mode); + ir_node *and = new_rd_And(dbgi, block, a, notn, mode); + return and; + } + } } return n; } /* transform_node_Sub */ @@ -2856,10 +2665,10 @@ make_tuple: /* skip a potential Pin */ mem = skip_Pin(mem); - turn_into_tuple(n, pn_Div_max); + turn_into_tuple(n, pn_Div_max+1); set_Tuple_pred(n, pn_Div_M, mem); set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk)); - set_Tuple_pred(n, pn_Div_X_except, get_irg_bad(irg)); + set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg, mode_X)); set_Tuple_pred(n, pn_Div_res, value); } return n; @@ -2948,10 +2757,10 @@ make_tuple: /* skip a potential Pin */ mem = skip_Pin(mem); - turn_into_tuple(n, pn_Mod_max); + turn_into_tuple(n, pn_Mod_max+1); set_Tuple_pred(n, pn_Mod_M, mem); set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk)); - set_Tuple_pred(n, pn_Mod_X_except, get_irg_bad(irg)); + set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg, mode_X)); set_Tuple_pred(n, pn_Mod_res, value); } return n; @@ -2982,13 +2791,13 @@ static ir_node *transform_node_Cond(ir_node *n) Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */ ir_node *blk = get_nodes_block(n); jmp = new_r_Jmp(blk); - turn_into_tuple(n, pn_Cond_max); + turn_into_tuple(n, pn_Cond_max+1); if (ta == tarval_b_true) { - set_Tuple_pred(n, pn_Cond_false, get_irg_bad(irg)); + set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg, mode_X)); set_Tuple_pred(n, pn_Cond_true, jmp); } else { set_Tuple_pred(n, pn_Cond_false, jmp); - set_Tuple_pred(n, pn_Cond_true, get_irg_bad(irg)); + set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X)); } /* We might generate an endless loop, so keep it alive. */ add_End_keepalive(get_irg_end(irg), blk); @@ -3118,48 +2927,229 @@ static ir_node *create_zero_const(ir_graph *irg, ir_mode *mode) return cnst; } +static bool is_shiftop(const ir_node *n) +{ + return is_Shl(n) || is_Shr(n) || is_Shrs(n) || is_Rotl(n); +} + /** - * Transform an And. + * normalisation: (x & c1) >> c2 to (x >> c2) & (c1 >> c2) + * (we can use: + * - and, or, xor instead of & + * - Shl, Shr, Shrs, rotl instead of >> + * (with a special case for Or/Xor + Shrs) + * + * This normalisation is good for things like x-(x&y) esp. in 186.crafty. 
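+ *
+ * Example: (x & 0xF0) >> 4 becomes (x >> 4) & 0x0F, i.e. the mask constant
+ * is folded through the shift and the shared operand x is exposed.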
*/
-static ir_node *transform_node_And(ir_node *n)
+static ir_node *transform_node_shift_bitop(ir_node *n)
 {
-	ir_node *c, *oldn = n;
-	ir_node *a = get_And_left(n);
-	ir_node *b = get_And_right(n);
-	ir_mode *mode;
-	vrp_attr *a_vrp, *b_vrp;
+	ir_graph *irg = get_irn_irg(n);
+	ir_node *right = get_binop_right(n);
+	ir_mode *mode = get_irn_mode(n);
+	ir_node *left;
+	ir_node *bitop_left;
+	ir_node *bitop_right;
+	ir_op *op_left;
+	ir_node *block;
+	dbg_info *dbgi;
+	ir_node *new_shift;
+	ir_node *new_bitop;
+	ir_node *new_const;
+	ir_tarval *tv1;
+	ir_tarval *tv2;
+	ir_tarval *tv_shift;

-	if (is_Cmp(a) && is_Cmp(b)) {
-		ir_node *a_left = get_Cmp_left(a);
-		ir_node *a_right = get_Cmp_right(a);
-		ir_node *b_left = get_Cmp_left(b);
-		ir_node *b_right = get_Cmp_right(b);
-		ir_relation a_relation = get_Cmp_relation(a);
-		ir_relation b_relation = get_Cmp_relation(b);
-		/* we can combine the relations of two compares with the same
-		 * operands */
-		if (a_left == b_left && b_left == b_right) {
-			dbg_info *dbgi = get_irn_dbg_info(n);
-			ir_node *block = get_nodes_block(n);
-			ir_relation new_relation = a_relation & b_relation;
-			return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
-		}
-		/* Cmp(a==0) and Cmp(b==0) can be optimized to Cmp(a|b==0) */
-		if (is_Const(a_right) && is_Const_null(a_right)
-		 && is_Const(b_right) && is_Const_null(b_right)
-		 && a_relation == b_relation && a_relation == ir_relation_equal
-		 && !mode_is_float(get_irn_mode(a_left))
-		 && !mode_is_float(get_irn_mode(b_left))) {
-			dbg_info *dbgi = get_irn_dbg_info(n);
-			ir_node *block = get_nodes_block(n);
-			ir_mode *mode = get_irn_mode(a_left);
-			ir_node *n_b_left = get_irn_mode(b_left) != mode ?
-				new_rd_Conv(dbgi, block, b_left, mode) : b_left;
-			ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
-			ir_graph *irg = get_irn_irg(n);
-			ir_node *zero = create_zero_const(irg, mode);
-			return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal);
-		}
+	if (is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
+		return n;
+
+	assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n));
+
+	if (!is_Const(right))
+		return n;
+
+	left = get_binop_left(n);
+	op_left = get_irn_op(left);
+	if (op_left != op_And && op_left != op_Or && op_left != op_Eor)
+		return n;
+
+	/* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
+	if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) {
+		/* TODO: test if sign bit is affected */
+		return n;
+	}
+
+	bitop_right = get_binop_right(left);
+	if (!is_Const(bitop_right))
+		return n;
+
+	bitop_left = get_binop_left(left);
+
+	block = get_nodes_block(n);
+	dbgi = get_irn_dbg_info(n);
+	tv1 = get_Const_tarval(bitop_right);
+	tv2 = get_Const_tarval(right);
+
+	assert(get_tarval_mode(tv1) == mode);
+
+	if (is_Shl(n)) {
+		new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode);
+		tv_shift = tarval_shl(tv1, tv2);
+	} else if (is_Shr(n)) {
+		new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode);
+		tv_shift = tarval_shr(tv1, tv2);
+	} else if (is_Shrs(n)) {
+		new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode);
+		tv_shift = tarval_shrs(tv1, tv2);
+	} else {
+		assert(is_Rotl(n));
+		new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode);
+		tv_shift = tarval_rotl(tv1, tv2);
+	}
+
+	assert(get_tarval_mode(tv_shift) == mode);
+	irg = get_irn_irg(n);
+	new_const = new_r_Const(irg, tv_shift);
+
+	if (op_left == op_And) {
+		new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
+	} else if (op_left == op_Or) {
+		new_bitop = new_rd_Or(dbgi, block, 
new_shift, new_const, mode);
+	} else {
+		assert(op_left == op_Eor);
+		new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode);
+	}
+
+	return new_bitop;
+}
+
+/**
+ * normalisation: (x >> c1) & c2 to (x & (c2 << c1)) >> c1
+ * (we can use:
+ * - and, or, xor instead of &
+ * - Shl, Shr, Shrs, rotl instead of >>
+ * (with a special case for Or/Xor + Shrs)
+ *
+ * This normalisation is usually good for the backend since << C can often be
+ * matched as address-mode.
+ */
+static ir_node *transform_node_bitop_shift(ir_node *n)
+{
+	ir_graph *irg = get_irn_irg(n);
+	ir_node *left = get_binop_left(n);
+	ir_node *right = get_binop_right(n);
+	ir_mode *mode = get_irn_mode(n);
+	ir_node *shift_left;
+	ir_node *shift_right;
+	ir_node *block;
+	dbg_info *dbg_bitop;
+	dbg_info *dbg_shift;
+	ir_node *new_bitop;
+	ir_node *new_shift;
+	ir_node *new_const;
+	ir_tarval *tv1;
+	ir_tarval *tv2;
+	ir_tarval *tv_bitop;
+
+	if (!is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
+		return n;
+
+	assert(is_And(n) || is_Or(n) || is_Eor(n));
+	if (!is_Const(right) || !is_shiftop(left))
+		return n;
+
+	shift_left = get_binop_left(left);
+	shift_right = get_binop_right(left);
+	if (!is_Const(shift_right))
+		return n;
+
+	/* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
+	if (is_Shrs(left)) {
+		/* TODO this could be improved */
+		return n;
+	}
+
+	irg = get_irn_irg(n);
+	block = get_nodes_block(n);
+	dbg_bitop = get_irn_dbg_info(n);
+	dbg_shift = get_irn_dbg_info(left);
+	tv1 = get_Const_tarval(shift_right);
+	tv2 = get_Const_tarval(right);
+	assert(get_tarval_mode(tv2) == mode);
+
+	if (is_Shl(left)) {
+		tv_bitop = tarval_shr(tv2, tv1);
+	} else if (is_Shr(left)) {
+		tv_bitop = tarval_shl(tv2, tv1);
+	} else {
+		assert(is_Rotl(left));
+		tv_bitop = tarval_rotl(tv2, tarval_neg(tv1));
+	}
+	new_const = new_r_Const(irg, tv_bitop);
+
+	if (is_And(n)) {
+		new_bitop = new_rd_And(dbg_bitop, block, shift_left, new_const, mode);
+	} else if (is_Or(n)) {
+		new_bitop = new_rd_Or(dbg_bitop, block, shift_left, new_const, mode);
+	} else {
+		assert(is_Eor(n));
+		new_bitop = new_rd_Eor(dbg_bitop, block, shift_left, new_const, mode);
+	}
+
+	if (is_Shl(left)) {
+		new_shift = new_rd_Shl(dbg_shift, block, new_bitop, shift_right, mode);
+	} else if (is_Shr(left)) {
+		new_shift = new_rd_Shr(dbg_shift, block, new_bitop, shift_right, mode);
+	} else {
+		assert(is_Rotl(left));
+		new_shift = new_rd_Rotl(dbg_shift, block, new_bitop, shift_right, mode);
+	}
+
+	return new_shift;
+}
+
+/**
+ * Transform an And. 
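+ *
+ * Among other rules: an And of two Cmps on identical operands intersects
+ * their relations, and, for suitable integer modes, Cmp(a==b) && Cmp(c==d)
+ * becomes Cmp((a^b)|(c^d) == 0), since the Or is zero exactly when both
+ * Eors are.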
+ */ +static ir_node *transform_node_And(ir_node *n) +{ + ir_node *c, *oldn = n; + ir_node *a = get_And_left(n); + ir_node *b = get_And_right(n); + ir_mode *mode; + vrp_attr *a_vrp, *b_vrp; + + if (is_Cmp(a) && is_Cmp(b)) { + ir_node *a_left = get_Cmp_left(a); + ir_node *a_right = get_Cmp_right(a); + ir_node *b_left = get_Cmp_left(b); + ir_node *b_right = get_Cmp_right(b); + ir_relation a_relation = get_Cmp_relation(a); + ir_relation b_relation = get_Cmp_relation(b); + /* we can combine the relations of two compares with the same + * operands */ + if (a_left == b_left && b_left == b_right) { + dbg_info *dbgi = get_irn_dbg_info(n); + ir_node *block = get_nodes_block(n); + ir_relation new_relation = a_relation & b_relation; + return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation); + } + /* Cmp(a==b) and Cmp(c==d) can be optimized to Cmp((a^b)|(c^d)==0) */ + if (a_relation == b_relation && a_relation == ir_relation_equal + && !mode_is_float(get_irn_mode(a_left)) + && !mode_is_float(get_irn_mode(b_left)) + && values_in_mode(get_irn_mode(a_left), get_irn_mode(b_left))) { + dbg_info *dbgi = get_irn_dbg_info(n); + ir_node *block = get_nodes_block(n); + ir_mode *a_mode = get_irn_mode(a_left); + ir_mode *b_mode = get_irn_mode(b_left); + ir_node *xora = new_rd_Eor(dbgi, block, a_left, a_right, a_mode); + ir_node *xorb = new_rd_Eor(dbgi, block, b_left, b_right, b_mode); + ir_node *conv = new_rd_Conv(dbgi, block, xorb, a_mode); + ir_node *or = new_rd_Or(dbgi, block, xora, conv, a_mode); + ir_graph *irg = get_irn_irg(n); + ir_node *zero = create_zero_const(irg, a_mode); + return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal); + } } mode = get_irn_mode(n); @@ -3281,6 +3271,8 @@ static ir_node *transform_node_And(ir_node *n) } n = transform_bitwise_distributive(n, transform_node_And); + if (is_And(n)) + n = transform_node_bitop_shift(n); return n; } /* transform_node_And */ @@ -3367,6 +3359,9 @@ static ir_node *transform_node_Eor(ir_node *n) } n = transform_bitwise_distributive(n, transform_node_Eor); + if (is_Eor(n)) + n = transform_node_bitop_shift(n); + return n; } /* transform_node_Eor */ @@ -3542,7 +3537,7 @@ static ir_node *transform_node_Proj_Load(ir_node *proj) if (get_Proj_proj(proj) == pn_Load_X_except) { ir_graph *irg = get_irn_irg(proj); DBG_OPT_EXC_REM(proj); - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } else { ir_node *blk = get_nodes_block(load); return new_r_Jmp(blk); @@ -3574,7 +3569,7 @@ static ir_node *transform_node_Proj_Store(ir_node *proj) if (get_Proj_proj(proj) == pn_Store_X_except) { ir_graph *irg = get_irn_irg(proj); DBG_OPT_EXC_REM(proj); - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } else { ir_node *blk = get_nodes_block(store); return new_r_Jmp(blk); @@ -3616,7 +3611,7 @@ static ir_node *transform_node_Proj_Div(ir_node *proj) ir_graph *irg = get_irn_irg(proj); /* we found an exception handler, remove it */ DBG_OPT_EXC_REM(proj); - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } case pn_Div_M: { @@ -3671,7 +3666,7 @@ static ir_node *transform_node_Proj_Mod(ir_node *proj) ir_graph *irg = get_irn_irg(proj); /* we found an exception handler, remove it */ DBG_OPT_EXC_REM(proj); - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } case pn_Mod_M: { @@ -3728,7 +3723,7 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj) } else { ir_graph *irg = get_irn_irg(proj); /* this case will NEVER be taken, kill it */ - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } } } else { @@ -3742,19 +3737,19 @@ static 
ir_node *transform_node_Proj_Cond(ir_node *proj) ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp); ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp); - if ((cmp_result & ir_relation_greater) == cmp_result && (cmp_result2 - & ir_relation_less) == cmp_result2) { + if ((cmp_result & ir_relation_greater) == cmp_result + && (cmp_result2 & ir_relation_less) == cmp_result2) { ir_graph *irg = get_irn_irg(proj); - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } } else if (b_vrp->range_type == VRP_ANTIRANGE) { ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp); ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp); - if ((cmp_result & ir_relation_less_equal) == cmp_result && (cmp_result2 - & ir_relation_greater_equal) == cmp_result2) { + if ((cmp_result & ir_relation_less_equal) == cmp_result + && (cmp_result2 & ir_relation_greater_equal) == cmp_result2) { ir_graph *irg = get_irn_irg(proj); - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } } @@ -3763,7 +3758,7 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj) b_vrp->bits_set ) == ir_relation_equal)) { ir_graph *irg = get_irn_irg(proj); - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } if (!(tarval_cmp( @@ -3773,10 +3768,8 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj) tarval_not(b_vrp->bits_not_set)) == ir_relation_equal)) { ir_graph *irg = get_irn_irg(proj); - return get_irg_bad(irg); + return new_r_Bad(irg, mode_X); } - - } } } @@ -3827,30 +3820,51 @@ static ir_node *transform_node_Cmp(ir_node *n) } /* Remove unnecessary conversions */ - /* TODO handle conv+constant */ if (is_Conv(left) && is_Conv(right)) { - ir_node *op_left = get_Conv_op(left); - ir_node *op_right = get_Conv_op(right); - ir_mode *mode_left = get_irn_mode(op_left); - ir_mode *mode_right = get_irn_mode(op_right); + ir_node *op_left = get_Conv_op(left); + ir_node *op_right = get_Conv_op(right); + ir_mode *mode_left = get_irn_mode(op_left); + ir_mode *mode_right = get_irn_mode(op_right); if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode) && mode_left != mode_b && mode_right != mode_b) { - ir_node *block = get_nodes_block(n); + ir_node *block = get_nodes_block(n); if (mode_left == mode_right) { - left = op_left; - right = op_right; + left = op_left; + right = op_right; changed = true; DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV); } else if (smaller_mode(mode_left, mode_right)) { - left = new_r_Conv(block, op_left, mode_right); - right = op_right; + left = new_r_Conv(block, op_left, mode_right); + right = op_right; changed = true; DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV); } else if (smaller_mode(mode_right, mode_left)) { - left = op_left; - right = new_r_Conv(block, op_right, mode_left); + left = op_left; + right = new_r_Conv(block, op_right, mode_left); + changed = true; + DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV); + } + mode = get_irn_mode(left); + } + } + if (is_Conv(left) && is_Const(right)) { + ir_node *op_left = get_Conv_op(left); + ir_mode *mode_left = get_irn_mode(op_left); + if (smaller_mode(mode_left, mode) && mode_left != mode_b) { + ir_tarval *tv = get_Const_tarval(right); + tarval_int_overflow_mode_t last_mode + = tarval_get_integer_overflow_mode(); + ir_tarval *new_tv; + tarval_set_integer_overflow_mode(TV_OVERFLOW_BAD); + new_tv = tarval_convert_to(tv, mode_left); + tarval_set_integer_overflow_mode(last_mode); + if (new_tv != tarval_bad) { + ir_graph *irg = get_irn_irg(n); + left = op_left; + right = new_r_Const(irg, new_tv); + mode = get_irn_mode(left); changed = 
true; DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV); } @@ -4192,19 +4206,6 @@ static ir_node *transform_node_Cmp(ir_node *n) /* the following reassociations work only for == and != */ if (relation == ir_relation_equal || relation == ir_relation_less_greater) { - -#if 0 /* Might be not that good in general */ - /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */ - if (tarval_is_null(tv) && is_Sub(left)) { - right = get_Sub_right(left); - left = get_Sub_left(left); - - tv = value_of(right); - changed = 1; - DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C); - } -#endif - if (tv != tarval_bad) { /* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */ if (is_Sub(left)) { @@ -4485,10 +4486,12 @@ static ir_node *transform_node_Proj_CopyB(ir_node *proj) DBG_OPT_EXC_REM(proj); proj = new_r_Jmp(get_nodes_block(copyb)); break; - case pn_CopyB_X_except: + case pn_CopyB_X_except: { + ir_graph *irg = get_irn_irg(proj); DBG_OPT_EXC_REM(proj); - proj = get_irg_bad(get_irn_irg(proj)); + proj = new_r_Bad(irg, mode_X); break; + } default: break; } @@ -4536,7 +4539,7 @@ static ir_node *transform_node_Proj_Bound(ir_node *proj) break; case pn_Bound_X_except: DBG_OPT_EXC_REM(proj); - proj = get_irg_bad(get_irn_irg(proj)); + proj = new_r_Bad(get_irn_irg(proj), mode_X); break; case pn_Bound_res: proj = idx; @@ -4566,14 +4569,57 @@ static ir_node *transform_node_Proj(ir_node *proj) return proj; } /* transform_node_Proj */ -/** - * Move Confirms down through Phi nodes. - */ +static bool is_block_unreachable(const ir_node *block) +{ + const ir_graph *irg = get_irn_irg(block); + if (!is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) + return false; + return get_Block_dom_depth(block) < 0; +} + +static ir_node *transform_node_Block(ir_node *block) +{ + ir_graph *irg = get_irn_irg(block); + int arity = get_irn_arity(block); + ir_node *bad = NULL; + int i; + + if (!is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) + return block; + + for (i = 0; i < arity; ++i) { + ir_node *pred = get_Block_cfgpred(block, i); + ir_node *pred_block = get_nodes_block(pred); + if (!is_Bad(pred) && !is_block_unreachable(pred_block)) + continue; + if (bad == NULL) + bad = new_r_Bad(irg, mode_X); + set_irn_n(block, i, bad); + } + + return block; +} + static ir_node *transform_node_Phi(ir_node *phi) { - int i, n; - ir_mode *mode = get_irn_mode(phi); + int n = get_irn_arity(phi); + ir_mode *mode = get_irn_mode(phi); + ir_node *block = get_nodes_block(phi); + ir_graph *irg = get_irn_irg(phi); + ir_node *bad = NULL; + int i; + + /* Set phi-operands for bad-block inputs to bad */ + for (i = 0; i < n; ++i) { + ir_node *pred = get_Block_cfgpred(block, i); + if (is_Bad(pred) || is_block_unreachable(get_nodes_block(pred))) { + if (bad == NULL) + bad = new_r_Bad(irg, mode); + set_irn_n(phi, i, bad); + } + } + /* Move Confirms down through Phi nodes. 
*/ if (mode_is_reference(mode)) { n = get_irn_arity(phi); @@ -4802,20 +4848,19 @@ static ir_node *transform_node_Or_Rotl(ir_node *irn_or) return n; } /* transform_node_Or_Rotl */ -static bool is_cmp_unequal_zero(const ir_node *node) +static bool is_cmp_unequal(const ir_node *node) { ir_relation relation = get_Cmp_relation(node); ir_node *left = get_Cmp_left(node); ir_node *right = get_Cmp_right(node); ir_mode *mode = get_irn_mode(left); - if (!is_Const(right) || !is_Const_null(right)) - return false; - if (mode_is_signed(mode)) { - return relation == ir_relation_less_greater; - } else { + if (relation == ir_relation_less_greater) + return true; + + if (!mode_is_signed(mode) && is_Const(right) && is_Const_null(right)) return relation == ir_relation_greater; - } + return false; } /** @@ -4855,18 +4900,21 @@ static ir_node *transform_node_Or(ir_node *n) ir_relation new_relation = a_relation | b_relation; return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation); } - /* Cmp(a!=0) or Cmp(b!=0) => Cmp(a|b != 0) */ - if (is_cmp_unequal_zero(a) && is_cmp_unequal_zero(b) - && !mode_is_float(get_irn_mode(a_left)) - && !mode_is_float(get_irn_mode(b_left))) { - ir_graph *irg = get_irn_irg(n); - dbg_info *dbgi = get_irn_dbg_info(n); - ir_node *block = get_nodes_block(n); - ir_mode *mode = get_irn_mode(a_left); - ir_node *n_b_left = get_irn_mode(b_left) != mode ? - new_rd_Conv(dbgi, block, b_left, mode) : b_left; - ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode); - ir_node *zero = create_zero_const(irg, mode); + /* Cmp(a!=b) or Cmp(c!=d) => Cmp((a^b)|(c^d) != 0) */ + if (is_cmp_unequal(a) && is_cmp_unequal(b) + && !mode_is_float(get_irn_mode(a_left)) + && !mode_is_float(get_irn_mode(b_left)) + && values_in_mode(get_irn_mode(a_left), get_irn_mode(b_left))) { + ir_graph *irg = get_irn_irg(n); + dbg_info *dbgi = get_irn_dbg_info(n); + ir_node *block = get_nodes_block(n); + ir_mode *a_mode = get_irn_mode(a_left); + ir_mode *b_mode = get_irn_mode(b_left); + ir_node *xora = new_rd_Eor(dbgi, block, a_left, a_right, a_mode); + ir_node *xorb = new_rd_Eor(dbgi, block, b_left, b_right, b_mode); + ir_node *conv = new_rd_Conv(dbgi, block, xorb, a_mode); + ir_node *or = new_rd_Or(dbgi, block, xora, conv, a_mode); + ir_node *zero = create_zero_const(irg, a_mode); return new_rd_Cmp(dbgi, block, or, zero, ir_relation_less_greater); } } @@ -4880,6 +4928,8 @@ static ir_node *transform_node_Or(ir_node *n) return n; n = transform_bitwise_distributive(n, transform_node_Or); + if (is_Or(n)) + n = transform_node_bitop_shift(n); return n; } /* transform_node_Or */ @@ -4897,9 +4947,11 @@ static ir_node *transform_node_shift(ir_node *n) { ir_node *left, *right; ir_mode *mode; + ir_mode *count_mode; ir_tarval *tv1, *tv2, *res; ir_node *in[2], *irn, *block; ir_graph *irg; + int modulo_shf; left = get_binop_left(n); @@ -4908,7 +4960,7 @@ static ir_node *transform_node_shift(ir_node *n) return n; right = get_binop_right(n); - tv1 = value_of(right); + tv1 = value_of(right); if (tv1 == tarval_bad) return n; @@ -4916,37 +4968,55 @@ static ir_node *transform_node_shift(ir_node *n) if (tv2 == tarval_bad) return n; - res = tarval_add(tv1, tv2); - mode = get_irn_mode(n); - irg = get_irn_irg(n); + count_mode = get_tarval_mode(tv1); + if (get_tarval_mode(tv2) != count_mode) { + /* TODO: search bigger mode or something and convert... 
*/ + return n; + } - /* beware: a simple replacement works only, if res < modulo shift */ - if (!is_Rotl(n)) { - int modulo_shf = get_mode_modulo_shift(mode); - if (modulo_shf > 0) { - ir_tarval *modulo = new_tarval_from_long(modulo_shf, - get_tarval_mode(res)); + mode = get_irn_mode(n); + modulo_shf = get_mode_modulo_shift(mode); - assert(modulo_shf >= (int) get_mode_size_bits(mode)); + if (modulo_shf > 0) { + ir_tarval *modulo_mask = new_tarval_from_long(modulo_shf-1, count_mode); - /* shifting too much */ - if (!(tarval_cmp(res, modulo) & ir_relation_less)) { - if (is_Shrs(n)) { - ir_node *block = get_nodes_block(n); - dbg_info *dbgi = get_irn_dbg_info(n); - ir_mode *smode = get_irn_mode(right); - ir_node *cnst = new_r_Const_long(irg, smode, get_mode_size_bits(mode) - 1); - return new_rd_Shrs(dbgi, block, get_binop_left(left), cnst, mode); - } + /* I'm not so sure what happens in one complement... */ + assert(get_mode_arithmetic(count_mode) == irma_twos_complement); + /* modulo shifts should always be a power of 2 (otherwise modulo_mask + * above will be invalid) */ + assert(modulo_shf<=0 || is_po2(modulo_shf)); + + tv1 = tarval_and(tv1, modulo_mask); + tv2 = tarval_and(tv2, modulo_mask); + } + res = tarval_add(tv1, tv2); + irg = get_irn_irg(n); - return new_r_Const(irg, get_mode_null(mode)); + /* beware: a simple replacement works only, if res < modulo shift */ + if (is_Rotl(n)) { + int bits = get_mode_size_bits(mode); + ir_tarval *modulo = new_tarval_from_long(bits, count_mode); + res = tarval_mod(res, modulo); + } else { + long bits = get_mode_size_bits(mode); + ir_tarval *mode_size = new_tarval_from_long(bits, count_mode); + + /* shifting too much */ + if (!(tarval_cmp(res, mode_size) & ir_relation_less)) { + if (is_Shrs(n)) { + ir_node *block = get_nodes_block(n); + dbg_info *dbgi = get_irn_dbg_info(n); + ir_mode *smode = get_irn_mode(right); + ir_node *cnst = new_r_Const_long(irg, smode, get_mode_size_bits(mode) - 1); + return new_rd_Shrs(dbgi, block, get_binop_left(left), cnst, mode); } + + return new_r_Const(irg, get_mode_null(mode)); } - } else { - res = tarval_mod(res, new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(res))); } /* ok, we can replace it */ + assert(modulo_shf >= (int) get_mode_size_bits(mode)); block = get_nodes_block(n); in[0] = get_binop_left(left); @@ -4957,91 +5027,6 @@ static ir_node *transform_node_shift(ir_node *n) DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT); return transform_node(irn); -} /* transform_node_shift */ - -/** - * normalisation: (x & c1) >> c2 to (x >> c2) & (c1 >> c2) - * (we can use: - * - and, or, xor instead of & - * - Shl, Shr, Shrs, rotl instead of >> - * (with a special case for Or/Xor + Shrs) - */ -static ir_node *transform_node_bitop_shift(ir_node *n) -{ - ir_node *left; - ir_node *right = get_binop_right(n); - ir_mode *mode = get_irn_mode(n); - ir_node *bitop_left; - ir_node *bitop_right; - ir_op *op_left; - ir_node *block; - dbg_info *dbgi; - ir_graph *irg; - ir_node *new_shift; - ir_node *new_bitop; - ir_node *new_const; - ir_tarval *tv1; - ir_tarval *tv2; - ir_tarval *tv_shift; - - assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n)); - - if (!is_Const(right)) - return n; - - left = get_binop_left(n); - op_left = get_irn_op(left); - if (op_left != op_And && op_left != op_Or && op_left != op_Eor) - return n; - - /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */ - if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) { - /* TODO: test if sign bit is affectes */ - return n; - } - 
- bitop_right = get_binop_right(left); - if (!is_Const(bitop_right)) - return n; - - bitop_left = get_binop_left(left); - - block = get_nodes_block(n); - dbgi = get_irn_dbg_info(n); - tv1 = get_Const_tarval(bitop_right); - tv2 = get_Const_tarval(right); - - assert(get_tarval_mode(tv1) == mode); - - if (is_Shl(n)) { - new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode); - tv_shift = tarval_shl(tv1, tv2); - } else if (is_Shr(n)) { - new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode); - tv_shift = tarval_shr(tv1, tv2); - } else if (is_Shrs(n)) { - new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode); - tv_shift = tarval_shrs(tv1, tv2); - } else { - assert(is_Rotl(n)); - new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode); - tv_shift = tarval_rotl(tv1, tv2); - } - - assert(get_tarval_mode(tv_shift) == mode); - irg = get_irn_irg(n); - new_const = new_r_Const(irg, tv_shift); - - if (op_left == op_And) { - new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode); - } else if (op_left == op_Or) { - new_bitop = new_rd_Or(dbgi, block, new_shift, new_const, mode); - } else { - assert(op_left == op_Eor); - new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode); - } - - return new_bitop; } /** @@ -5162,7 +5147,7 @@ typedef ir_node*(*new_shift_func)(dbg_info *dbgi, ir_node *block, * then we can use that to minimize the value of Add(x, const) or * Sub(Const, x). In particular this often avoids 1 instruction in some * backends for the Shift(x, Sub(Const, y)) case because it can be replaced - * by Shift(x, Minus(y)) which doesnt't need an explicit Const constructed. + * by Shift(x, Minus(y)) which does not need an explicit Const constructed. */ static ir_node *transform_node_shift_modulo(ir_node *n, new_shift_func new_shift) @@ -5250,7 +5235,7 @@ static ir_node *transform_node_Shr(ir_node *n) if (is_Shr(n)) n = transform_node_shl_shr(n); if (is_Shr(n)) - n = transform_node_bitop_shift(n); + n = transform_node_shift_bitop(n); return n; } /* transform_node_Shr */ @@ -5265,13 +5250,23 @@ static ir_node *transform_node_Shrs(ir_node *n) ir_node *b = get_Shrs_right(n); ir_mode *mode = get_irn_mode(n); + if (is_oversize_shift(n)) { + ir_node *block = get_nodes_block(n); + dbg_info *dbgi = get_irn_dbg_info(n); + ir_mode *cmode = get_irn_mode(b); + long val = get_mode_size_bits(cmode)-1; + ir_graph *irg = get_irn_irg(n); + ir_node *cnst = new_r_Const_long(irg, cmode, val); + return new_rd_Shrs(dbgi, block, a, cnst, mode); + } + HANDLE_BINOP_PHI((eval_func) tarval_shrs, a, b, c, mode); n = transform_node_shift(n); if (is_Shrs(n)) n = transform_node_shift_modulo(n, new_rd_Shrs); if (is_Shrs(n)) - n = transform_node_bitop_shift(n); + n = transform_node_shift_bitop(n); return n; } /* transform_node_Shrs */ @@ -5294,7 +5289,7 @@ static ir_node *transform_node_Shl(ir_node *n) if (is_Shl(n)) n = transform_node_shl_shr(n); if (is_Shl(n)) - n = transform_node_bitop_shift(n); + n = transform_node_shift_bitop(n); return n; } /* transform_node_Shl */ @@ -5313,7 +5308,7 @@ static ir_node *transform_node_Rotl(ir_node *n) n = transform_node_shift(n); if (is_Rotl(n)) - n = transform_node_bitop_shift(n); + n = transform_node_shift_bitop(n); return n; } /* transform_node_Rotl */ @@ -5385,17 +5380,14 @@ static ir_node *transform_node_End(ir_node *n) for (i = j = 0; i < n_keepalives; ++i) { ir_node *ka = get_End_keepalive(n, i); - if (is_Block(ka)) { - if (! 
is_Block_dead(ka)) {
-				in[j++] = ka;
-			}
+		ir_node *block;
+		/* no need to keep Bad */
+		if (is_Bad(ka))
 			continue;
-		} else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka))) {
+		/* do not keep unreachable code */
+		block = is_Block(ka) ? ka : get_nodes_block(ka);
+		if (is_block_unreachable(block))
 			continue;
-		} else if (is_Bad(ka)) {
-			/* no need to keep Bad */
-			continue;
-		}
 		in[j++] = ka;
 	}
 	if (j != n_keepalives)
@@ -5422,16 +5414,115 @@ int ir_is_negated_value(const ir_node *a, const ir_node *b)
 	return false;
 }
 
+static const ir_node *skip_upconv(const ir_node *node)
+{
+	while (is_Conv(node)) {
+		ir_mode *mode = get_irn_mode(node);
+		const ir_node *op = get_Conv_op(node);
+		ir_mode *op_mode = get_irn_mode(op);
+		if (!smaller_mode(op_mode, mode))
+			break;
+		node = op;
+	}
+	return node;
+}
+
+int ir_mux_is_abs(const ir_node *sel, const ir_node *mux_true,
+                  const ir_node *mux_false)
+{
+	ir_node *cmp_left;
+	ir_node *cmp_right;
+	ir_mode *mode;
+	ir_relation relation;
+
+	if (!is_Cmp(sel))
+		return 0;
+
+	/**
+	 * Note further that these optimizations work even for floating point
+	 * with NaNs because -NaN == NaN.
+	 * However, if +0 and -0 are handled differently, we cannot use the Abs/-Abs
+	 * transformations.
+	 */
+	mode = get_irn_mode(mux_true);
+	if (mode_honor_signed_zeros(mode))
+		return 0;
+
+	/* must be <, <=, >=, > */
+	relation = get_Cmp_relation(sel);
+	if ((relation & ir_relation_less_greater) == 0)
+		return 0;
+
+	if (!ir_is_negated_value(mux_true, mux_false))
+		return 0;
+
+	mux_true = skip_upconv(mux_true);
+	mux_false = skip_upconv(mux_false);
+
+	/* must be x cmp 0 */
+	cmp_right = get_Cmp_right(sel);
+	if (!is_Const(cmp_right) || !is_Const_null(cmp_right))
+		return 0;
+
+	cmp_left = get_Cmp_left(sel);
+	if (cmp_left == mux_false) {
+		if (relation & ir_relation_less) {
+			return 1;
+		} else {
+			assert(relation & ir_relation_greater);
+			return -1;
+		}
+	} else if (cmp_left == mux_true) {
+		if (relation & ir_relation_less) {
+			return -1;
+		} else {
+			assert(relation & ir_relation_greater);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+ir_node *ir_get_abs_op(const ir_node *sel, ir_node *mux_true,
+                       ir_node *mux_false)
+{
+	ir_node *cmp_left = get_Cmp_left(sel);
+	return cmp_left == skip_upconv(mux_false) ? mux_false : mux_true;
+}
+
 /**
  * Optimize a Mux into some simpler cases. 
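+ *
+ * The two's complement abs lowering added below computes s = x >>s (bits-1),
+ * abs(x) = (x ^ s) - s: s is 0 or all-ones, the Eor conditionally flips the
+ * bits, and the Sub adds the missing 1 when x was negative.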
*/ static ir_node *transform_node_Mux(ir_node *n) { - ir_node *oldn = n, *sel = get_Mux_sel(n); - ir_mode *mode = get_irn_mode(n); - ir_node *t = get_Mux_true(n); - ir_node *f = get_Mux_false(n); - ir_graph *irg = get_irn_irg(n); + ir_node *oldn = n; + ir_node *sel = get_Mux_sel(n); + ir_mode *mode = get_irn_mode(n); + ir_node *t = get_Mux_true(n); + ir_node *f = get_Mux_false(n); + ir_graph *irg = get_irn_irg(n); + + /* implement integer abs: abs(x) = x^(x >>s 31) - (x >>s 31) */ + if (get_mode_arithmetic(mode) == irma_twos_complement) { + int abs = ir_mux_is_abs(sel, t, f); + if (abs != 0) { + dbg_info *dbgi = get_irn_dbg_info(n); + ir_node *block = get_nodes_block(n); + ir_node *op = ir_get_abs_op(sel, t, f); + int bits = get_mode_size_bits(mode); + ir_node *shiftconst = new_r_Const_long(irg, mode_Iu, bits-1); + ir_node *sext = new_rd_Shrs(dbgi, block, op, shiftconst, mode); + ir_node *xorn = new_rd_Eor(dbgi, block, op, sext, mode); + ir_node *res; + if (abs > 0) { + res = new_rd_Sub(dbgi, block, xorn, sext, mode); + } else { + res = new_rd_Sub(dbgi, block, sext, xorn, mode); + } + return res; + } + } if (is_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX)) return n; @@ -5656,6 +5747,24 @@ static ir_node *transform_node_Sync(ir_node *n) int pred_arity; int j; + /* Remove Bad predecessors */ + if (is_Bad(pred)) { + del_Sync_n(n, i); + --arity; + continue; + } + + /* Remove duplicate predecessors */ + for (j = 0; j < i; ++j) { + if (get_Sync_pred(n, j) == pred) { + del_Sync_n(n, i); + --arity; + break; + } + } + if (j < i) + continue; + if (!is_Sync(pred)) { ++i; continue; @@ -5680,11 +5789,78 @@ static ir_node *transform_node_Sync(ir_node *n) } } + if (arity == 0) { + ir_graph *irg = get_irn_irg(n); + return new_r_Bad(irg, mode_M); + } + if (arity == 1) { + return get_Sync_pred(n, 0); + } + /* rehash the sync node */ add_identities(n); + return n; +} + +static ir_node *transform_node_Load(ir_node *n) +{ + /* if our memory predecessor is a load from the same address, then reuse the + * previous result */ + ir_node *mem = get_Load_mem(n); + ir_node *mem_pred; + + if (!is_Proj(mem)) + return n; + /* don't touch volatile loads */ + if (get_Load_volatility(n) == volatility_is_volatile) + return n; + mem_pred = get_Proj_pred(mem); + if (is_Load(mem_pred)) { + ir_node *pred_load = mem_pred; + + /* conservatively compare the 2 loads. 
TODO: This could be less strict + * with fixup code in some situations (like smaller/bigger modes) */ + if (get_Load_ptr(pred_load) != get_Load_ptr(n)) + return n; + if (get_Load_mode(pred_load) != get_Load_mode(n)) + return n; + /* all combinations of aligned/unaligned pred/n should be fine so we do + * not compare the unaligned attribute */ + { + ir_node *block = get_nodes_block(n); + ir_node *jmp = new_r_Jmp(block); + ir_graph *irg = get_irn_irg(n); + ir_node *bad = new_r_Bad(irg, mode_X); + ir_mode *mode = get_Load_mode(n); + ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res); + ir_node *in[pn_Load_max+1] = { mem, res, jmp, bad }; + ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in); + return tuple; + } + } else if (is_Store(mem_pred)) { + ir_node *pred_store = mem_pred; + ir_node *value = get_Store_value(pred_store); + + if (get_Store_ptr(pred_store) != get_Load_ptr(n)) + return n; + if (get_irn_mode(value) != get_Load_mode(n)) + return n; + /* all combinations of aligned/unaligned pred/n should be fine so we do + * not compare the unaligned attribute */ + { + ir_node *block = get_nodes_block(n); + ir_node *jmp = new_r_Jmp(block); + ir_graph *irg = get_irn_irg(n); + ir_node *bad = new_r_Bad(irg, mode_X); + ir_node *res = value; + ir_node *in[pn_Load_max+1] = { mem, res, jmp, bad }; + ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in); + return tuple; + } + } return n; -} /* transform_node_Sync */ +} /** * optimize a trampoline Call into a direct Call @@ -5808,6 +5984,7 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops switch (code) { CASE(Add); CASE(And); + CASE(Block); CASE(Call); CASE(Cmp); CASE(Conv); @@ -5829,13 +6006,13 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops CASE(Sync); CASE_PROJ(Bound); CASE_PROJ(CopyB); - CASE_PROJ(Load); CASE_PROJ(Store); CASE_PROJ_EX(Cond); CASE_PROJ_EX(Div); + CASE_PROJ_EX(Load); CASE_PROJ_EX(Mod); default: - /* leave NULL */; + break; } return ops; @@ -6021,29 +6198,31 @@ static int node_cmp_attr_ASM(const ir_node *a, const ir_node *b) /* Should we really check the constraints here? Should be better, but is strange. */ n = get_ASM_n_input_constraints(a); if (n != get_ASM_n_input_constraints(b)) - return 0; + return 1; ca = get_ASM_input_constraints(a); cb = get_ASM_input_constraints(b); for (i = 0; i < n; ++i) { - if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint) + if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint + || ca[i].mode != cb[i].mode) return 1; } n = get_ASM_n_output_constraints(a); if (n != get_ASM_n_output_constraints(b)) - return 0; + return 1; ca = get_ASM_output_constraints(a); cb = get_ASM_output_constraints(b); for (i = 0; i < n; ++i) { - if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint) + if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint + || ca[i].mode != cb[i].mode) return 1; } n = get_ASM_n_clobbers(a); if (n != get_ASM_n_clobbers(b)) - return 0; + return 1; cla = get_ASM_clobbers(a); clb = get_ASM_clobbers(b); @@ -6137,10 +6316,23 @@ int identities_cmp(const void *elt, const void *key) /* for pinned nodes, the block inputs must be equal */ if (get_irn_n(a, -1) != get_irn_n(b, -1)) return 1; - } else if (! get_opt_global_cse()) { - /* for block-local CSE both nodes must be in the same Block */ - if (get_nodes_block(a) != get_nodes_block(b)) - return 1; + } else { + ir_node *block_a = get_nodes_block(a); + ir_node *block_b = get_nodes_block(b); + if (! 
get_opt_global_cse()) { + /* for block-local CSE both nodes must be in the same Block */ + if (block_a != block_b) + return 1; + } else { + /* The optimistic approach would be to do nothing here. + * However doing GCSE optimistically produces a lot of partially dead code which appears + * to be worse in practice than the missed opportunities. + * So we use a very conservative variant here and only CSE if 1 value dominates the + * other. */ + if (!block_dominates(block_a, block_b) + && !block_dominates(block_b, block_a)) + return 1; + } } /* compare a->in[0..ins] with b->in[0..ins] */ @@ -6282,100 +6474,6 @@ void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env) current_ir_graph = rem; } /* visit_all_identities */ -/** - * Garbage in, garbage out. If a node has a dead input, i.e., the - * Bad node is input to the node, return the Bad node. - */ -static ir_node *gigo(ir_node *node) -{ - int i, irn_arity; - ir_op *op = get_irn_op(node); - - /* remove garbage blocks by looking at control flow that leaves the block - and replacing the control flow by Bad. */ - if (get_irn_mode(node) == mode_X) { - ir_node *block = get_nodes_block(skip_Proj(node)); - ir_graph *irg = get_irn_irg(block); - - /* Don't optimize nodes in immature blocks. */ - if (!get_Block_matured(block)) - return node; - /* Don't optimize End, may have Bads. */ - if (op == op_End) return node; - - if (is_Block(block)) { - if (is_Block_dead(block)) { - /* control flow from dead block is dead */ - return get_irg_bad(irg); - } - - for (i = get_irn_arity(block) - 1; i >= 0; --i) { - if (!is_Bad(get_irn_n(block, i))) - break; - } - if (i < 0) { - ir_graph *irg = get_irn_irg(block); - /* the start block is never dead */ - if (block != get_irg_start_block(irg) - && block != get_irg_end_block(irg)) { - /* - * Do NOT kill control flow without setting - * the block to dead of bad things can happen: - * We get a Block that is not reachable be irg_block_walk() - * but can be found by irg_walk()! - */ - set_Block_dead(block); - return get_irg_bad(irg); - } - } - } - } - - /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the - blocks predecessors is dead. */ - if (op != op_Block && op != op_Phi && op != op_Tuple && op != op_Anchor) { - ir_graph *irg = get_irn_irg(node); - irn_arity = get_irn_arity(node); - - /* - * Beware: we can only read the block of a non-floating node. - */ - if (is_irn_pinned_in_irg(node) && - is_Block_dead(get_nodes_block(skip_Proj(node)))) - return get_irg_bad(irg); - - for (i = 0; i < irn_arity; i++) { - ir_node *pred = get_irn_n(node, i); - - if (is_Bad(pred)) - return get_irg_bad(irg); -#if 0 - /* Propagating Unknowns here seems to be a bad idea, because - sometimes we need a node as a input and did not want that - it kills its user. - However, it might be useful to move this into a later phase - (if you think that optimizing such code is useful). */ - if (is_Unknown(pred) && mode_is_data(get_irn_mode(node))) - return new_r_Unknown(irg, get_irn_mode(node)); -#endif - } - } -#if 0 - /* With this code we violate the agreement that local_optimize - only leaves Bads in Block, Phi and Tuple nodes. */ - /* If Block has only Bads as predecessors it's garbage. */ - /* If Phi has only Bads as predecessors it's garbage. 
*/ - if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) { - irn_arity = get_irn_arity(node); - for (i = 0; i < irn_arity; i++) { - if (!is_Bad(get_irn_n(node, i))) break; - } - if (i == irn_arity) node = get_irg_bad(irg); - } -#endif - return node; -} /* gigo */ - /** * These optimizations deallocate nodes from the obstack. * It can only be called if it is guaranteed that no other nodes @@ -6393,17 +6491,6 @@ ir_node *optimize_node(ir_node *n) /* Always optimize Phi nodes: part of the construction. */ if ((!get_opt_optimize()) && (iro != iro_Phi)) return n; - /* Remove nodes with dead (Bad) input. - Run always for transformation induced Bads. */ - n = gigo(n); - if (n != oldn) { - edges_node_deleted(oldn); - - /* We found an existing, better node, so we can deallocate the old node. */ - irg_kill_node(irg, oldn); - return n; - } - /* constant expression evaluation / constant folding */ if (get_opt_constant_folding()) { /* neither constants nor Tuple values can be evaluated */ @@ -6502,12 +6589,6 @@ ir_node *optimize_in_place_2(ir_node *n) if (iro == iro_Deleted) return n; - /* Remove nodes with dead (Bad) input. - Run always for transformation induced Bads. */ - n = gigo(n); - if (is_Bad(n)) - return n; - /* constant expression evaluation / constant folding */ if (get_opt_constant_folding()) { /* neither constants nor Tuple values can be evaluated */ @@ -6580,8 +6661,6 @@ ir_node *optimize_in_place(ir_node *n) if (get_opt_global_cse()) set_irg_pinned(irg, op_pin_state_floats); - if (get_irg_outs_state(irg) == outs_consistent) - set_irg_outs_inconsistent(irg); /* FIXME: Maybe we could also test whether optimizing the node can change the control graph. */ @@ -6624,7 +6703,7 @@ static unsigned hash_SymConst(const ir_node *node) * @return * The operations. */ -static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops) +static ir_op_ops *firm_set_default_hash(unsigned code, ir_op_ops *ops) { #define CASE(a) \ case iro_##a: \ @@ -6650,7 +6729,7 @@ static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops) /* * Sets the default operation for an ir_ops. */ -ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops) +ir_op_ops *firm_set_default_operations(unsigned code, ir_op_ops *ops) { ops = firm_set_default_hash(code, ops); ops = firm_set_default_computed_value(code, ops);
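
As a quick sanity check of the two's-complement identities this patch relies
on, the following standalone C program (illustrative only; it is independent
of libfirm and uses none of its API) exercises each of them:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* branch-free abs as in transform_node_Mux: s = x >>s (bits-1); (x^s) - s.
 * Assumes the (near-universal) arithmetic right shift for signed values. */
static int32_t abs_branchfree(int32_t x)
{
	int32_t s = x >> 31; /* 0 for x >= 0, -1 for x < 0 */
	return (x ^ s) - s;
}

int main(void)
{
	uint32_t x = 0xDEADBEEFu, y = 0x12345678u;

	/* computed_value_Add/And/Or/Eor: x op ~x is a known constant */
	assert(x + ~x == UINT32_MAX);
	assert((x & ~x) == 0);
	assert((x | ~x) == UINT32_MAX);
	assert((x ^ ~x) == UINT32_MAX);

	/* transform_node_Sub: x - (x & y) == x & ~y */
	assert(x - (x & y) == (x & ~y));

	/* transform_node_shift_bitop: (x & c1) >> c2 == (x >> c2) & (c1 >> c2) */
	assert(((x & 0xF0u) >> 4) == ((x >> 4) & 0x0Fu));

	/* transform_node_bitop_shift: (x >> c1) & c2 == (x & (c2 << c1)) >> c1 */
	assert(((x >> 4) & 0x0Fu) == ((x & (0x0Fu << 4)) >> 4));

	/* transform_node_shift: constant shift counts add up */
	assert(((x >> 3) >> 2) == (x >> 5));

	assert(abs_branchfree(-5) == 5 && abs_branchfree(7) == 7);
	puts("all identities hold");
	return 0;
}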