X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=dcbe3baec97dc1f3a70cc1cec51775c1a04d78af;hb=637542932dc27dcdfc7def09b58d9d5d4c34fb77;hp=324be194190f0048764a7b3e388b6c960484c884;hpb=da8d6ea948daf3315a1f25dee4ecbedbe2f1d319;p=libfirm

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 324be1941..dcbe3baec 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -39,6 +39,7 @@
 # include "irhooks.h"
 # include "irarch.h"
 # include "hashptr.h"
+# include "archop.h"
 # include "opt_polymorphy.h"
 
 /* Make types visible to allow most efficient access */
@@ -60,7 +61,7 @@ follow_Id (ir_node *n)
  */
 static tarval *computed_value_Const(ir_node *n)
 {
-    return get_Const_tarval(n);
+  return get_Const_tarval(n);
 }
 
 /**
@@ -603,9 +604,26 @@ different_identity (ir_node *a, ir_node *b)
 }
 #endif
 
+/**
+ * Returns an equivalent block for another block.
+ * If the block has only one predecessor, this is
+ * the equivalent one. If the only predecessor of a block is
+ * the block itself, this is a dead block.
+ *
+ * If both predecessors of a block are the branches of a binary
+ * Cond, the equivalent block is Cond's block.
+ *
+ * If all predecessors of a block are Bad or lie in a dead
+ * block, the current block is dead as well.
+ *
+ * Note that blocks are NEVER turned into Bad nodes; instead
+ * the dead_block flag is set. So never test for is_Bad(block),
+ * always use is_dead_Block(block).
+ */
 static ir_node *equivalent_node_Block(ir_node *n)
 {
   ir_node *oldn = n;
+  int n_preds   = get_Block_n_cfgpreds(n);
 
   /* The Block constructor does not call optimize, but mature_immBlock
      calls the optimization. */
@@ -617,8 +635,7 @@ static ir_node *equivalent_node_Block(ir_node *n)
      This should be true, as the block is matured before optimize is called.
      But what about Phi-cycles with the Phi0/Id that could not be resolved?
      Remaining Phi nodes are just Ids. */
-  if ((get_Block_n_cfgpreds(n) == 1) &&
-      (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
+  if ((n_preds == 1) && (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
     ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
     if (predblock == oldn) {
       /* Jmp jumps into the block it is in -- handle this self-cycle. */
@@ -629,7 +646,7 @@
       DBG_OPT_STG(oldn, n);
     }
   }
-  else if ((get_Block_n_cfgpreds(n) == 1) &&
+  else if ((n_preds == 1) &&
            (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
     ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
     if (predblock == oldn) {
@@ -638,7 +655,7 @@
       DBG_OPT_DEAD(oldn, n);
     }
   }
-  else if ((get_Block_n_cfgpreds(n) == 2) &&
+  else if ((n_preds == 2) &&
            (get_opt_control_flow_weak_simplification())) {
     /* Test whether Cond jumps twice to this block
        @@@ we could do this also with two loops finding two preds from several ones. */
@@ -770,6 +787,10 @@ static ir_node *equivalent_node_left_zero(ir_node *n)
 
 /**
  * Er, a "symmetric unop", i.e. op(op(n)) = n.
+ *
+ * @fixme -(-a) == a, but might overflow twice.
+ * We handle it here anyway, but the better way would be a
+ * flag. This would be needed for Pascal, for instance.
  */
 static ir_node *equivalent_node_symmetric_unop(ir_node *n)
 {
@@ -940,7 +961,7 @@ static ir_node *equivalent_node_Conv(ir_node *n)
 
 /**
  * A Cast may be removed if the type of the previous node
- * is already to type of the Cast.
+ * is already the type of the Cast.
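+ *
+ * Illustrative sketch (hypothetical builder calls, not taken from this
+ * patch; assumes the usual new_Cast(op, tp) constructor):
+ *
+ *   x = ...;              with get_irn_type(x) == tp
+ *   c = new_Cast(x, tp);  Cast to the type x already carries
+ *
+ * equivalent_node_Cast(c) then simply returns x again.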
  */
 static ir_node *equivalent_node_Cast(ir_node *n) {
   ir_node *pred = get_Cast_op(n);
@@ -1156,14 +1177,18 @@ static ir_node *equivalent_node_Mux(ir_node *n)
 
 /**
  * Optimize -a CMP -b into b CMP a.
- * This works even for floating point
+ * This works only for modes where unary Minus
+ * cannot overflow.
+ * Note that two's-complement integers can overflow
+ * on Minus, so it does NOT work for them.
  */
 static ir_node *equivalent_node_Cmp(ir_node *n)
 {
   ir_node *left  = get_Cmp_left(n);
   ir_node *right = get_Cmp_right(n);
 
-  if (get_irn_op(left) == op_Minus && get_irn_op(right) == op_Minus) {
+  if (get_irn_op(left) == op_Minus && get_irn_op(right) == op_Minus &&
+      !mode_overflow_on_unary_Minus(get_irn_mode(left))) {
     left  = get_Minus_op(left);
     right = get_Minus_op(right);
     set_Cmp_left(n, right);
@@ -1369,16 +1394,14 @@ static ir_node *transform_node_Sub(ir_node *n)
   n = transform_node_AddSub(n);
 
   mode = get_irn_mode(n);
-  if (mode_is_num(mode)) {
-    if (classify_Const(get_Sub_left(n)) == CNST_NULL) {
-      n = new_rd_Minus(
-          get_irn_dbg_info(n),
-          current_ir_graph,
-          get_nodes_block(n),
-          get_Sub_right(n),
-          mode);
-      DBG_OPT_ALGSIM0(oldn, n);
-    }
+  if (mode_is_num(mode) && (classify_Const(get_Sub_left(n)) == CNST_NULL)) {
+    n = new_rd_Minus(
+        get_irn_dbg_info(n),
+        current_ir_graph,
+        get_nodes_block(n),
+        get_Sub_right(n),
+        mode);
+    DBG_OPT_ALGSIM0(oldn, n);
   }
 
   return n;
@@ -1625,7 +1648,7 @@ static ir_node *transform_node_Not(ir_node *n)
 static ir_node *transform_node_Cast(ir_node *n) {
   ir_node *oldn = n;
   ir_node *pred = get_Cast_op(n);
-  type *tp = get_irn_type(pred);
+  type *tp = get_irn_type(n);
 
   if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
     n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred),
@@ -1636,6 +1659,7 @@ static ir_node *transform_node_Cast(ir_node *n) {
                           get_SymConst_kind(pred), tp);
     DBG_OPT_CSTEVAL(oldn, n);
   }
+
   return n;
 }
 
@@ -1749,7 +1773,7 @@ static ir_node *transform_node_Proj(ir_node *proj)
     return proj;
 
   case iro_Cmp:
-    if (0 && get_opt_reassociation()) {
+    if (get_opt_reassociation()) {
       ir_node *left  = get_Cmp_left(n);
       ir_node *right = get_Cmp_right(n);
       ir_node *c     = NULL;
@@ -1797,11 +1821,19 @@ static ir_node *transform_node_Proj(ir_node *proj)
         tv = get_Const_tarval(c);
 
         if (tv != tarval_bad) {
-          /* the following optimization is possibe on non-int values either:
-           * -a CMP c  ==>  a swap(CMP) -c */
-          if (get_opt_constant_folding() && get_irn_op(left) == op_Minus) {
+          /* the following optimization is possible on modes without overflow
+           * on unary Minus, or for the == and != relations:
+           * -a CMP c  ==>  a swap(CMP) -c
+           *
+           * Beware: with two's complement, overflow may occur, so only == and
+           * != can be optimized there; consider:
+           * -MININT < 0  =/=>  MININT > 0 !!!
+           */
+          if (get_opt_constant_folding() && get_irn_op(left) == op_Minus &&
+              (!mode_overflow_on_unary_Minus(mode) ||
+               (mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
             left = get_Minus_op(left);
-            tv = tarval_sub(get_tarval_one(mode), tv);
+            tv = tarval_sub(get_tarval_null(mode), tv);
 
             proj_nr = get_swapped_pnc(proj_nr);
             changed |= 2;
@@ -1811,8 +1843,10 @@ static ir_node *transform_node_Proj(ir_node *proj)
           if (mode_is_int(mode)) {
             /* Ne includes Unordered which is not possible on integers.
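             * (Illustration: on an int mode such as mode_Is no operand is
             * ever NaN, so no pair of values compares Unordered and Ne
             * tests exactly the same thing as Lg.)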
             * However, frontends often get this wrong, so fix it here */
-          if (proj_nr == pn_Cmp_Ne)
+          if (proj_nr == pn_Cmp_Ne) {
             proj_nr = pn_Cmp_Lg;
+            set_Proj_proj(proj, proj_nr);
+          }
 
           /* c > 0 : a < c  ==>  a <= (c-1)    a >= c  ==>  a > (c-1) */
           if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Ge) &&
@@ -1844,7 +1878,7 @@ static ir_node *transform_node_Proj(ir_node *proj)
         }
       }
 
-      if (tv != tarval_bad) {
+      if ((tv != tarval_bad) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)) {
         ir_op *op = get_irn_op(left);
 
         /* a-c1 == c2  ==>  a == c2+c1,  a-c1 != c2  ==>  a != c2+c1 */
@@ -2225,7 +2259,7 @@ static ir_node *transform_node_End(ir_node *n) {
 }
 
 /**
- * Optimize a Mux into some simplier cases
+ * Optimize a Mux into some simpler cases.
 */
 static ir_node *transform_node_Mux(ir_node *n)
 {
@@ -2259,7 +2293,8 @@ static ir_node *transform_node_Mux(ir_node *n)
               current_ir_graph,
               block,
               t, mode);
-          DBG_OPT_ALGSIM0(oldn, n);
+          DBG_OPT_ALGSIM1(oldn, cmp, sel, n);
+          return n;
         }
         else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
           /* Mux(a <=/< 0, -a, a)  ==>  Minus(Abs(a)) */
@@ -2272,7 +2307,8 @@ static ir_node *transform_node_Mux(ir_node *n)
               block,
               n,
               mode);
-          DBG_OPT_ALGSIM0(oldn, n);
+          DBG_OPT_ALGSIM1(oldn, cmp, sel, n);
+          return n;
         }
       }
       else if (get_irn_op(t) == op_Minus &&
@@ -2285,7 +2321,8 @@ static ir_node *transform_node_Mux(ir_node *n)
               current_ir_graph,
               block,
               f, mode);
-          DBG_OPT_ALGSIM0(oldn, n);
+          DBG_OPT_ALGSIM1(oldn, cmp, sel, n);
+          return n;
         }
         else if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
           /* Mux(a >=/> 0, a, -a)  ==>  Minus(Abs(a)) */
@@ -2298,50 +2335,61 @@ static ir_node *transform_node_Mux(ir_node *n)
               block,
               n,
               mode);
-          DBG_OPT_ALGSIM0(oldn, n);
+          DBG_OPT_ALGSIM1(oldn, cmp, sel, n);
+          return n;
         }
       }
     }
 
     if (mode_is_int(mode) && mode_is_signed(mode) &&
         get_mode_arithmetic(mode) == irma_twos_complement) {
+      ir_node *x = get_Cmp_left(cmp);
+
       /* the following optimization works only with signed integer two's complement mode */
-      if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Le) &&
-          classify_Const(t) == CNST_ALL_ONE &&
-          classify_Const(f) == CNST_NULL) {
+      if (mode == get_irn_mode(x)) {
         /*
-         * Mux(x:T </<= 0, -1, 0) -> Shrs(x, sizeof_bits(T) - 1)
-         * Conditions:
-         * T must be signed.
+         * FIXME: this restriction is too rigid, as it would still
+         * work if mode(x) = Hs and mode == Is, but at least it removes
+         * all wrong cases.
         */
-        n = new_rd_Shrs(get_irn_dbg_info(n),
-                current_ir_graph, block, get_Cmp_left(cmp),
-                new_r_Const_long(current_ir_graph, block, mode_Iu,
-                  get_mode_size_bits(mode) - 1),
-                mode);
-        DBG_OPT_ALGSIM0(oldn, n);
-      }
-      else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Ge) &&
-               classify_Const(t) == CNST_ONE &&
-               classify_Const(f) == CNST_NULL) {
-        /*
-         * Mux(x:T >/>= 0, 0, 1) -> Shr(-x, sizeof_bits(T) - 1)
-         * Conditions:
-         * T must be signed.
-         */
-        n = new_rd_Shr(get_irn_dbg_info(n),
-              current_ir_graph, block,
-              new_r_Minus(current_ir_graph, block,
-                get_Cmp_left(cmp), mode),
-              new_r_Const_long(current_ir_graph, block, mode_Iu,
-                get_mode_size_bits(mode) - 1),
-              mode);
-        DBG_OPT_ALGSIM0(oldn, n);
+        if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Le) &&
+            classify_Const(t) == CNST_ALL_ONE &&
+            classify_Const(f) == CNST_NULL) {
+          /*
+           * Mux(x:T </<= 0, -1, 0) -> Shrs(x, sizeof_bits(T) - 1)
+           * Conditions:
+           * T must be signed.
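+           *
+           * Worked example (illustrative): for T = Is (32 bits, signed),
+           * x < 0  gives Shrs(x, 31) = 0xFFFFFFFF = -1 (all-one),
+           * x >= 0 gives Shrs(x, 31) = 0x00000000 =  0,
+           * i.e. exactly the two Mux results.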
+ */ + n = new_rd_Shrs(get_irn_dbg_info(n), + current_ir_graph, block, x, + new_r_Const_long(current_ir_graph, block, mode_Iu, + get_mode_size_bits(mode) - 1), + mode); + DBG_OPT_ALGSIM1(oldn, cmp, sel, n); + return n; + } + else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Ge) && + classify_Const(t) == CNST_ONE && + classify_Const(f) == CNST_NULL) { + /* + * Mux(x:T >/>= 0, 0, 1) -> Shr(-x, sizeof_bits(T) - 1) + * Conditions: + * T must be signed. + */ + n = new_rd_Shr(get_irn_dbg_info(n), + current_ir_graph, block, + new_r_Minus(current_ir_graph, block, x, mode), + new_r_Const_long(current_ir_graph, block, mode_Iu, + get_mode_size_bits(mode) - 1), + mode); + DBG_OPT_ALGSIM1(oldn, cmp, sel, n); + return n; + } } } } } - return n; + return arch_transform_node_Mux(n); } /** @@ -2387,7 +2435,7 @@ static ir_op *firm_set_default_transform_node(ir_op *op) CASE(End); CASE(Mux); default: - op->transform_node = NULL; + op->transform_node = NULL; } return op;