X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=69ce9d0f25bcb339030c92f1e1eb68320291b118;hb=abb11c60e50826d82d0efcee52842c0ccdde95e0;hp=7f2dd8f2ecc78dd1bc39eb8af484aefef67b5194;hpb=a28fc0680d9f068ab64c2baa4b71d3eabd1c8abc;p=libfirm diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c index 7f2dd8f2e..69ce9d0f2 100644 --- a/ir/ir/iropt.c +++ b/ir/ir/iropt.c @@ -513,7 +513,7 @@ static tarval *computed_value_Proj_Cmp(ir_node *n) } } - return computed_value_Cmp_Confirm(aa, ab, proj_nr); + return computed_value_Cmp_Confirm(a, aa, ab, proj_nr); } /** @@ -692,7 +692,7 @@ static ir_node *equivalent_node_Block(ir_node *n) if (predblock == oldn) { /* Jmp jumps into the block it is in -- deal self cycle. */ n = set_Block_dead(n); - DBG_OPT_DEAD(oldn, n); + DBG_OPT_DEAD_BLOCK(oldn, n); } else if (get_opt_control_flow_straightening()) { n = predblock; DBG_OPT_STG(oldn, n); @@ -700,17 +700,19 @@ static ir_node *equivalent_node_Block(ir_node *n) } else if ((n_preds == 1) && (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) { - ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0)); + ir_node *predblock = get_Block_cfgpred_block(n, 0); if (predblock == oldn) { /* Jmp jumps into the block it is in -- deal self cycle. */ n = set_Block_dead(n); - DBG_OPT_DEAD(oldn, n); + DBG_OPT_DEAD_BLOCK(oldn, n); } } else if ((n_preds == 2) && (get_opt_control_flow_weak_simplification())) { /* Test whether Cond jumps twice to this block - @@@ we could do this also with two loops finding two preds from several ones. */ + * The more general case which more than 2 predecessors is handles + * in optimize_cf(), we handle only this special case for speed here. + */ ir_node *a = get_Block_cfgpred(n, 0); ir_node *b = get_Block_cfgpred(n, 1); @@ -722,11 +724,12 @@ static ir_node *equivalent_node_Block(ir_node *n) /* Also a single entry Block following a single exit Block. Phis have twice the same operand and will be optimized away. */ n = get_nodes_block(a); - DBG_OPT_IFSIM(oldn, a, b, n); + DBG_OPT_IFSIM1(oldn, a, b, n); } - } else if (get_opt_unreachable_code() && - (n != current_ir_graph->start_block) && - (n != current_ir_graph->end_block) ) { + } + else if (get_opt_unreachable_code() && + (n != current_ir_graph->start_block) && + (n != current_ir_graph->end_block) ) { int i, n_cfg = get_Block_n_cfgpreds(n); /* If all inputs are dead, this block is dead too, except if it is @@ -737,7 +740,7 @@ static ir_node *equivalent_node_Block(ir_node *n) ir_node *pred_blk; if (is_Bad(pred)) continue; - pred_blk = get_nodes_block(pred); + pred_blk = get_nodes_block(skip_Proj(pred)); if (is_Block_dead(pred_blk)) continue; @@ -767,12 +770,8 @@ static ir_node *equivalent_node_Jmp(ir_node *n) return n; } -static ir_node *equivalent_node_Cond(ir_node *n) -{ - /* We do not evaluate Cond here as we replace it by a new node, a Jmp. - See cases for iro_Cond and iro_Proj in transform_node(). */ - return n; -} +/* We do not evaluate Cond here as we replace it by a new node, a Jmp. + See transform_node_Proj_Cond(). */ /** * optimize operations that are commutative and have neutral 0, @@ -798,19 +797,71 @@ static ir_node *equivalent_node_neutral_zero(ir_node *n) return n; /* If this predecessors constant value is zero, the operation is - unnecessary. Remove it: */ + * unnecessary. Remove it. + * + * Beware: If n is a Add, the mode of on and n might be different + * which happens in this rare construction: NULL + 3. + * Then, a Conv would be needed which we cannot include here. 
+ */ if (classify_tarval (tv) == TV_CLASSIFY_NULL) { - n = on; + if (get_irn_mode(on) == get_irn_mode(n)) { + n = on; - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0); + } } return n; } -#define equivalent_node_Add equivalent_node_neutral_zero #define equivalent_node_Eor equivalent_node_neutral_zero +/* + * Optimize a - 0 and (a - x) + x (for modes with wrap-around). + * + * The second one looks strange, but this construct + * is used heavily in the LCC sources :-). + * + * Beware: The Mode of an Add may be different than the mode of its + * predecessors, so we could not return a predecessors in all cases. + */ +static ir_node *equivalent_node_Add(ir_node *n) +{ + ir_node *oldn = n; + ir_node *left, *right; + + n = equivalent_node_neutral_zero(n); + if (n != oldn) + return n; + + left = get_Add_left(n); + right = get_Add_right(n); + + if (get_irn_op(left) == op_Sub) { + if (get_Sub_right(left) == right) { + /* (a - x) + x */ + + n = get_Sub_left(left); + if (get_irn_mode(oldn) == get_irn_mode(n)) { + DBG_OPT_ALGSIM1(oldn, left, right, n, FS_OPT_ADD_SUB); + return n; + } + } + } + if (get_irn_op(right) == op_Sub) { + if (get_Sub_right(right) == left) { + /* x + (a - x) */ + + n = get_Sub_left(right); + if (get_irn_mode(oldn) == get_irn_mode(n)) { + DBG_OPT_ALGSIM1(oldn, left, right, n, FS_OPT_ADD_SUB); + return n; + } + } + } + return n; +} + /** * optimize operations that are not commutative but have neutral 0 on left, * so a op 0 = a. @@ -825,18 +876,67 @@ static ir_node *equivalent_node_left_zero(ir_node *n) if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) { n = a; - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0); } return n; } -#define equivalent_node_Sub equivalent_node_left_zero #define equivalent_node_Shl equivalent_node_left_zero #define equivalent_node_Shr equivalent_node_left_zero #define equivalent_node_Shrs equivalent_node_left_zero #define equivalent_node_Rot equivalent_node_left_zero +/** + * Optimize a - 0 and (a + x) - x (for modes with wrap-around). + * + * The second one looks strange, but this construct + * is used heavily in the LCC sources :-). + * + * Beware: The Mode of a Sub may be different than the mode of its + * predecessors, so we could not return a predecessors in all cases. + */ +static ir_node *equivalent_node_Sub(ir_node *n) +{ + ir_node *oldn = n; + + ir_node *a = get_Sub_left(n); + ir_node *b = get_Sub_right(n); + + /* Beware: modes might be different */ + if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) { + if (get_irn_mode(n) == get_irn_mode(a)) { + n = a; + + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0); + } + } + else if (get_irn_op(a) == op_Add) { + ir_mode *mode = get_irn_mode(n); + + if (mode_wrap_around(mode)) { + ir_node *left = get_Add_left(a); + ir_node *right = get_Add_right(a); + + if (left == b) { + if (get_irn_mode(n) == get_irn_mode(right)) { + n = right; + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB); + } + } + else if (right == b) { + if (get_irn_mode(n) == get_irn_mode(left)) { + n = left; + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB); + } + } + } + } + + return n; +} + + /** * Optimize an "idempotent unary op", ie op(op(n)) = n. * @@ -877,10 +977,10 @@ static ir_node *equivalent_node_Mul(ir_node *n) /* Mul is commutative and has again an other neutral element. 
*/ if (classify_tarval(value_of(a)) == TV_CLASSIFY_ONE) { n = b; - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1); } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { n = a; - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1); } return n; } @@ -940,12 +1040,13 @@ static ir_node *equivalent_node_Or(ir_node *n) if (a == b) { n = a; /* Or has it's own neutral element */ + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_OR); } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_NULL) { n = b; - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_OR); } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) { n = a; - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_OR); } return n; @@ -963,12 +1064,13 @@ static ir_node *equivalent_node_And(ir_node *n) if (a == b) { n = a; /* And has it's own neutral element */ + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_AND); } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_ALL_ONE) { n = b; - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND); } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ALL_ONE) { n = a; - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND); } return n; } @@ -987,7 +1089,7 @@ static ir_node *equivalent_node_Conv(ir_node *n) if (n_mode == a_mode) { /* No Conv necessary */ n = a; - DBG_OPT_ALGSIM3(oldn, a, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV); } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */ ir_mode *b_mode; @@ -998,12 +1100,12 @@ static ir_node *equivalent_node_Conv(ir_node *n) if (n_mode == b_mode) { if (n_mode == mode_b) { n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV); } else if (mode_is_int(n_mode) || mode_is_character(n_mode)) { if (smaller_mode(b_mode, a_mode)){ n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ - DBG_OPT_ALGSIM1(oldn, a, b, n); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV); } } } @@ -1016,9 +1118,13 @@ static ir_node *equivalent_node_Conv(ir_node *n) * is already the type of the Cast. */ static ir_node *equivalent_node_Cast(ir_node *n) { + ir_node *oldn = n; ir_node *pred = get_Cast_op(n); - if (get_irn_type(pred) == get_Cast_type(n)) + + if (get_irn_type(pred) == get_Cast_type(n)) { n = pred; + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CAST); + } return n; } @@ -1050,43 +1156,27 @@ static ir_node *equivalent_node_Phi(ir_node *n) if (n_preds == 0) return n; /* Phi of dead Region without predecessors. */ -#if 0 - /* first we test for a special case: */ - /* Confirm is a special node fixing additional information for a - value that is known at a certain point. This is useful for - dataflow analysis. */ - if (n_preds == 2) { - ir_node *a = get_Phi_pred(n, 0); - ir_node *b = get_Phi_pred(n, 1); - if ( (get_irn_op(a) == op_Confirm) - && (get_irn_op(b) == op_Confirm) - && (get_irn_n(a, 0) == get_irn_n(b, 0)) - && (get_irn_n(a, 1) == get_irn_n(b, 1)) - && (a->data.num == (~b->data.num & irpn_True) )) { - return get_irn_n(a, 0); - } - } -#endif - /* If the Block has a Bad pred, we also have one. 
*/ for (i = 0; i < n_preds; ++i) - if (is_Bad (get_Block_cfgpred(block, i))) + if (is_Bad(get_Block_cfgpred(block, i))) set_Phi_pred(n, i, new_Bad()); /* Find first non-self-referencing input */ - for (i = 0; i < n_preds; ++i) { + for (i = 0; i < n_preds; ++i) { first_val = get_Phi_pred(n, i); if ( (first_val != n) /* not self pointer */ #if 1 - && (get_irn_op(first_val) != op_Bad) + && (! is_Bad(first_val)) #endif ) { /* value not dead */ break; /* then found first value. */ } } - /* A totally Bad or self-referencing Phi (we didn't break the above loop) */ - if (i >= n_preds) { return new_Bad(); } + if (i >= n_preds) { + /* A totally Bad or self-referencing Phi (we didn't break the above loop) */ + return new_Bad(); + } scnd_val = NULL; @@ -1097,17 +1187,17 @@ static ir_node *equivalent_node_Phi(ir_node *n) if ( (scnd_val != n) && (scnd_val != first_val) #if 1 - && (get_irn_op(scnd_val) != op_Bad) + && (! is_Bad(scnd_val)) #endif ) { break; } } - /* Fold, if no multiple distinct non-self-referencing inputs */ if (i >= n_preds) { + /* Fold, if no multiple distinct non-self-referencing inputs */ n = first_val; - DBG_OPT_PHI(oldn, first_val, n); + DBG_OPT_PHI(oldn, n); } else { /* skip the remaining Ids (done in get_Phi_pred). */ /* superfluous, since we walk all to propagate Block's Bads. @@ -1117,7 +1207,7 @@ static ir_node *equivalent_node_Phi(ir_node *n) } /** - * optimize Proj(Tuple) and gigo for ProjX in Bad block + * optimize Proj(Tuple) and gigo() for ProjX in Bad block */ static ir_node *equivalent_node_Proj(ir_node *n) { @@ -1134,10 +1224,11 @@ static ir_node *equivalent_node_Proj(ir_node *n) assert(0); /* This should not happen! */ n = new_Bad(); } - } else if (get_irn_mode(n) == mode_X && - is_Block_dead(get_nodes_block(n))) { - /* Remove dead control flow -- early gigo. */ - n = new_Bad(); + } else if (get_irn_mode(n) == mode_X) { + if (is_Block_dead(get_nodes_block(skip_Proj(n)))) { + /* Remove dead control flow -- early gigo(). 
*/ + n = new_Bad(); + } } return n; } @@ -1166,19 +1257,19 @@ static ir_node *equivalent_node_Mux(ir_node *n) tarval *ts = value_of(sel); /* Mux(true, f, t) == t */ - if (ts == get_tarval_b_true()) { + if (ts == tarval_b_true) { n = get_Mux_true(n); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C); } /* Mux(false, f, t) == f */ - else if (ts == get_tarval_b_false()) { + else if (ts == tarval_b_false) { n = get_Mux_false(n); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C); } /* Mux(v, x, x) == x */ else if (get_Mux_false(n) == get_Mux_true(n)) { n = get_Mux_true(n); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ); } else if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(get_irn_mode(n))) { ir_node *cmp = get_Proj_pred(sel); @@ -1202,12 +1293,12 @@ static ir_node *equivalent_node_Mux(ir_node *n) if (proj_nr == pn_Cmp_Eq) { /* Mux(a == 0, -a, a) ==> -a */ n = b; - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM); } else if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) { /* Mux(a != 0, -a, a) ==> a */ n = a; - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM); } } else if (classify_Const(b) == CNST_NULL) { @@ -1215,18 +1306,17 @@ static ir_node *equivalent_node_Mux(ir_node *n) if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) { /* Mux(a != 0, 0, a) ==> a */ n = a; - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM); } else if (proj_nr == pn_Cmp_Eq) { /* Mux(a == 0, 0, a) ==> 0 */ n = b; - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM); } } } } } - return n; } @@ -1299,7 +1389,6 @@ static ir_op *firm_set_default_equivalent_node(ir_op *op) switch (op->code) { CASE(Block); CASE(Jmp); - CASE(Cond); CASE(Or); CASE(Add); CASE(Eor); @@ -1444,7 +1533,7 @@ static ir_node *transform_node_Add(ir_node *n) ir_node *a = get_Add_left(n); if (a == get_Add_right(n)) { - ir_node *block = get_nodes_block(n); + ir_node *block = get_irn_n(n, -1); n = new_rd_Mul( get_irn_dbg_info(n), @@ -1453,7 +1542,7 @@ static ir_node *transform_node_Add(ir_node *n) a, new_r_Const_long(current_ir_graph, block, mode, 2), mode); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_A); } else { ir_node *b = get_Add_right(n); @@ -1462,21 +1551,21 @@ static ir_node *transform_node_Add(ir_node *n) n = new_rd_Sub( get_irn_dbg_info(n), current_ir_graph, - get_nodes_block(n), + get_irn_n(n, -1), b, get_Minus_op(a), mode); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B); } else if (get_irn_op(b) == op_Minus) { n = new_rd_Sub( get_irn_dbg_info(n), current_ir_graph, - get_nodes_block(n), + get_irn_n(n, -1), a, get_Minus_op(b), mode); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B); } } } @@ -1498,17 +1587,17 @@ static ir_node *transform_node_Sub(ir_node *n) n = new_rd_Minus( get_irn_dbg_info(n), current_ir_graph, - get_nodes_block(n), + get_irn_n(n, -1), get_Sub_right(n), mode); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A); } return n; } /** - Transform Mul(a,-1) into -a. + * Transform Mul(a,-1) into -a. 
* Do architecture dependent optimizations on Mul nodes */ static ir_node *transform_node_Mul(ir_node *n) { @@ -1525,8 +1614,8 @@ static ir_node *transform_node_Mul(ir_node *n) { else if (value_of(b) == get_mode_minus_one(mode)) r = a; if (r) { - n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), r, mode); - DBG_OPT_ALGSIM1(oldn, a, b, n); + n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), r, mode); + DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1); return n; } } @@ -1649,7 +1738,6 @@ static ir_node *transform_node_DivMod(ir_node *n) set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */ set_Tuple_pred(n, pn_DivMod_res_div, a); set_Tuple_pred(n, pn_DivMod_res_mod, b); - assert(get_nodes_block(n)); } return n; @@ -1676,7 +1764,7 @@ static ir_node *transform_node_Abs(ir_node *n) * not run it in the equivalent_node() context. */ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, - get_nodes_block(n), a, mode); + get_irn_n(n, -1), a, mode); DBG_OPT_CONFIRM(oldn, n); } @@ -1701,6 +1789,10 @@ static ir_node *transform_node_Cond(ir_node *n) ir_node *a = get_Cond_selector(n); tarval *ta = value_of(a); + /* we need block info which is not available in floating irgs */ + if (get_irg_pinned(current_ir_graph) == op_pin_state_floats) + return n; + if ((ta != tarval_bad) && (get_irn_mode(a) == mode_b) && (get_opt_unreachable_code())) { @@ -1757,9 +1849,9 @@ static ir_node *transform_node_Eor(ir_node *n) if (a == b) { /* a ^ a = 0 */ - n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), + n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), mode, get_mode_null(mode)); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A); } else if ((mode == mode_b) && (get_irn_op(a) == op_Proj) @@ -1767,18 +1859,18 @@ static ir_node *transform_node_Eor(ir_node *n) && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE) && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) { /* The Eor negates a Cmp. The Cmp has the negated result anyways! */ - n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a), - mode_b, get_negated_pnc(get_Proj_proj(a))); + n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a), + mode_b, get_negated_pnc(get_Proj_proj(a), mode)); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL); } else if ((mode == mode_b) && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) { /* The Eor is a Not. Replace it by a Not. */ /* ????!!!Extend to bitfield 1111111. */ - n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode_b); + n = new_r_Not(current_ir_graph, get_irn_n(n, -1), a, mode_b); - DBG_OPT_ALGSIM0(oldn, n); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT); } return n; @@ -1797,9 +1889,9 @@ static ir_node *transform_node_Not(ir_node *n) && (get_irn_mode(a) == mode_b) && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) { /* We negate a Cmp. The Cmp has the negated result anyways! 
*/ - n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a), - mode_b, get_negated_pnc(get_Proj_proj(a))); - DBG_OPT_ALGSIM0(oldn, n); + n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a), + mode_b, get_negated_pnc(get_Proj_proj(a), mode_b)); + DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP); } return n; @@ -1814,11 +1906,11 @@ static ir_node *transform_node_Cast(ir_node *n) { type *tp = get_irn_type(n); if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) { - n = new_rd_Const_type(NULL, current_ir_graph, get_nodes_block(pred), get_irn_mode(pred), + n = new_rd_Const_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred), get_Const_tarval(pred), tp); DBG_OPT_CSTEVAL(oldn, n); } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) { - n = new_rd_SymConst_type(NULL, current_ir_graph, get_nodes_block(pred), get_SymConst_symbol(pred), + n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_SymConst_symbol(pred), get_SymConst_kind(pred), tp); DBG_OPT_CSTEVAL(oldn, n); } @@ -1846,12 +1938,12 @@ static ir_node *transform_node_Proj_Div(ir_node *proj) if (proj_nr == pn_Div_X_except) { /* we found an exception handler, remove it */ return new_Bad(); - } else { + } else if (proj_nr == pn_Div_M) { /* the memory Proj can be removed */ ir_node *res = get_Div_mem(n); set_Div_mem(n, get_irg_no_mem(current_ir_graph)); - if (proj_nr == pn_Div_M) - return res; + + return res; } } return proj; @@ -1877,12 +1969,20 @@ static ir_node *transform_node_Proj_Mod(ir_node *proj) if (proj_nr == pn_Mod_X_except) { /* we found an exception handler, remove it */ return new_Bad(); - } else { + } else if (proj_nr == pn_Mod_M) { /* the memory Proj can be removed */ ir_node *res = get_Mod_mem(n); set_Mod_mem(n, get_irg_no_mem(current_ir_graph)); - if (proj_nr == pn_Mod_M) - return res; + + return res; + } + else if (proj_nr == pn_Mod_res && get_Mod_left(n) == b) { + /* a % a = 0 if a != 0 */ + ir_mode *mode = get_irn_mode(proj); + ir_node *res = new_Const(mode, get_mode_null(mode)); + + DBG_OPT_CSTEVAL(n, res); + return res; } } return proj; @@ -1909,12 +2009,20 @@ static ir_node *transform_node_Proj_DivMod(ir_node *proj) /* we found an exception handler, remove it */ return new_Bad(); } - else { + else if (proj_nr == pn_DivMod_M) { /* the memory Proj can be removed */ ir_node *res = get_DivMod_mem(n); set_DivMod_mem(n, get_irg_no_mem(current_ir_graph)); - if (proj_nr == pn_DivMod_M) - return res; + + return res; + } + else if (proj_nr == pn_DivMod_res_mod && get_DivMod_left(n) == b) { + /* a % a = 0 if a != 0 */ + ir_mode *mode = get_irn_mode(proj); + ir_node *res = new_Const(mode, get_mode_null(mode)); + + DBG_OPT_CSTEVAL(n, res); + return res; } } return proj; @@ -2025,8 +2133,8 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) if (mode_is_int(mode)) { /* Ne includes Unordered which is not possible on integers. 
* However, frontends often use this wrong, so fix it here */ - if (proj_nr == pn_Cmp_Ne) { - proj_nr = pn_Cmp_Lg; + if (proj_nr & pn_Cmp_Uo) { + proj_nr &= ~pn_Cmp_Uo; set_Proj_proj(proj, proj_nr); } @@ -2130,7 +2238,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) } if (changed) { - ir_node *block = get_nodes_block(n); + ir_node *block = get_irn_n(n, -1); /* Beware of get_nodes_Block() */ if (changed & 2) /* need a new Const */ right = new_Const(mode, tv); @@ -2267,7 +2375,7 @@ static ir_node *transform_node_Or_bf_store(ir_node *or) } /* ok, all conditions met */ - block = get_nodes_block(or); + block = get_irn_n(or, -1); new_and = new_r_And(current_ir_graph, block, value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode); @@ -2330,11 +2438,11 @@ static ir_node *transform_node_Or_Rot(ir_node *or) return or; /* yet, condition met */ - block = get_nodes_block(or); + block = get_irn_n(or, -1); n = new_r_Rot(current_ir_graph, block, x, c1, mode); - DBG_OPT_ALGSIM1(or, shl, shr, n); + DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROT); return n; } else if (get_irn_op(c1) == op_Sub) { @@ -2361,7 +2469,7 @@ static ir_node *transform_node_Or_Rot(ir_node *or) /* a Rot right is not supported, so use a rot left */ n = new_r_Rot(current_ir_graph, block, x, sub, mode); - DBG_OPT_ALGSIM0(or, n); + DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT); return n; } else if (get_irn_op(c2) == op_Sub) { @@ -2380,12 +2488,12 @@ static ir_node *transform_node_Or_Rot(ir_node *or) return or; /* yet, condition met */ - block = get_nodes_block(or); + block = get_irn_n(or, -1); /* a Rot Left */ n = new_r_Rot(current_ir_graph, block, x, v, mode); - DBG_OPT_ALGSIM0(or, n); + DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT); return n; } @@ -2407,7 +2515,9 @@ static ir_node *transform_node_Or(ir_node *or) static ir_node *transform_node(ir_node *n); /** - * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl + * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl. + * + * Should be moved to reassociation? */ static ir_node *transform_node_shift(ir_node *n) { @@ -2450,14 +2560,14 @@ static ir_node *transform_node_shift(ir_node *n) if (flag) { /* ok, we can replace it */ - ir_node *in[2], *irn, *block = get_nodes_block(n); + ir_node *in[2], *irn, *block = get_irn_n(n, -1); in[0] = get_binop_left(left); in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res); irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in); - DBG_OPT_ALGSIM0(n, irn); + DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT); return transform_node(irn); } @@ -2469,14 +2579,20 @@ static ir_node *transform_node_shift(ir_node *n) #define transform_node_Shl transform_node_shift /** - * Remove dead blocks in keep alive list. We do not generate a new End node. + * Remove dead blocks and nodes in dead blocks + * in keep alive list. We do not generate a new End node. 
*/ static ir_node *transform_node_End(ir_node *n) { int i, n_keepalives = get_End_n_keepalives(n); for (i = 0; i < n_keepalives; ++i) { ir_node *ka = get_End_keepalive(n, i); - if (is_Block(ka) && is_Block_dead(ka)) + if (is_Block(ka)) { + if (is_Block_dead(ka)) { + set_End_keepalive(n, i, new_Bad()); + } + } + else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka))) set_End_keepalive(n, i, new_Bad()); } return n; @@ -2497,7 +2613,7 @@ static ir_node *transform_node_Mux(ir_node *n) ir_node *t = get_Mux_true(n); if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) { - ir_node *block = get_nodes_block(n); + ir_node *block = get_irn_n(n, -1); /* * Note: normalization puts the constant on the right site, @@ -2517,7 +2633,7 @@ static ir_node *transform_node_Mux(ir_node *n) current_ir_graph, block, t, mode); - DBG_OPT_ALGSIM1(oldn, cmp, sel, n); + DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS); return n; } else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) { @@ -2531,7 +2647,7 @@ static ir_node *transform_node_Mux(ir_node *n) block, n, mode); - DBG_OPT_ALGSIM1(oldn, cmp, sel, n); + DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS); return n; } } @@ -2545,7 +2661,7 @@ static ir_node *transform_node_Mux(ir_node *n) current_ir_graph, block, f, mode); - DBG_OPT_ALGSIM1(oldn, cmp, sel, n); + DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS); return n; } else if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) { @@ -2559,7 +2675,7 @@ static ir_node *transform_node_Mux(ir_node *n) block, n, mode); - DBG_OPT_ALGSIM1(oldn, cmp, sel, n); + DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS); return n; } } @@ -2589,7 +2705,7 @@ static ir_node *transform_node_Mux(ir_node *n) new_r_Const_long(current_ir_graph, block, mode_Iu, get_mode_size_bits(mode) - 1), mode); - DBG_OPT_ALGSIM1(oldn, cmp, sel, n); + DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR); return n; } else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Ge) && @@ -2606,7 +2722,7 @@ static ir_node *transform_node_Mux(ir_node *n) new_r_Const_long(current_ir_graph, block, mode_Iu, get_mode_size_bits(mode) - 1), mode); - DBG_OPT_ALGSIM1(oldn, cmp, sel, n); + DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR); return n; } } @@ -2990,16 +3106,19 @@ static INLINE ir_node * gigo (ir_node *node) { int i, irn_arity; - ir_op* op = get_irn_op(node); + ir_op *op = get_irn_op(node); /* remove garbage blocks by looking at control flow that leaves the block and replacing the control flow by Bad. */ if (get_irn_mode(node) == mode_X) { - ir_node *block = get_nodes_block(node); - if (!get_Block_matured(block)) return node; /* Don't optimize nodes in immature blocks. */ - if (op == op_End) return node; /* Don't optimize End, may have Bads. */ + ir_node *block = get_nodes_block(skip_Proj(node)); - if (get_irn_op(block) == op_Block && get_Block_matured(block)) { + /* Don't optimize nodes in immature blocks. */ + if (!get_Block_matured(block)) return node; + /* Don't optimize End, may have Bads. */ + if (op == op_End) return node; + + if (is_Block(block)) { irn_arity = get_irn_arity(block); for (i = 0; i < irn_arity; i++) { if (!is_Bad(get_irn_n(block, i))) break; @@ -3013,7 +3132,11 @@ gigo (ir_node *node) if ( op != op_Block && op != op_Phi && op != op_Tuple) { irn_arity = get_irn_arity(node); - if (is_Block_dead(get_nodes_block(node))) + /* + * Beware: we can only read the block of a non-floating node. 
+ */ + if (is_irn_pinned_in_irg(node) && + is_Block_dead(get_nodes_block(node))) return new_Bad(); for (i = 0; i < irn_arity; i++) { @@ -3142,7 +3265,7 @@ optimize_node(ir_node *n) Run always for transformation induced Bads. */ n = gigo (n); - /* Now we have a legal, useful node. Enter it in hash table for cse */ + /* Now we have a legal, useful node. Enter it in hash table for CSE */ if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) { n = identify_remember (current_ir_graph->value_table, n); }