X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fir%2Firopt.c;h=30134911b00b0adbf9c9a9c5c6bcfe00a5ccaff8;hb=f9d25133f86594ca2b1f33fb0b41a591ecc9b914;hp=69ce9d0f25bcb339030c92f1e1eb68320291b118;hpb=abb11c60e50826d82d0efcee52842c0ccdde95e0;p=libfirm

diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index 69ce9d0f2..30134911b 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -723,19 +723,19 @@ static ir_node *equivalent_node_Block(ir_node *n)
       (get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
     /* Also a single entry Block following a single exit Block.  Phis have
        twice the same operand and will be optimized away. */
-    n = get_nodes_block(a);
+    n = get_nodes_block(get_Proj_pred(a));
     DBG_OPT_IFSIM1(oldn, a, b, n);
   }
 } else if (get_opt_unreachable_code() &&
            (n != current_ir_graph->start_block) &&
            (n != current_ir_graph->end_block) ) {
-  int i, n_cfg = get_Block_n_cfgpreds(n);
+  int i;
 
   /* If all inputs are dead, this block is dead too, except if it is
-     the start or end block.  This is a step of unreachable code
+     the start or end block.  This is one step of unreachable code
      elimination */
-  for (i = 0; i < n_cfg; i++) {
+  for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
     ir_node *pred = get_Block_cfgpred(n, i);
     ir_node *pred_blk;
@@ -749,8 +749,10 @@ static ir_node *equivalent_node_Block(ir_node *n)
       break;
     }
   }
-  if (i == n_cfg)
+  if (i < 0) {
     n = set_Block_dead(n);
+    DBG_OPT_DEAD_BLOCK(oldn, n);
+  }
 }
 
 return n;
@@ -762,7 +764,6 @@ static ir_node *equivalent_node_Block(ir_node *n)
  */
 static ir_node *equivalent_node_Jmp(ir_node *n)
 {
-  /* GL: Why not same for op_Raise?? */
   /* unreachable code elimination */
   if (is_Block_dead(get_nodes_block(n)))
     n = new_Bad();
@@ -770,6 +771,10 @@ static ir_node *equivalent_node_Jmp(ir_node *n)
   return n;
 }
 
+/* Same for op_Raise */
+#define equivalent_node_Raise equivalent_node_Jmp
+
+
 /* We do not evaluate Cond here as we replace it by a new node, a
    Jmp. See transform_node_Proj_Cond(). */
@@ -1224,12 +1229,14 @@ static ir_node *equivalent_node_Proj(ir_node *n)
       assert(0); /* This should not happen! */
       n = new_Bad();
     }
-  } else if (get_irn_mode(n) == mode_X) {
+  }
+  else if (get_irn_mode(n) == mode_X) {
     if (is_Block_dead(get_nodes_block(skip_Proj(n)))) {
       /* Remove dead control flow -- early gigo(). */
       n = new_Bad();
     }
   }
+
   return n;
 }
@@ -1344,23 +1351,54 @@ static ir_node *equivalent_node_Cmp(ir_node *n)
 /**
  * Remove Confirm nodes if setting is on.
+ * Replace Confirm(x, '=', Constlike) by Constlike.
  */
 static ir_node *equivalent_node_Confirm(ir_node *n)
 {
-  if (get_Confirm_cmp(n) == pn_Cmp_Eq) {
+  ir_node *pred = get_Confirm_value(n);
+  pn_Cmp  pnc   = get_Confirm_cmp(n);
+
+  if (get_irn_op(pred) == op_Confirm && pnc == get_Confirm_cmp(pred)) {
+    /*
+     * rare case: two identical Confirms one after another,
+     * replace the second one with the first.
+     */
+    return pred;
+  }
+  if (pnc == pn_Cmp_Eq) {
     ir_node *bound = get_Confirm_bound(n);
-    ir_op *op = get_irn_op(bound);
 
     /*
      * Optimize a rare case:
-     * Confirm(x, '=', Const) ==> Const
+     * Confirm(x, '=', Constlike) ==> Constlike
      */
-    if (op == op_Const || op == op_SymConst)
+    if (is_irn_constlike(bound)) {
+      DBG_OPT_CONFIRM(n, bound);
       return bound;
+    }
   }
   return get_opt_remove_Confirm() ? get_Confirm_value(n) : n;
 }
 
+/**
+ * Optimize CopyB(mem, x, x) into a Nop
+ */
+static ir_node *equivalent_node_CopyB(ir_node *n)
+{
+  ir_node *a = get_CopyB_dst(n);
+  ir_node *b = get_CopyB_src(n);
+
+  if (a == b) {
+    /* Turn CopyB into a tuple (mem, bad, bad) */
+    ir_node *mem = get_CopyB_mem(n);
+    turn_into_tuple(n, pn_CopyB_max);
+    set_Tuple_pred(n, pn_CopyB_M, mem);
+    set_Tuple_pred(n, pn_CopyB_X_except, new_Bad());  /* no exception */
+    set_Tuple_pred(n, pn_CopyB_M_except, new_Bad());
+  }
+  return n;
+}
+
 /**
  * equivalent_node() returns a node equivalent to input n. It skips all nodes that
  * perform no actual computation, as, e.g., the Id nodes.  It does not create
@@ -1389,6 +1427,7 @@ static ir_op *firm_set_default_equivalent_node(ir_op *op)
   switch (op->code) {
   CASE(Block);
   CASE(Jmp);
+  CASE(Raise);
   CASE(Or);
   CASE(Add);
   CASE(Eor);
@@ -1411,6 +1450,7 @@ static ir_op *firm_set_default_equivalent_node(ir_op *op)
   CASE(Mux);
   CASE(Cmp);
   CASE(Confirm);
+  CASE(CopyB);
   default:
     op->equivalent_node = NULL;
   }
@@ -1644,7 +1684,7 @@ static ir_node *transform_node_Div(ir_node *n)
     /* Turn Div into a tuple (mem, bad, value) */
     ir_node *mem = get_Div_mem(n);
 
-    turn_into_tuple(n, 3);
+    turn_into_tuple(n, pn_Div_max);
     set_Tuple_pred(n, pn_Div_M, mem);
     set_Tuple_pred(n, pn_Div_X_except, new_Bad());
     set_Tuple_pred(n, pn_Div_res, value);
@@ -1809,30 +1849,6 @@ static ir_node *transform_node_Cond(ir_node *n)
     }
     /* We might generate an endless loop, so keep it alive. */
     add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
-  } else if ((ta != tarval_bad) &&
-             (get_irn_mode(a) == mode_Iu) &&
-             (get_Cond_kind(n) == dense) &&
-             (get_opt_unreachable_code())) {
-    /* I don't want to allow Tuples smaller than the biggest Proj.
-       Also this tuple might get really big...
-       I generate the Jmp here, and remember it in link.  Link is used
-       when optimizing Proj. */
-    set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_block(n)));
-    /* We might generate an endless loop, so keep it alive. */
-    add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
-  } else if ((get_irn_op(a) == op_Eor)
-             && (get_irn_mode(a) == mode_b)
-             && (classify_tarval(value_of(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
-    /* The Eor is a negate.  Generate a new Cond without the negate,
-       simulate the negate by exchanging the results. */
-    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
-                               get_Eor_left(a)));
-  } else if ((get_irn_op(a) == op_Not)
-             && (get_irn_mode(a) == mode_b)) {
-    /* A Not before the Cond.  Generate a new Cond without the Not,
-       simulate the Not by exchanging the results. */
-    set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
-                               get_Not_op(a)));
   }
   return n;
 }
@@ -2036,21 +2052,24 @@ static ir_node *transform_node_Proj_Cond(ir_node *proj)
   if (get_opt_unreachable_code()) {
     ir_node *n = get_Proj_pred(proj);
     ir_node *b = get_Cond_selector(n);
-    tarval *tb = value_of(b);
 
-    if (tb != tarval_bad && mode_is_int(get_tarval_mode(tb))) {
-      /* we have a constant switch */
-      long num = get_Proj_proj(proj);
+    if (mode_is_int(get_irn_mode(b))) {
+      tarval *tb = value_of(b);
 
-      if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Proj's yet */
-        if (get_tarval_long(tb) == num) {
-          /* Do NOT create a jump here, or we will have 2 control flow ops
-           * in a block. This case is optimized away in optimize_cf(). */
-          return proj;
-        }
-        else {
-          /* this case will NEVER be taken, kill it */
-          return new_Bad();
+      if (tb != tarval_bad) {
+        /* we have a constant switch */
+        long num = get_Proj_proj(proj);
+
+        if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Proj's yet */
+          if (get_tarval_long(tb) == num) {
+            /* Do NOT create a jump here, or we will have 2 control flow ops
+             * in a block. This case is optimized away in optimize_cf(). */
+            return proj;
+          }
+          else {
+            /* this case will NEVER be taken, kill it */
+            return new_Bad();
+          }
+        }
       }
     }
@@ -3121,7 +3140,8 @@ gigo (ir_node *node)
     if (is_Block(block)) {
       irn_arity = get_irn_arity(block);
       for (i = 0; i < irn_arity; i++) {
-        if (!is_Bad(get_irn_n(block, i))) break;
+        if (!is_Bad(get_irn_n(block, i)))
+          break;
       }
       if (i == irn_arity) return new_Bad();
     }
@@ -3140,9 +3160,12 @@ gigo (ir_node *node)
       return new_Bad();
 
     for (i = 0; i < irn_arity; i++) {
-      if (is_Bad(get_irn_n(node, i))) {
+      ir_node *pred = get_irn_n(node, i);
+
+      if (is_Bad(pred))
         return new_Bad();
-      }
+      if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
+        return new_Unknown(get_irn_mode(node));
     }
   }
 #if 0
@@ -3166,6 +3189,8 @@
  * These optimizations deallocate nodes from the obstack.
  * It can only be called if it is guaranteed that no other nodes
  * reference this one, i.e., right after construction of a node.
+ *
+ * current_ir_graph must be set to the graph of the node!
  */
 ir_node *
 optimize_node(ir_node *n)
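
Note (not part of the patch): the new equivalent_node_CopyB() above does not
remove the CopyB node itself; turning it into a Tuple only pays off because
libfirm's generic Proj handling folds Proj(Tuple, k) to the Tuple's k-th
operand, so memory users of the dead CopyB see mem again and the exception
Projs resolve to Bad. Below is a minimal sketch of that folding step, assuming
the standard accessors already used in the patch (get_Proj_pred(),
get_Proj_proj(), get_Tuple_pred()); the helper name fold_proj_on_tuple() is
hypothetical.

#include "irnode_t.h"  /* assumed internal header providing the accessors */

static ir_node *fold_proj_on_tuple(ir_node *proj)
{
  ir_node *pred = get_Proj_pred(proj);

  if (get_irn_op(pred) == op_Tuple) {
    /* Proj(Tuple, k) is equivalent to the Tuple's k-th operand; for the
       CopyB rewrite this yields mem for pn_CopyB_M and Bad for the two
       exception Projs, which later phases drop as dead control flow. */
    return get_Tuple_pred(pred, (int) get_Proj_proj(proj));
  }
  return proj;
}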