{
ir_node *a = get_Sub_left(n);
ir_node *b = get_Sub_right(n);
+ tarval *ta;
+ tarval *tb;
- tarval *ta = value_of(a);
- tarval *tb = value_of(b);
+ /* a - a */
+ if (a == b)
+ return get_tarval_null(get_irn_mode(n));
+
+ ta = value_of(a);
+ tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)
&& (get_irn_mode(a) == get_irn_mode(b))
if (tb != get_mode_null(get_tarval_mode(tb))) /* div by zero: return tarval_bad */
return tarval_mod(ta, tb);
}
+
return tarval_bad;
}
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal self cycle. */
- n = new_Bad(); DBG_OPT_DEAD;
+ n = new_Bad();
+ DBG_OPT_DEAD(oldn, n);
} else if (get_opt_control_flow_straightening()) {
- n = predblock; DBG_OPT_STG;
+ n = predblock;
+ DBG_OPT_STG(oldn, n);
}
}
else if ((get_Block_n_cfgpreds(n) == 1) &&
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal self cycle. */
- n = new_Bad(); DBG_OPT_DEAD;
+ n = new_Bad();
+ DBG_OPT_DEAD(oldn, n);
}
}
else if ((get_Block_n_cfgpreds(n) == 2) &&
(get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
/* Also a single entry Block following a single exit Block. Phis have
twice the same operand and will be optimized away. */
- n = get_nodes_block(a); DBG_OPT_IFSIM;
+ n = get_nodes_block(a);
+ DBG_OPT_IFSIM(oldn, a, b, n);
}
} else if (get_opt_unreachable_code() &&
(n != current_ir_graph->start_block) &&
/* remove a v a */
if (a == b) {
- n = a; DBG_OPT_ALGSIM1;
+ n = a;
+
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
return n;
/* If this predecessors constant value is zero, the operation is
unnecessary. Remove it: */
if (classify_tarval (tv) == TV_CLASSIFY_NULL) {
- n = on; DBG_OPT_ALGSIM1;
+ n = on;
+
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
return n;
ir_node *b = get_binop_right(n);
if (classify_tarval(computed_value(b)) == TV_CLASSIFY_NULL) {
- n = a; DBG_OPT_ALGSIM1;
+ n = a;
+
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
return n;
/* optimize symmetric unop */
if (get_irn_op(pred) == get_irn_op(n)) {
- n = get_unop_op(pred); DBG_OPT_ALGSIM2;
+ n = get_unop_op(pred);
+ DBG_OPT_ALGSIM2(oldn, pred, n);
}
return n;
}
/* Mul is commutative and has again an other neutral element. */
if (classify_tarval (computed_value (a)) == TV_CLASSIFY_ONE) {
- n = b; DBG_OPT_ALGSIM1;
+ n = b;
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
} else if (classify_tarval (computed_value (b)) == TV_CLASSIFY_ONE) {
- n = a; DBG_OPT_ALGSIM1;
+ n = a;
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
return n;
}
n = a; /* And has it's own neutral element */
} else if (classify_tarval(computed_value(a)) == TV_CLASSIFY_ALL_ONE) {
n = b;
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
} else if (classify_tarval(computed_value(b)) == TV_CLASSIFY_ALL_ONE) {
n = a;
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
- if (n != oldn) DBG_OPT_ALGSIM1;
return n;
}
ir_mode *a_mode = get_irn_mode(a);
if (n_mode == a_mode) { /* No Conv necessary */
- n = a; DBG_OPT_ALGSIM3;
+ n = a;
+ DBG_OPT_ALGSIM3(oldn, a, n);
} else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
ir_mode *b_mode;
if (n_mode == b_mode) {
if (n_mode == mode_b) {
- n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */ DBG_OPT_ALGSIM1;
+ n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
if (smaller_mode(b_mode, a_mode)){
- n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */ DBG_OPT_ALGSIM1;
+ n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
+ DBG_OPT_ALGSIM1(oldn, a, b, n);
}
}
}
/* Fold, if no multiple distinct non-self-referencing inputs */
if (i >= n_preds) {
- n = first_val; DBG_OPT_PHI;
+ n = first_val;
+ DBG_OPT_PHI(oldn, first_val, n);
} else {
/* skip the remaining Ids (done in get_Phi_pred). */
/* superfluous, since we walk all to propagate Block's Bads.
if ( get_irn_op(a) == op_Tuple) {
/* Remove the Tuple/Proj combination. */
if ( get_Proj_proj(n) <= get_Tuple_n_preds(a) ) {
- n = get_Tuple_pred(a, get_Proj_proj(n)); DBG_OPT_TUPLE;
+ n = get_Tuple_pred(a, get_Proj_proj(n));
+ DBG_OPT_TUPLE(oldn, a, n);
} else {
assert(0); /* This should not happen! */
n = new_Bad();
{
ir_node *oldn = n;
- n = follow_Id(n); DBG_OPT_ID;
+ n = follow_Id(n);
+ DBG_OPT_ID(oldn, n);
return n;
}
ir_node *n = get_Proj_pred(proj);
ir_node *b;
tarval *tb;
+ long proj_nr;
switch (get_irn_opcode(n)) {
case iro_Div:
- if (get_Proj_proj(proj) == pn_Div_X_except) {
- b = get_Div_right(n);
- tb = computed_value(b);
+ b = get_Div_right(n);
+ tb = computed_value(b);
+
+ if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* div(x, c) && c != 0 */
+ proj_nr = get_Proj_proj(proj);
- /* we found an exception handler, see if we can remove it */
- if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* div(x, c) && c != 0 */
+ if (proj_nr == pn_Div_X_except) {
+ /* we found an exception handler, remove it */
return new_Bad();
}
+ else if (proj_nr == pn_Div_M) {
+ /* the memory Proj can be removed */
+ ir_node *res = get_Div_mem(n);
+ set_Div_mem(n, get_irg_initial_mem(current_ir_graph));
+ return res;
+ }
}
break;
case iro_Mod:
- if (get_Proj_proj(proj) == pn_Mod_X_except) {
- b = get_Mod_right(n);
- tb = computed_value(b);
+ b = get_Mod_right(n);
+ tb = computed_value(b);
+
+ if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* mod(x, c) && c != 0 */
+ proj_nr = get_Proj_proj(proj);
- if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* mod(x, c) && c != 0 */
+ if (proj_nr == pn_Mod_X_except) {
+ /* we found an exception handler, remove it */
return new_Bad();
}
+ else if (proj_nr == pn_Mod_M) {
+ /* the memory Proj can be removed */
+ ir_node *res = get_Mod_mem(n);
+ set_Mod_mem(n, get_irg_initial_mem(current_ir_graph));
+ return res;
+ }
}
break;
case iro_DivMod:
- if (get_Proj_proj(proj) == pn_DivMod_X_except) {
- b = get_DivMod_right(n);
- tb = computed_value(b);
+ b = get_DivMod_right(n);
+ tb = computed_value(b);
+
+ if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* DivMod(x, c) && c != 0 */
+ proj_nr = get_Proj_proj(proj);
- if (tb != tarval_bad && classify_tarval(tb) != TV_CLASSIFY_NULL) { /* DivMod(x, c) && c != 0 */
+ if (proj_nr == pn_DivMod_X_except) {
return new_Bad();
}
+ else if (proj_nr == pn_DivMod_M) {
+ /* the memory Proj can be removed */
+ ir_node *res = get_DivMod_mem(n);
+ set_DivMod_mem(n, get_irg_initial_mem(current_ir_graph));
+ return res;
+ }
}
break;
return proj;
case iro_Tuple:
- /* should not happen, but if it does will optimize */
+ /* should not happen, but if it does it will be optimized away */
break;
default:
* for DBG_OPT_ALGSIM0
*/
int node_size = offsetof(ir_node, attr) + n->op->attr_size;
- ir_node *x = alloca(node_size);
+ oldn = alloca(node_size);
- memcpy(x, n, node_size);
- oldn = x;
+ memcpy(oldn, n, node_size);
+ CLONE_ARR_A(ir_node *, oldn->in, n->in);
+
+ /* ARG, copy the in array, we need it for statistics */
+ memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
/* evaluation was successful -- replace the node. */
obstack_free (current_ir_graph->obst, n);
n = new_Const (get_tarval_mode (tv), tv);
- DBG_OPT_ALGSIM0;
+
+ DBG_OPT_ALGSIM0(oldn, n);
return n;
}
}
if ((get_irn_mode(n) != mode_T) && (tv != tarval_bad)) {
/* evaluation was successful -- replace the node. */
n = new_Const (get_tarval_mode (tv), tv);
- DBG_OPT_ALGSIM0;
+
+ DBG_OPT_ALGSIM0(oldn, n);
return n;
}
}