break;
case iro_Minus:
if (ta && mode_is_float(get_irn_mode(a)))
- res = /*tarval_minus (ta);*/ res;
+ res = tarval_neg (ta);
break;
case iro_Mul:
if (ta && tb) /* tarval_mul tests for equivalent modes itself */ {
res = tarval_mul (ta, tb);
} else {
- /* calls computed_value recursive and returns the 0 with proper
- mode. Why is this an extra case? */
+ /* a*0 = 0 or 0*b = 0:
+ calls computed_value recursively and returns the 0 with the
+ proper mode. */
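+ /* e.g. Mul(x, Const 0) folds to the Const 0 even when x is unknown. */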
tarval *v;
if ( (tarval_classify ((v = computed_value (a))) == 0)
|| (tarval_classify ((v = computed_value (b))) == 0)) {
res = v;
}
}
break;
+ case iro_Quot:
+ /* This was missing in the original implementation. Why? */
+ if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
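+ /* a division by zero is not folded; the node is left for run time */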
+ if (tarval_classify(tb) == 0) {res = NULL; break;}
+ res = tarval_quo(ta, tb);
+ }
+ break;
+ case iro_Div:
+ /* This was missing in the original implementation. Why? */
+ if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
+ if (tarval_classify(tb) == 0) {res = NULL; break;}
+ res = tarval_div(ta, tb);
+ }
+ break;
+ case iro_Mod:
+ /* This was missing in the original implementation. Why? */
+ if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
+ if (tarval_classify(tb) == 0) {res = NULL; break;}
+ res = tarval_mod(ta, tb);
+ }
+ break;
+ /* for iro_DivMod see iro_Proj */
case iro_Abs:
if (ta)
- res = /*tarval_abs (ta);*/ res;
- /* allowed or problems with max/min ?? */
+ res = tarval_abs (ta);
break;
case iro_And:
if (ta && tb) {
res = tarval_and (ta, tb);
}
break;
case iro_Eor: if (ta && tb) { res = tarval_eor (ta, tb); } break;
- case iro_Not: if (ta) { /*res = tarval_not (ta)*/; } break;
+ case iro_Not: if (ta) { res = tarval_not (ta); } break;
case iro_Shl: if (ta && tb) { res = tarval_shl (ta, tb); } break;
+ /* tarval_shr is faulty !! */
case iro_Shr: if (ta && tb) { res = tarval_shr (ta, tb); } break;
- case iro_Shrs: if(ta && tb) { /*res = tarval_shrs (ta, tb)*/; } break;
+ case iro_Shrs: if (ta && tb) { /*res = tarval_shrs (ta, tb)*/; } break;
case iro_Rot: if (ta && tb) { /*res = tarval_rot (ta, tb)*/; } break;
- case iro_Conv: if(ta) { res = tarval_convert_to (ta, get_irn_mode (n)); }
+ case iro_Conv: if (ta) { res = tarval_convert_to (ta, get_irn_mode (n)); }
break;
- case iro_Proj:
+ case iro_Proj: /* iro_Cmp */
{
ir_node *aa, *ab;
res = tarval_from_long (mode_b, get_Proj_proj(n) & irpn_Ne);
}
}
+ } else if (get_irn_op(a) == op_DivMod) {
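+ /* a DivMod computes both results; the Proj number selects Div or Mod */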
+ ta = value_of(get_DivMod_left(a));
+ tb = value_of(get_DivMod_right(a));
+ if (ta && tb
+ && (get_irn_mode(get_DivMod_left(a)) == get_irn_mode(get_DivMod_right(a)))) {
+ if (tarval_classify(tb) == 0) {res = NULL; break;}
+ if (get_Proj_proj(n) == 0) /* Div */
+ res = tarval_div(ta, tb);
+ else /* Mod */
+ res = tarval_mod(ta, tb);
+ }
} else {
/* printf(" # comp_val: Proj node, not optimized\n"); */
}
set_Tuple_pred(n, 0, jmp);
set_Tuple_pred(n, 1, new_Bad());
}
- } else if (ta && (get_irn_mode(a) == mode_I)) {
+ } else if (ta && (get_irn_mode(a) == mode_I) && (get_Cond_kind(n) == dense)) {
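+ /* The Tuple built below is indexed by the case values, so only
+ dense Conds can be optimized here. */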
/* I don't want to allow Tuples smaller than the biggest Proj.
Also this tuple might get really big...
I generate the Jmp here, and remember it in link. Link is used
set_Proj_proj(n, 0);
} else if ( (get_irn_op(a) == op_Cond)
&& (get_irn_mode(get_Cond_selector(a)) == mode_I)
- && value_of(a)) {
+ && value_of(a)
+ && (get_Cond_kind(a) == dense)) {
/* The Cond is a Switch on a Constant */
if (get_Proj_proj(n) == tv_val_CHIL(value_of(a))) {
/* The always taken branch, reuse the existing Jmp. */
return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
|| (get_irn_symconst_attr(a).tori.typ != get_irn_symconst_attr(b).tori.typ);
case iro_Call:
- return (get_irn_call_attr(a)->kind != get_irn_call_attr(b)->kind)
- || (get_irn_call_attr(a)->arity != get_irn_call_attr(b)->arity);
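+ /* the call attributes are compared by identity, not field by field */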
+ return (get_irn_call_attr(a) != get_irn_call_attr(b));
case iro_Sel:
return (get_irn_sel_attr(a).ent->kind != get_irn_sel_attr(b).ent->kind)
|| (get_irn_sel_attr(a).ent->name != get_irn_sel_attr(b).ent->name)