- /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
- if (i >= n_preds) { n = new_Bad(); break; }
-
- scnd_val = NULL;
-
- /* follow_Id () for rest of inputs, determine if any of these
- are non-self-referencing */
- while (++i < n_preds) {
- scnd_val = follow_Id(get_Phi_pred(n, i));
- /* skip Id's */
- set_Phi_pred(n, i, scnd_val);
- if ( (scnd_val != n)
- && (scnd_val != first_val)
- && (get_irn_op(scnd_val) != op_Bad)
- && !(is_Bad (get_Block_cfgpred(block, i))) ) {
- break;
- }
- }
+/* Eor (xor) is commutative with neutral element 0; delegate to the
+   shared neutral-zero handler (defined elsewhere in this file --
+   presumably folds Eor(a, 0) / Eor(0, a) -> a; confirm there). */
+static ir_node *equivalent_node_Eor(ir_node *n)
+{
+  return equivalent_node_neutral_zero(n);
+}
+
+/* Common handler for non-commutative binops whose value equals the
+   left operand when the right operand is the constant 0
+   (e.g. a - 0 == a, a << 0 == a).  Only the right predecessor is tested.
+   NOTE(review): despite the name "left_zero", the code classifies the
+   RIGHT operand -- confirm the intended naming against the callers. */
+static ir_node *equivalent_node_left_zero(ir_node *n)
+{
+  ir_node *oldn = n;  /* kept for the DBG_OPT_ALGSIM1 macro below */
+
+  ir_node *a = get_binop_left(n);
+  ir_node *b = get_binop_right(n);
+
+  /* optimize operations that are not commutative but have neutral 0 on
+     the right.  Test only one predecessor. */
+  if (tarval_classify (computed_value (b)) == TV_CLASSIFY_NULL) {
+    n = a; DBG_OPT_ALGSIM1;  /* record the algebraic simplification */
+  }
+
+  return n;
+}
+
+/* Sub is not commutative: a - 0 == a, handled by the shared helper. */
+static ir_node *equivalent_node_Sub(ir_node *n)
+{
+  return equivalent_node_left_zero(n);
+}
+
+/* Shl: a << 0 == a, handled by the shared helper. */
+static ir_node *equivalent_node_Shl(ir_node *n)
+{
+  return equivalent_node_left_zero(n);
+}
+
+/* Shr: a >> 0 == a (logical shift), handled by the shared helper. */
+static ir_node *equivalent_node_Shr(ir_node *n)
+{
+  return equivalent_node_left_zero(n);
+}
+
+/* Shrs: a >> 0 == a (arithmetic shift), handled by the shared helper. */
+static ir_node *equivalent_node_Shrs(ir_node *n)
+{
+  return equivalent_node_left_zero(n);
+}
+
+/* Rot: rotating by 0 is the identity, handled by the shared helper. */
+static ir_node *equivalent_node_Rot(ir_node *n)
+{
+  return equivalent_node_left_zero(n);
+}
+
+/* Fold a self-inverse unary op applied to itself: op(op(x)) -> x.
+   The check compares the operator of the node with the operator of its
+   single operand, so it only fires for, e.g., Not(Not(x)) or
+   Minus(Minus(x)). */
+static ir_node *equivalent_node_symmetric_unop(ir_node *n)
+{
+  ir_node *oldn = n;  /* kept for the DBG_OPT_ALGSIM2 macro below */
+
+  /* optimize symmetric unop */
+  if (get_irn_op(get_unop_op(n)) == get_irn_op(n)) {
+    n = get_unop_op(get_unop_op(n)); DBG_OPT_ALGSIM2;
+  }
+  return n;
+}
+
+static ir_node *equivalent_node_Not(ir_node *n)
+{
+  /* Not(Not(x)) == x */
+  return equivalent_node_symmetric_unop(n);
+}
+
+static ir_node *equivalent_node_Minus(ir_node *n)
+{
+  /* --x == x */ /* ??? Is this possible or can --x raise an
+     out of bounds exception if min != max? (original author's open
+     question -- left for review) */
+  return equivalent_node_symmetric_unop(n);
+}
+
+/* Mul is commutative with neutral element 1: 1 * b -> b, a * 1 -> a.
+   Both operands are tested since 1 may appear on either side. */
+static ir_node *equivalent_node_Mul(ir_node *n)
+{
+  ir_node *oldn = n;  /* kept for the DBG_OPT_ALGSIM1 macro below */
+
+  ir_node *a = get_Mul_left(n);
+  ir_node *b = get_Mul_right(n);
+
+  /* Mul is commutative and has again an other neutral element. */
+  if (tarval_classify (computed_value (a)) == TV_CLASSIFY_ONE) {
+    n = b; DBG_OPT_ALGSIM1;
+  } else if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE) {
+    n = a; DBG_OPT_ALGSIM1;
+  }
+  return n;
+}
+
+/* div(x, 1) == x: the Div is replaced in place by a 3-ary Tuple so the
+   existing Proj users are rerouted to (mem, Bad, x).
+   NOTE(review): pred 1 is presumably the exception/control projection --
+   Bad because division by the constant 1 cannot trap; confirm the
+   projection numbering against the Div node definition. */
+static ir_node *equivalent_node_Div(ir_node *n)
+{
+  ir_node *a = get_Div_left(n);
+  ir_node *b = get_Div_right(n);
+
+  /* Div is not commutative. */
+  if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
+    /* Turn Div into a tuple (mem, bad, a) */
+    ir_node *mem = get_Div_mem(n);  /* fetch before the node is rewritten */
+    turn_into_tuple(n, 3);
+    set_Tuple_pred(n, 0, mem);
+    set_Tuple_pred(n, 1, new_Bad());
+    set_Tuple_pred(n, 2, a);
+  }
+  return n;
+}
+
+/* And: a & a == a (idempotent), and the all-ones value is the neutral
+   element on either side (allOnes & b == b, a & allOnes == a). */
+static ir_node *equivalent_node_And(ir_node *n)
+{
+  ir_node *oldn = n;  /* remembered so the DBG macro fires only on change */
+
+  ir_node *a = get_And_left(n);
+  ir_node *b = get_And_right(n);
+
+  if (a == b) {
+    n = a; /* And has it's own neutral element */
+  } else if (tarval_classify (computed_value (a)) == TV_CLASSIFY_ALL_ONE) {
+    n = b;
+  } else if (tarval_classify (computed_value (b)) == TV_CLASSIFY_ALL_ONE) {
+    n = a;
+  }
+  if (n != oldn) DBG_OPT_ALGSIM1;
+  return n;
+}
+
+static ir_node *equivalent_node_Conv(ir_node *n)
+{
+ ir_node *oldn = n;
+ ir_node *a = get_Conv_op(n);
+ ir_node *b;
+
+ ir_mode *n_mode = get_irn_mode(n);
+ ir_mode *a_mode = get_irn_mode(a);
+
+ if (n_mode == a_mode) { /* No Conv necessary */
+ n = a; DBG_OPT_ALGSIM3;
+ } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
+ ir_mode *b_mode;
+
+ b = get_Conv_op(a);
+ n_mode = get_irn_mode(n);
+ b_mode = get_irn_mode(b);