+  }
+  return tarval_bad;
+}
+
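+/* Return the value of an Eor if both operands are constant tarvals,
+   else tarval_bad. */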
+static tarval *computed_value_Eor(ir_node *n)
+{
+  ir_node *a = get_Eor_left(n);
+  ir_node *b = get_Eor_right(n);
+
+  tarval *ta = value_of(a);
+  tarval *tb = value_of(b);
+
+  if ((ta != tarval_bad) && (tb != tarval_bad)) {
+    return tarval_eor (ta, tb);
+  }
+  return tarval_bad;
+}
+
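+/* Return the value of a Not if its operand is a constant tarval,
+   else tarval_bad. */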
+static tarval *computed_value_Not(ir_node *n)
+{
+  ir_node *a = get_Not_op(n);
+  tarval *ta = value_of(a);
+
+  if (ta != tarval_bad)
+    return tarval_not(ta);
+
+  return tarval_bad;
+}
+
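+/* Return the value of a Shl if both operands are constant tarvals,
+   else tarval_bad. */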
+static tarval *computed_value_Shl(ir_node *n)
+{
+  ir_node *a = get_Shl_left(n);
+  ir_node *b = get_Shl_right(n);
+
+  tarval *ta = value_of(a);
+  tarval *tb = value_of(b);
+
+  if ((ta != tarval_bad) && (tb != tarval_bad)) {
+    return tarval_shl (ta, tb);
+  }
+  return tarval_bad;
+}
+
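+/* Return the value of a Shr (logical shift right) if both operands are
+   constant tarvals, else tarval_bad. */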
+static tarval *computed_value_Shr(ir_node *n)
+{
+  ir_node *a = get_Shr_left(n);
+  ir_node *b = get_Shr_right(n);
+
+  tarval *ta = value_of(a);
+  tarval *tb = value_of(b);
+
+  if ((ta != tarval_bad) && (tb != tarval_bad)) {
+    return tarval_shr (ta, tb);
+  }
+  return tarval_bad;
+}
+
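+/* Return the value of a Shrs (arithmetic shift right) if both operands are
+   constant tarvals, else tarval_bad. */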
+static tarval *computed_value_Shrs(ir_node *n)
+{
+  ir_node *a = get_Shrs_left(n);
+  ir_node *b = get_Shrs_right(n);
+
+  tarval *ta = value_of(a);
+  tarval *tb = value_of(b);
+
+  if ((ta != tarval_bad) && (tb != tarval_bad)) {
+    return tarval_shrs (ta, tb);
+  }
+  return tarval_bad;
+}
+
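+/* Rot is not folded yet: the tarval_rot call below is still commented out,
+   so this always returns tarval_bad. */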
+static tarval *computed_value_Rot(ir_node *n)
+{
+  ir_node *a = get_Rot_left(n);
+  ir_node *b = get_Rot_right(n);
+
+  tarval *ta = value_of(a);
+  tarval *tb = value_of(b);
+
+  if ((ta != tarval_bad) && (tb != tarval_bad)) {
+    /* return tarval_rot (ta, tb); */
+  }
+  return tarval_bad;
+}
+
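+/* Return the operand's tarval converted to the mode of the Conv node,
+   if the operand is constant, else tarval_bad. */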
+static tarval *computed_value_Conv(ir_node *n)
+{
+  ir_node *a = get_Conv_op(n);
+  tarval *ta = value_of(a);
+
+  if (ta != tarval_bad)
+    return tarval_convert_to(ta, get_irn_mode(n));
+
+  return tarval_bad;
+}
+
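+/* Evaluate a Proj, in particular a Proj of a Cmp node; see the case
+   analysis in the comment below. */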
+static tarval *computed_value_Proj(ir_node *n)
+{
+  ir_node *a = get_Proj_pred(n), *b;
+  ir_node *aa, *ab;
+
+  /* Optimize Cmp nodes.
+     This performs a first step of unreachable code elimination.
+     Proj cannot be computed, but folding a Cmp above the Proj here is
+     not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
+     only 1 is used.
+     There are several cases where we can evaluate a Cmp node:
+     1. The nodes compared are both the same.  If we compare for
+        equal, greater equal, ... this will return true, else it
+        will return false.  This step relies on cse.
+     2. The predecessors of Cmp are target values.  We can evaluate
+        the Cmp.
+     3. The predecessors are Allocs or void* constants.  Allocs never
+        return NULL, they raise an exception.  Therefore we can predict
+        the Cmp result. */
+  if (get_irn_op(a) == op_Cmp) {
+    aa = get_Cmp_left(a);
+    ab = get_Cmp_right(a);
+
+    if (aa == ab) { /* 1.: */
+      /* This is a trick with the bits used for encoding the Cmp
+         Proj numbers, the following statement is not the same:
+         return new_tarval_from_long ((get_Proj_proj(n) == Eq), mode_b) */
+      return new_tarval_from_long ((get_Proj_proj(n) & Eq), mode_b);