- tarval *v;
- if ( (tarval_classify ((v = computed_value (a))) == -1)
- || (tarval_classify ((v = computed_value (b))) == -1)) {
- res = v;
- }
- }
- break;
- case iro_Eor: if (ta && tb) { res = tarval_eor (ta, tb); } break;
- case iro_Not: if (ta) { res = tarval_neg (ta); } break;
- case iro_Shl: if (ta && tb) { res = tarval_shl (ta, tb); } break;
- /* tarval_shr is faulty !! */
- case iro_Shr: if (ta && tb) { res = tarval_shr (ta, tb); } break;
- case iro_Shrs:if (ta && tb) { /*res = tarval_shrs (ta, tb)*/; } break;
- case iro_Rot: if (ta && tb) { /*res = tarval_rot (ta, tb)*/; } break;
- case iro_Conv:if (ta) { res = tarval_convert_to (ta, get_irn_mode (n)); }
- break;
- case iro_Proj: /* iro_Cmp */
- {
- ir_node *aa, *ab;
-
- a = get_Proj_pred(n);
- /* Optimize Cmp nodes.
- This performs a first step of unreachable code elimination.
-         Proj cannot be computed, but folding a Cmp above the Proj here is
- not as wasteful as folding a Cmp into a Tuple of 16 Consts of which
- only 1 is used.
-         There are several cases where we can evaluate a Cmp node:
- 1. The nodes compared are both the same. If we compare for
- equal, this will return true, else it will return false.
- This step relies on cse.
- 2. The predecessors of Cmp are target values. We can evaluate
- the Cmp.
- 3. The predecessors are Allocs or void* constants. Allocs never
- return NULL, they raise an exception. Therefore we can predict
- the Cmp result. */
- if (get_irn_op(a) == op_Cmp) {
- aa = get_Cmp_left(a);
- ab = get_Cmp_right(a);
- if (aa == ab) { /* 1.: */
-        /* This is a trick with the bits used for encoding the Cmp
- Proj numbers, the following statement is not the same:
- res = tarval_from_long (mode_b, (get_Proj_proj(n) == Eq)): */
- res = tarval_from_long (mode_b, (get_Proj_proj(n) & irpn_Eq));
- } else {
- tarval *taa = computed_value (aa);
- tarval *tab = computed_value (ab);
- if (taa && tab) { /* 2.: */
- /* strange checks... */
- ir_pncmp flags = tarval_comp (taa, tab);
- if (flags != irpn_False) {
- res = tarval_from_long (mode_b, get_Proj_proj(n) & flags);
- }
- } else { /* check for 3.: */
- ir_node *aaa = skip_nop(skip_Proj(aa));
- ir_node *aba = skip_nop(skip_Proj(ab));
- if ( ( (/* aa is ProjP and aaa is Alloc */
- (get_irn_op(aa) == op_Proj)
- && (get_irn_mode(aa) == mode_p)
- && (get_irn_op(aaa) == op_Alloc))
- && ( (/* ab is constant void */
- (get_irn_op(ab) == op_Const)
- && (get_irn_mode(ab) == mode_p)
- && (get_Const_tarval(ab) == tarval_p_void))
- || (/* ab is other Alloc */
- (get_irn_op(ab) == op_Proj)
- && (get_irn_mode(ab) == mode_p)
- && (get_irn_op(aba) == op_Alloc)
- && (aaa != aba))))
- || (/* aa is void and aba is Alloc */
- (get_irn_op(aa) == op_Const)
- && (get_irn_mode(aa) == mode_p)
- && (get_Const_tarval(aa) == tarval_p_void)
- && (get_irn_op(ab) == op_Proj)
- && (get_irn_mode(ab) == mode_p)
- && (get_irn_op(aba) == op_Alloc)))
- /* 3.: */
- res = tarval_from_long (mode_b, get_Proj_proj(n) & irpn_Ne);
- }
- }
- } else if (get_irn_op(a) == op_DivMod) {
- ta = value_of(get_DivMod_left(a));
- tb = value_of(get_DivMod_right(a));
- if (ta && tb && (get_irn_mode(a) == get_irn_mode(b))) {
- if (tarval_classify(tb) == 0) {res = NULL; break;}
- if (get_Proj_proj(n)== 0) /* Div */
- res = tarval_div(ta, tb);
- else /* Mod */
- res = tarval_mod(ta, tb);