ir_node *a = get_Minus_op(n);
tarval *ta = value_of(a);
- if ((ta != tarval_bad) && mode_is_signed(get_irn_mode(a)))
+ if (ta != tarval_bad)
return tarval_neg(ta);
return tarval_bad;
n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
} else if (mode_is_int(n_mode)) {
- if (smaller_mode(b_mode, a_mode)){
+ if (get_mode_size_bits(b_mode) <= get_mode_size_bits(a_mode)) {
n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
}
* @param phi the Phi node
* @param other the other operand
* @param eval an evaluator function
+ * @param mode the mode of the result, may be different from the mode of the Phi!
* @param left if non-zero, other is the left operand, else the right
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)(tarval *, tarval *), int left) {
+static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)(tarval *, tarval *), ir_mode *mode, int left) {
tarval *tv;
void **res;
ir_node *pred;
- ir_mode *mode;
ir_graph *irg;
int i, n = get_irn_arity(phi);
res[i] = tv;
}
}
- mode = get_irn_mode(phi);
irg = current_ir_graph;
for (i = 0; i < n; ++i) {
pred = get_irn_n(phi, i);
return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_unop_on_phi */
+/**
+ * Apply a conversion on a constant operator (a Phi).
+ *
+ * @param phi  the Phi node; all its predecessors must be Const nodes
+ * @param mode the mode to convert the predecessor constants to
+ *
+ * @return a new Phi node if the conversion was successful, NULL else
+ */
+static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode) {
+ tarval *tv;
+ void **res;
+ ir_node *pred;
+ ir_graph *irg;
+ int i, n = get_irn_arity(phi);
+
+ NEW_ARR_A(void *, res, n);
+ /* first pass: convert every predecessor tarval; abort on any failure
+ so no nodes are created before we know the fold succeeds */
+ for (i = 0; i < n; ++i) {
+ pred = get_irn_n(phi, i);
+ tv = get_Const_tarval(pred);
+ tv = tarval_convert_to(tv, mode);
+
+ if (tv == tarval_bad) {
+ /* folding failed, bad */
+ return 0;
+ }
+ res[i] = tv;
+ }
+ irg = current_ir_graph;
+ /* second pass: materialize the converted tarvals as Const nodes,
+ keeping each predecessor's original Const type */
+ for (i = 0; i < n; ++i) {
+ pred = get_irn_n(phi, i);
+ res[i] = new_r_Const_type(irg, get_irg_start_block(irg),
+ mode, res[i], get_Const_type(pred));
+ }
+ return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
+} /* apply_conv_on_phi */
+
/**
* Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
* SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
return n;
} /* transform_node_AddSub */
-#define HANDLE_BINOP_PHI(op,a,b,c) \
- c = NULL; \
- if (is_Const(b) && is_const_Phi(a)) { \
- /* check for Op(Phi, Const) */ \
- c = apply_binop_on_phi(a, get_Const_tarval(b), op, 0); \
- } \
- else if (is_Const(a) && is_const_Phi(b)) { \
- /* check for Op(Const, Phi) */ \
- c = apply_binop_on_phi(b, get_Const_tarval(a), op, 1); \
- } \
- if (c) { \
- DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
- return c; \
+/**
+ * If one operand is a Const and the other a Phi of Consts, fold the
+ * binary operation eval into the Phi (creating a new Phi of Consts in
+ * the given mode) and return it; expects oldn in scope for debug info.
+ */
+#define HANDLE_BINOP_PHI(eval, a, b, c, mode) \
+  c = NULL; \
+  if (is_Const(b) && is_const_Phi(a)) { \
+    /* check for Op(Phi, Const) */ \
+    c = apply_binop_on_phi(a, get_Const_tarval(b), eval, mode, 0);\
+  } \
+  else if (is_Const(a) && is_const_Phi(b)) { \
+    /* check for Op(Const, Phi) */ \
+    c = apply_binop_on_phi(b, get_Const_tarval(a), eval, mode, 1);\
+  } \
+  if (c) { \
+    DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
+    return c; \
 }
-#define HANDLE_UNOP_PHI(op,a,c) \
+#define HANDLE_UNOP_PHI(eval, a, c) \
c = NULL; \
if (is_const_Phi(a)) { \
/* check for Op(Phi) */ \
- c = apply_unop_on_phi(a, op); \
+ c = apply_unop_on_phi(a, eval); \
if (c) { \
DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
return c; \
a = get_Add_left(n);
b = get_Add_right(n);
- HANDLE_BINOP_PHI(tarval_add, a,b,c);
-
mode = get_irn_mode(n);
+ HANDLE_BINOP_PHI(tarval_add, a, b, c, mode);
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
return n;
} /* transform_node_Add */
-/* returns -cnst */
+/**
+ * returns -cnst or NULL if impossible
+ */
static ir_node *const_negate(ir_node *cnst) {
tarval *tv = tarval_neg(get_Const_tarval(cnst));
dbg_info *dbgi = get_irn_dbg_info(cnst);
mode = get_irn_mode(n);
restart:
- HANDLE_BINOP_PHI(tarval_sub, a,b,c);
+ HANDLE_BINOP_PHI(tarval_sub, a, b, c, mode);
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
ir_node *left = get_Minus_op(a);
- ir_mode *mode = get_irn_mode(n);
ir_node *add = new_rd_Add(dbg, irg, block, left, b, mode);
n = new_rd_Minus(dbg, irg, block, add, mode);
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
ir_node *right = get_Minus_op(b);
- ir_mode *mode = get_irn_mode(n);
n = new_rd_Add(dbg, irg, block, a, right, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MINUS);
ir_node *sub = new_rd_Sub(s_dbg, irg, s_block, s_left, s_right, s_mode);
dbg_info *a_dbg = get_irn_dbg_info(n);
ir_node *a_block = get_nodes_block(n);
- ir_mode *a_mode = get_irn_mode(n);
- n = new_rd_Add(a_dbg, irg, a_block, a, sub, a_mode);
+ n = new_rd_Add(a_dbg, irg, a_block, a, sub, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
} else if (is_Mul(b)) { /* a - (b * C) -> a + (b * -C) */
ir_node *mul = new_rd_Mul(m_dbg, irg, m_block, m_left, cnst2, m_mode);
dbg_info *a_dbg = get_irn_dbg_info(n);
ir_node *a_block = get_nodes_block(n);
- ir_mode *a_mode = get_irn_mode(n);
- n = new_rd_Add(a_dbg, irg, a_block, a, mul, a_mode);
+ n = new_rd_Add(a_dbg, irg, a_block, a, mul, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
}
if (mode != get_irn_mode(a))
return transform_node_Mul2n(n, mode);
- HANDLE_BINOP_PHI(tarval_mul, a,b,c);
+ HANDLE_BINOP_PHI(tarval_mul, a, b, c, mode);
if (mode_is_signed(mode)) {
ir_node *r = NULL;
* Optimize Abs(x) into -x if x is Confirmed <= 0
*/
static ir_node *transform_node_Abs(ir_node *n) {
- ir_node *oldn = n;
- ir_node *a = get_Abs_op(n);
- value_classify_sign sign = classify_value_sign(a);
+ ir_node *c, *oldn = n;
+ ir_node *a = get_Abs_op(n);
+ ir_mode *mode;
- if (sign == value_classified_negative) {
- ir_mode *mode = get_irn_mode(n);
+ HANDLE_UNOP_PHI(tarval_abs, a, c);
+
+ switch (classify_value_sign(a)) {
+ case value_classified_negative:
+ mode = get_irn_mode(n);
/*
* We can replace the Abs by -x here.
get_irn_n(n, -1), a, mode);
DBG_OPT_CONFIRM(oldn, n);
- } else if (sign == value_classified_positive) {
+ return n;
+ case value_classified_positive:
/* n is positive, Abs is not needed */
n = a;
DBG_OPT_CONFIRM(oldn, n);
+ return n;
+ default:
+ return n;
}
-
- return n;
} /* transform_node_Abs */
/**
ir_node *b = get_And_right(n);
ir_mode *mode;
- HANDLE_BINOP_PHI(tarval_and, a,b,c);
-
mode = get_irn_mode(n);
+ HANDLE_BINOP_PHI(tarval_and, a, b, c, mode);
/* we can evaluate 2 Projs of the same Cmp */
if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
ir_node *b = get_Eor_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_eor, a,b,c);
+ HANDLE_BINOP_PHI(tarval_eor, a, b, c, mode);
/* we can evaluate 2 Projs of the same Cmp */
if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
* Optimize:
* -(~x) = x + 1
* -(a-b) = b - a
+ * -(a >>u (size-1)) = a >>s (size-1)
+ * -(a >>s (size-1)) = a >>u (size-1)
+ * -(a * const) -> a * -const
*/
static ir_node *transform_node_Minus(ir_node *n) {
ir_node *c, *oldn = n;
ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(a);
n = new_rd_Mul(dbg, irg, block, mul_l, cnst, mode);
+ DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_MUL_C);
return n;
}
}
long proj_nr = get_Proj_proj(proj);
/* we can evaluate this direct */
- switch(proj_nr) {
+ switch (proj_nr) {
case pn_Cmp_False:
return new_Const(mode_b, get_tarval_b_false());
case pn_Cmp_True:
break;
}
+ /* remove Casts */
+ if (is_Cast(left))
+ left = get_Cast_op(left);
+ if (is_Cast(right))
+ right = get_Cast_op(right);
+
/* Remove unnecessary conversions */
/* TODO handle constants */
if (is_Conv(left) && is_Conv(right)) {
left = op_left;
right = op_right;
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV);
} else if (smaller_mode(mode_left, mode_right)) {
left = new_r_Conv(irg, block, op_left, mode_right);
right = op_right;
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
} else if (smaller_mode(mode_right, mode_left)) {
left = op_left;
right = new_r_Conv(irg, block, op_right, mode_left);
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
}
}
}
- /* TODO extend to arbitrary constants */
- if (is_Conv(left) && is_Const(right) && is_Const_null(right)) {
- ir_mode* mode = get_irn_mode(left);
- ir_node* op = get_Conv_op(left);
- ir_mode* op_mode = get_irn_mode(op);
-
- if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
- (mode_is_signed(mode) || !mode_is_signed(op_mode))) {
- ir_node *null = new_Const(op_mode, get_mode_null(op_mode));
- set_Cmp_left( n, op);
- set_Cmp_right(n, null);
- return proj;
- }
- }
-
- /* remove Casts */
- if (is_Cast(left))
- left = get_Cast_op(left);
- if (is_Cast(right))
- right = get_Cast_op(right);
-
/* remove operation of both sides if possible */
if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
ir_opcode lop = get_irn_opcode(left);
left = get_unop_op(left);
right = get_unop_op(right);
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
break;
case iro_Add:
ll = get_Add_left(left);
left = lr;
right = rr;
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
} else if (ll == rr) {
/* X + a CMP b + X ==> a CMP b */
left = lr;
right = rl;
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
} else if (lr == rl) {
/* a + X CMP X + b ==> a CMP b */
left = ll;
right = rr;
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
} else if (lr == rr) {
/* a + X CMP b + X ==> a CMP b */
left = ll;
right = rl;
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
break;
case iro_Sub:
left = lr;
right = rr;
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
} else if (lr == rr) {
/* a - X CMP b - X ==> a CMP b */
left = ll;
right = rl;
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
break;
case iro_Rot:
left = get_Rot_left(left);
right = get_Rot_left(right);
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
break;
default:
if (get_irn_mode(left) == mode_b) {
ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
+ ir_node *bres;
switch (proj_nr) {
- case pn_Cmp_Le: return new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
- case pn_Cmp_Lt: return new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
- case pn_Cmp_Ge: return new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b);
- case pn_Cmp_Gt: return new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b);
- case pn_Cmp_Lg: return new_r_Eor(irg, block, left, right, mode_b);
- case pn_Cmp_Eq: return new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b);
+ case pn_Cmp_Le: bres = new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); break;
+ case pn_Cmp_Lt: bres = new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); break;
+ case pn_Cmp_Ge: bres = new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); break;
+ case pn_Cmp_Gt: bres = new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); break;
+ case pn_Cmp_Lg: bres = new_r_Eor(irg, block, left, right, mode_b); break;
+ case pn_Cmp_Eq: bres = new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b); break;
+ default: bres = NULL;
+ }
+ if (bres) {
+ DBG_OPT_ALGSIM0(n, bres, FS_OPT_CMP_TO_BOOL);
+ return bres;
}
}
*/
if (is_Const(right)) {
c = right;
- } else if (is_Const(left)) {
+ } else if (is_irn_constlike(left)) {
c = left;
left = right;
right = c;
+ if(!is_Const(c))
+ c = NULL;
proj_nr = get_inversed_pnc(proj_nr);
changed |= 1;
mode = get_irn_mode(c);
tv = get_Const_tarval(c);
+ /* TODO extend to arbitrary constants */
+ if (is_Conv(left) && tarval_is_null(tv)) {
+ ir_node *op = get_Conv_op(left);
+ ir_mode *op_mode = get_irn_mode(op);
+
+ /*
+ * UpConv(x) REL 0 ==> x REL 0
+ */
+ if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
+ ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) ||
+ mode_is_signed(mode) || !mode_is_signed(op_mode))) {
+ tv = get_mode_null(op_mode);
+ left = op;
+ mode = op_mode;
+ changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
+ }
+ }
+
if (tv != tarval_bad) {
/* the following optimization is possible on modes without Overflow
* on Unary Minus or on == and !=:
left = get_Minus_op(left);
proj_nr = get_inversed_pnc(proj_nr);
changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
}
} else if (is_Not(left) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)) {
+ /* Not(a) ==/!= c ==> a ==/!= Not(c) */
tv = tarval_not(tv);
if (tv != tarval_bad) {
left = get_Not_op(left);
changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
}
}
if (tv != tarval_bad) {
proj_nr ^= pn_Cmp_Eq;
changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN);
}
}
/* c < 0 : a > c ==> a >= (c+1) a <= c ==> a < (c+1) */
if (tv != tarval_bad) {
proj_nr ^= pn_Cmp_Eq;
changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN);
}
}
/* the following reassociations work only for == and != */
if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+#if 0 /* Might be not that good in general */
/* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
if (tarval_is_null(tv) && is_Sub(left)) {
right = get_Sub_right(left);
tv = value_of(right);
changed = 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
}
+#endif
if (tv != tarval_bad) {
/* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */
left = get_Sub_left(left);
tv = tv2;
changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
}
}
}
left = a;
tv = tv2;
changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
}
}
}
left = get_Minus_op(left);
tv = tv2;
changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
}
}
}
}
} /* mode_is_int */
- /*
- * optimization for AND:
- * Optimize:
- * And(x, C) == C ==> And(x, C) != 0
- * And(x, C) != C ==> And(X, C) == 0
- *
- * if C is a single Bit constant.
- */
- if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_And(left)) {
- if (tarval_is_single_bit(tv)) {
- /* check for Constant's match. We have check hare the tarvals,
- because our const might be changed */
- ir_node *la = get_And_left(left);
- ir_node *ra = get_And_right(left);
- if ((is_Const(la) && get_Const_tarval(la) == tv) ||
- (is_Const(ra) && get_Const_tarval(ra) == tv)) {
- /* fine: do the transformation */
- tv = get_mode_null(get_tarval_mode(tv));
- proj_nr ^= pn_Cmp_Leg;
- changed |= 2;
+ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+ switch (get_irn_opcode(left)) {
+ ir_node *c1;
+
+ case iro_And:
+ c1 = get_And_right(left);
+ if (is_Const(c1)) {
+ /*
+ * And(x, C1) == C2 ==> FALSE if C2 & C1 != C2
+ * And(x, C1) != C2 ==> TRUE if C2 & C1 != C2
+ */
+ tarval *mask = tarval_and(get_Const_tarval(c1), tv);
+ if (mask != tv) {
+ /* TODO: move to constant evaluation */
+ tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+ c1 = new_Const(mode_b, tv);
+ DBG_OPT_CSTEVAL(proj, c1);
+ return c1;
+ }
+
+ if (tarval_is_single_bit(tv)) {
+ /*
+ * optimization for AND:
+ * Optimize:
+ * And(x, C) == C ==> And(x, C) != 0
+ * And(x, C) != C ==> And(X, C) == 0
+ *
+ * if C is a single Bit constant.
+ */
+
+ /* check for Constant's match. We have to check the tarvals here,
+ because our const might have been changed */
+ if (get_Const_tarval(c1) == tv) {
+ /* fine: do the transformation */
+ tv = get_mode_null(get_tarval_mode(tv));
+ proj_nr ^= pn_Cmp_Leg;
+ changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CNST_MAGN);
+ }
+ }
}
- }
+ break;
+ case iro_Or:
+ c1 = get_Or_right(left);
+ if (is_Const(c1) && tarval_is_null(tv)) {
+ /*
+ * Or(x, C) == 0 && C != 0 ==> FALSE
+ * Or(x, C) != 0 && C != 0 ==> TRUE
+ */
+ if (! tarval_is_null(get_Const_tarval(c1))) {
+ /* TODO: move to constant evaluation */
+ tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+ c1 = new_Const(mode_b, tv);
+ DBG_OPT_CSTEVAL(proj, c1);
+ return c1;
+ }
+ }
+ break;
+ case iro_Shl:
+ /*
+ * optimize x << c1 == c into x & (-1 >>u c1) == c >> c1 if c & (-1 << c1) == c
+ * FALSE else
+ * optimize x << c1 != c into x & (-1 >>u c1) != c >> c1 if c & (-1 << c1) == c
+ * TRUE else
+ */
+ c1 = get_Shl_right(left);
+ if (is_Const(c1)) {
+ tarval *tv1 = get_Const_tarval(c1);
+ ir_mode *mode = get_irn_mode(left);
+ tarval *minus1 = get_mode_all_one(mode);
+ tarval *amask = tarval_shr(minus1, tv1);
+ tarval *cmask = tarval_shl(minus1, tv1);
+ ir_node *sl, *blk;
+
+ if (tarval_and(tv, cmask) != tv) {
+ /* condition not met */
+ tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+ c1 = new_Const(mode_b, tv);
+ DBG_OPT_CSTEVAL(proj, c1);
+ return c1;
+ }
+ sl = get_Shl_left(left);
+ blk = get_nodes_block(n);
+ left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+ tv = tarval_shr(tv, tv1);
+ changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
+ }
+ break;
+ case iro_Shr:
+ /*
+ * optimize x >>u c1 == c into x & (-1 << c1) == c << c1 if c & (-1 >>u c1) == c
+ * FALSE else
+ * optimize x >>u c1 != c into x & (-1 << c1) != c << c1 if c & (-1 >>u c1) == c
+ * TRUE else
+ */
+ c1 = get_Shr_right(left);
+ if (is_Const(c1)) {
+ tarval *tv1 = get_Const_tarval(c1);
+ ir_mode *mode = get_irn_mode(left);
+ tarval *minus1 = get_mode_all_one(mode);
+ tarval *amask = tarval_shl(minus1, tv1);
+ tarval *cmask = tarval_shr(minus1, tv1);
+ ir_node *sl, *blk;
+
+ if (tarval_and(tv, cmask) != tv) {
+ /* condition not met */
+ tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+ c1 = new_Const(mode_b, tv);
+ DBG_OPT_CSTEVAL(proj, c1);
+ return c1;
+ }
+ sl = get_Shr_left(left);
+ blk = get_nodes_block(n);
+ left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+ tv = tarval_shl(tv, tv1);
+ changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
+ }
+ break;
+ case iro_Shrs:
+ /*
+ * optimize x >>s c1 == c into x & (-1 << c1) == c << c1 if (c >>s (BITS - c1)) \in {0,-1}
+ * FALSE else
+ * optimize x >>s c1 != c into x & (-1 << c1) != c << c1 if (c >>s (BITS - c1)) \in {0,-1}
+ * TRUE else
+ */
+ c1 = get_Shrs_right(left);
+ if (is_Const(c1)) {
+ tarval *tv1 = get_Const_tarval(c1);
+ ir_mode *mode = get_irn_mode(left);
+ tarval *minus1 = get_mode_all_one(mode);
+ tarval *amask = tarval_shl(minus1, tv1);
+ tarval *cond = new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(tv1));
+ ir_node *sl, *blk;
+
+ cond = tarval_sub(cond, tv1);
+ cond = tarval_shrs(tv, cond);
+
+ if (!tarval_is_all_one(cond) && !tarval_is_null(cond)) {
+ /* condition not met */
+ tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
+ c1 = new_Const(mode_b, tv);
+ DBG_OPT_CSTEVAL(proj, c1);
+ return c1;
+ }
+ sl = get_Shrs_left(left);
+ blk = get_nodes_block(n);
+ left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+ tv = tarval_shl(tv, tv1);
+ changed |= 2;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
+ }
+ break;
+ } /* switch */
}
} /* tarval != bad */
}
tv = tarval_sub(tv, get_mode_one(mode));
left = new_rd_And(get_irn_dbg_info(op), current_ir_graph, blk, v, new_Const(mode, tv), mode);
changed |= 1;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_MOD_TO_AND);
}
}
}
ir_node *c, *oldn = n;
ir_node *a = get_Or_left(n);
ir_node *b = get_Or_right(n);
+ ir_mode *mode;
if (is_Not(a) && is_Not(b)) {
/* ~a | ~b = ~(a&b) */
ir_node *block = get_nodes_block(n);
- ir_mode *mode = get_irn_mode(n);
+ mode = get_irn_mode(n);
a = get_Not_op(a);
b = get_Not_op(b);
n = new_rd_And(get_irn_dbg_info(n), current_ir_graph, block, a, b, mode);
}
}
- HANDLE_BINOP_PHI(tarval_or, a,b,c);
+ mode = get_irn_mode(n);
+ HANDLE_BINOP_PHI(tarval_or, a, b, c, mode);
n = transform_node_Or_bf_store(n);
n = transform_node_Or_Rot(n);
static ir_node *transform_node(ir_node *n);
/**
- * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl.
+ * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl, Rot.
*
* Should be moved to reassociation?
*/
*/
static ir_node *transform_node_Shr(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Shr_left(n);
- ir_node *b = get_Shr_right(n);
+ ir_node *a = get_Shr_left(n);
+ ir_node *b = get_Shr_right(n);
+ ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shr, a, b, c);
+ HANDLE_BINOP_PHI(tarval_shr, a, b, c, mode);
return transform_node_shift(n);
} /* transform_node_Shr */
*/
static ir_node *transform_node_Shrs(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Shrs_left(n);
- ir_node *b = get_Shrs_right(n);
+ ir_node *a = get_Shrs_left(n);
+ ir_node *b = get_Shrs_right(n);
+ ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shrs, a, b, c);
+ HANDLE_BINOP_PHI(tarval_shrs, a, b, c, mode);
return transform_node_shift(n);
} /* transform_node_Shrs */
*/
static ir_node *transform_node_Shl(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Shl_left(n);
- ir_node *b = get_Shl_right(n);
+ ir_node *a = get_Shl_left(n);
+ ir_node *b = get_Shl_right(n);
+ ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shl, a, b, c);
+ HANDLE_BINOP_PHI(tarval_shl, a, b, c, mode);
return transform_node_shift(n);
} /* transform_node_Shl */
+/**
+ * Transform a Rot: constant-fold Rot over a Phi of Const nodes,
+ * then hand over to the generic shift transformation.
+ */
+static ir_node *transform_node_Rot(ir_node *n) {
+ ir_node *c, *oldn = n;
+ ir_node *a = get_Rot_left(n);
+ ir_node *b = get_Rot_right(n);
+ ir_mode *mode = get_irn_mode(n);
+
+ HANDLE_BINOP_PHI(tarval_rot, a, b, c, mode);
+ return transform_node_shift(n);
+} /* transform_node_Rot */
+
+/**
+ * Transform a Conv.
+ */
+static ir_node *transform_node_Conv(ir_node *n) {
+ ir_node *c, *oldn = n;
+ ir_node *a = get_Conv_op(n);
+
+ if (is_const_Phi(a)) {
+ c = apply_conv_on_phi(a, get_irn_mode(n));
+ if (c) {
+ DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI);
+ return c;
+ }
+ }
+ return n;
+} /* transform_node_Conv */
+
/**
* Remove dead blocks and nodes in dead blocks
* in keep alive list. We do not generate a new End node.
CASE(Shr);
CASE(Shrs);
CASE(Shl);
+ CASE(Rot);
+ CASE(Conv);
CASE(End);
CASE(Mux);
CASE(Psi);