static tarval *computed_value_Mul(ir_node *n) {
ir_node *a = get_Mul_left(n);
ir_node *b = get_Mul_right(n);
+ ir_mode *mode;
tarval *ta = value_of(a);
tarval *tb = value_of(b);
- if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ mode = get_irn_mode(n);
+ if (mode != get_irn_mode(a)) {
+    /* operand and result modes differ: n * n = 2n bit multiplication */
+ ta = tarval_convert_to(ta, mode);
+ tb = tarval_convert_to(tb, mode);
+ }
+
+ if (ta != tarval_bad && tb != tarval_bad) {
return tarval_mul(ta, tb);
} else {
- /* a*0 = 0 or 0*b = 0:
- calls computed_value recursive and returns the 0 with proper
- mode. */
- if ((ta != tarval_bad) && (ta == get_mode_null(get_tarval_mode(ta))))
+ /* a*0 = 0 or 0*b = 0 */
+ if (ta == get_mode_null(mode))
return ta;
- if ((tb != tarval_bad) && (tb == get_mode_null(get_tarval_mode(tb))))
+ if (tb == get_mode_null(mode))
return tb;
}
return tarval_bad;
static ir_node *equivalent_node_Mul(ir_node *n) {
ir_node *oldn = n;
ir_node *a = get_Mul_left(n);
- ir_node *b = get_Mul_right(n);
- /* Mul is commutative and has again an other neutral element. */
- if (classify_tarval(value_of(a)) == TV_CLASSIFY_ONE) {
- n = b;
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
- } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) {
- n = a;
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+  /* here we can handle only the n * n = n bit cases */
+ if (get_irn_mode(n) == get_irn_mode(a)) {
+ ir_node *b = get_Mul_right(n);
+
+    /* Mul is commutative and has one as its neutral element. */
+ if (classify_tarval(value_of(a)) == TV_CLASSIFY_ONE) {
+ n = b;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) {
+ n = a;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ }
}
return n;
} /* equivalent_node_Mul */
#undef CASE
} /* firm_set_default_equivalent_node */
-/**
- * Do node specific optimizations of nodes predecessors.
- */
-static void optimize_preds(ir_node *n) {
- switch (get_irn_opcode(n)) {
-
- case iro_Cmp: { /* We don't want Cast as input to Cmp. */
- ir_node *a = get_Cmp_left(n), *b = get_Cmp_right(n);
-
- if (get_irn_op(a) == op_Cast) {
- a = get_Cast_op(a);
- set_Cmp_left(n, a);
- }
- if (get_irn_op(b) == op_Cast) {
- b = get_Cast_op(b);
- set_Cmp_right(n, b);
- }
- break;
- }
-
- default: break;
- } /* end switch */
-} /* optimize_preds */
-
/**
* Returns non-zero if a node is a Phi node
* with all predecessors constant.
return c; \
}
-#define HANDLE_UNOP_PHI(op,a,c) \
- c = NULL; \
- if (is_const_Phi(a)) { \
- /* check for Op(Phi) */ \
- c = apply_unop_on_phi(a, op); \
- } \
- if (c) { \
- DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
- return c; \
+#define HANDLE_UNOP_PHI(op,a,c) \
+ c = NULL; \
+ if (is_const_Phi(a)) { \
+ /* check for Op(Phi) */ \
+ c = apply_unop_on_phi(a, op); \
+ if (c) { \
+ DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
+ return c; \
+ } \
}
-
/**
* Do the AddSub optimization, then Transform
* Constant folding on Phi
return n;
if (mode_is_num(mode)) {
- if (a == b && mode_is_int(mode)) {
+    /* the following code leads to endless recursion when Muls are replaced by a simple instruction chain */
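+    /* a + a -> a * 2 */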
+ if (!get_opt_arch_dep_running() && a == b && mode_is_int(mode)) {
ir_node *block = get_irn_n(n, -1);
n = new_rd_Mul(
new_r_Const_long(current_ir_graph, block, mode, 2),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_A);
- } else if (get_irn_op(a) == op_Minus) {
+ return n;
+ }
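+    /* -a + b -> b - a */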
+ if (is_Minus(a)) {
n = new_rd_Sub(
get_irn_dbg_info(n),
current_ir_graph,
get_Minus_op(a),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
- } else if (get_irn_op(b) == op_Minus) {
+ return n;
+ }
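+    /* a + -b -> a - b */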
+ if (is_Minus(b)) {
n = new_rd_Sub(
get_irn_dbg_info(n),
current_ir_graph,
get_Minus_op(b),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
+ return n;
}
- /* do NOT execute this code if reassociation is enabled, it does the inverse! */
- else if (!get_opt_reassociation() && get_irn_op(a) == op_Mul) {
- ir_node *ma = get_Mul_left(a);
- ir_node *mb = get_Mul_right(a);
-
- if (b == ma) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- ma,
- new_rd_Add(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- } else if (b == mb) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_rd_Add(
+ if (! get_opt_reassociation()) {
+ /* do NOT execute this code if reassociation is enabled, it does the inverse! */
+ if (is_Mul(a)) {
+ ir_node *ma = get_Mul_left(a);
+ ir_node *mb = get_Mul_right(a);
+
+ if (b == ma) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
ma,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- }
- }
- /* do NOT execute this code if reassociation is enabled, it does the inverse! */
- else if (!get_opt_reassociation() && get_irn_op(b) == op_Mul) {
- ir_node *ma = get_Mul_left(b);
- ir_node *mb = get_Mul_right(b);
-
- if (a == ma) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- ma,
- new_rd_Add(
+ new_rd_Add(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ mb,
+ new_r_Const_long(current_ir_graph, blk, mode, 1),
+ mode),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ return n;
+ } else if (b == mb) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
mb,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- } else if (a == mb) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_rd_Add(
+ new_rd_Add(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ ma,
+ new_r_Const_long(current_ir_graph, blk, mode, 1),
+ mode),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ return n;
+ }
+ }
+ if (is_Mul(b)) {
+ ir_node *ma = get_Mul_left(b);
+ ir_node *mb = get_Mul_right(b);
+
+ if (a == ma) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
ma,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ new_rd_Add(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ mb,
+ new_r_Const_long(current_ir_graph, blk, mode, 1),
+ mode),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ return n;
+ }
+ if (a == mb) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Mul(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ mb,
+ new_rd_Add(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ ma,
+ new_r_Const_long(current_ir_graph, blk, mode, 1),
+ mode),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ return n;
+ }
}
}
/* Here we rely on constants being on the RIGHT side */
- else if (get_mode_arithmetic(mode) == irma_twos_complement &&
+ if (get_mode_arithmetic(mode) == irma_twos_complement &&
is_Not(a) && classify_Const(b) == CNST_ONE) {
/* ~x + 1 = -x */
ir_node *op = get_Not_op(a);
ir_node *blk = get_irn_n(n, -1);
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
+ return n;
}
}
return n;
} /* transform_node_Add */
+/** Returns -cnst or NULL if the constant cannot be negated. */
+static ir_node* const_negate(ir_node* cnst)
+{
+ tarval *tv = tarval_neg(get_Const_tarval(cnst));
+ dbg_info *dbgi = get_irn_dbg_info(cnst);
+ ir_graph *irg = get_irn_irg(cnst);
+ ir_node *block = get_nodes_block(cnst);
+ ir_mode *mode = get_irn_mode(cnst);
+ if (tv == tarval_bad) return NULL;
+ return new_rd_Const(dbgi, irg, block, mode, tv);
+}
+
/**
* Do the AddSub optimization, then Transform
* Constant folding on Phi
return n;
/* Sub(a, Const) -> Add(a, -Const) */
- if (is_Const(b)) {
- tarval *tv = get_Const_tarval(b);
-
- tv = tarval_neg(tv);
- if(tv != tarval_bad) {
- ir_node *cnst = new_Const(get_irn_mode(b), tv);
+ if (is_Const(b) && get_irn_mode(b) != mode_P) {
+ ir_node* cnst = const_negate(b);
+ if (cnst != NULL) {
ir_node *block = get_nodes_block(n);
dbg_info *dbgi = get_irn_dbg_info(n);
ir_graph *irg = get_irn_irg(n);
}
}
+ if (is_Minus(a)) { /* -a - b -> -(a + b) */
+ ir_graph *irg = current_ir_graph;
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_node *left = get_Minus_op(a);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *add = new_rd_Add(dbg, irg, block, left, b, mode);
+ ir_node *neg = new_rd_Minus(dbg, irg, block, add, mode);
+ return neg;
+ } else if (is_Minus(b)) { /* a - -b -> a + b */
+ ir_graph *irg = current_ir_graph;
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_node *right = get_Minus_op(b);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *add = new_rd_Add(dbg, irg, block, a, right, mode);
+ return add;
+ } else if (is_Sub(b)) { /* a - (b - c) -> a + (c - b) */
+ ir_graph *irg = current_ir_graph;
+ dbg_info *s_dbg = get_irn_dbg_info(b);
+ ir_node *s_block = get_nodes_block(b);
+ ir_node *s_left = get_Sub_right(b);
+ ir_node *s_right = get_Sub_left(b);
+ ir_mode *s_mode = get_irn_mode(b);
+ ir_node *sub = new_rd_Sub(s_dbg, irg, s_block, s_left, s_right, s_mode);
+ dbg_info *a_dbg = get_irn_dbg_info(n);
+ ir_node *a_block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(n);
+ ir_node *add = new_rd_Add(a_dbg, irg, a_block, a, sub, a_mode);
+ return add;
+ } else if (is_Mul(b)) { /* a - (b * const2) -> a + (b * -const2) */
+ ir_node* m_right = get_Mul_right(b);
+ if (is_Const(m_right)) {
+ ir_node* cnst2 = const_negate(m_right);
+ if (cnst2 != NULL) {
+ ir_graph *irg = current_ir_graph;
+ dbg_info *m_dbg = get_irn_dbg_info(b);
+ ir_node *m_block = get_nodes_block(b);
+ ir_node *m_left = get_Mul_left(b);
+ ir_mode *m_mode = get_irn_mode(b);
+ ir_node *mul = new_rd_Mul(m_dbg, irg, m_block, m_left, cnst2, m_mode);
+ dbg_info *a_dbg = get_irn_dbg_info(n);
+ ir_node *a_block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(n);
+ ir_node *add = new_rd_Add(a_dbg, irg, a_block, a, mul, a_mode);
+ return add;
+ }
+ }
+ }
+
+ /* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
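+  /* 0 - b -> -b */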
+ if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
+ n = new_rd_Minus(
+ get_irn_dbg_info(n),
+ current_ir_graph,
+ get_irn_n(n, -1),
+ b,
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A);
+ return n;
+ }
if (is_Add(a)) {
if (mode_wrap_around(mode)) {
ir_node *left = get_Add_left(a);
}
n = right;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ return n;
} else if (right == b) {
if (mode != get_irn_mode(left)) {
/* This Sub is an effective Cast */
}
n = left;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ return n;
}
}
- } else if (is_Add(b)) {
+ }
+ if (is_Add(b)) {
if (mode_wrap_around(mode)) {
ir_node *left = get_Add_left(b);
ir_node *right = get_Add_right(b);
n = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), n, mode);
}
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ return n;
} else if (right == a) {
ir_mode *l_mode = get_irn_mode(left);
n = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), n, mode);
}
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ return n;
}
}
- } else if (mode_is_int(mode) && is_Conv(a) && is_Conv(b)) {
+ }
+ if (mode_is_int(mode) && is_Conv(a) && is_Conv(b)) {
ir_mode *mode = get_irn_mode(a);
if (mode == get_irn_mode(b)) {
}
}
}
- /* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
- else if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
- n = new_rd_Minus(
- get_irn_dbg_info(n),
- current_ir_graph,
- get_irn_n(n, -1),
- b,
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A);
- }
/* do NOT execute this code if reassociation is enabled, it does the inverse! */
- else if (get_opt_reassociation() && get_irn_op(a) == op_Mul) {
+ if (get_opt_reassociation() && get_irn_op(a) == op_Mul) {
ir_node *ma = get_Mul_left(a);
ir_node *mb = get_Mul_right(a);
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
+ return n;
} else if (mb == b) {
ir_node *blk = get_irn_n(n, -1);
n = new_rd_Mul(
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
+ return n;
}
- } else if (get_irn_op(a) == op_Sub) {
+ }
+ if (is_Sub(a)) {
ir_node *x = get_Sub_left(a);
ir_node *y = get_Sub_right(a);
ir_node *blk = get_irn_n(n, -1);
set_Sub_left(n, x);
set_Sub_right(n, add);
DBG_OPT_ALGSIM0(n, n, FS_OPT_SUB_SUB_X_Y_Z);
+ return n;
}
return n;
} /* transform_node_Sub */
+/**
+ * Several transformations done on the n*n = 2n bit multiplication.
+ * These transformations must be done here because new nodes may be produced.
+ */
+static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode) {
+ ir_node *oldn = n;
+ ir_node *a = get_Mul_left(n);
+ ir_node *b = get_Mul_right(n);
+ tarval *ta = value_of(a);
+ tarval *tb = value_of(b);
+ ir_mode *smode = get_irn_mode(a);
+
+ if (ta == get_mode_one(smode)) {
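+    /* 1 * b: convert b to the (wider) result mode */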
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, b, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ return n;
+ }
+ else if (ta == get_mode_minus_one(smode)) {
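+    /* -1 * b: negate b in the operand mode, then convert to the result mode */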
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, b, smode);
+ n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
+ return n;
+ }
+ if (tb == get_mode_one(smode)) {
+ ir_node *blk = get_irn_n(a, -1);
+ n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, a, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ return n;
+ }
+ else if (tb == get_mode_minus_one(smode)) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, a, smode);
+ n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
+ return n;
+ }
+ return n;
+}
+
/**
* Transform Mul(a,-1) into -a.
* Do constant evaluation of Phi nodes.
*/
static ir_node *transform_node_Mul(ir_node *n) {
ir_node *c, *oldn = n;
+ ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Mul_left(n);
ir_node *b = get_Mul_right(n);
- ir_mode *mode;
+
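+  /* do not touch Bad operands */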
+ if (is_Bad(a) || is_Bad(b))
+ return n;
+
+ if (mode != get_irn_mode(a))
+ return transform_node_Mul2n(n, mode);
HANDLE_BINOP_PHI(tarval_mul, a,b,c);
- mode = get_irn_mode(n);
if (mode_is_signed(mode)) {
ir_node *r = NULL;
return n;
}
}
+ if (is_Minus(a)) {
+ if (is_Const(b)) { /* -a * const -> a * -const */
+ ir_node* cnst = const_negate(b);
+ if (cnst != NULL) {
+ set_Mul_left( n, get_Minus_op(a));
+ set_Mul_right(n, cnst);
+ return n;
+ }
+ } else if (is_Minus(b)) { /* -a * -b -> a * b */
+ set_Mul_left( n, get_Minus_op(a));
+ set_Mul_right(n, get_Minus_op(b));
+ return n;
+ } else if (is_Sub(b)) { /* -a * (b - c) -> a * (c - b) */
+ ir_node *sub_l = get_Sub_left(b);
+ ir_node *sub_r = get_Sub_right(b);
+ dbg_info *dbgi = get_irn_dbg_info(b);
+ ir_graph *irg = current_ir_graph;
+ ir_mode *mode = get_irn_mode(b);
+ ir_node *block = get_nodes_block(b);
+ ir_node *new_b = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
+ set_Mul_left( n, get_Minus_op(a));
+ set_Mul_right(n, new_b);
+ return n;
+ }
+ } else if (is_Minus(b)) {
+ if (is_Sub(a)) { /* (a - b) * -c -> (b - a) * c */
+ ir_node *sub_l = get_Sub_left(a);
+ ir_node *sub_r = get_Sub_right(a);
+ dbg_info *dbgi = get_irn_dbg_info(a);
+ ir_graph *irg = current_ir_graph;
+ ir_mode *mode = get_irn_mode(a);
+ ir_node *block = get_nodes_block(a);
+ ir_node *new_a = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
+ set_Mul_left (n, new_a);
+ set_Mul_right(n, get_Minus_op(b));
+ return n;
+ }
+ }
if (get_mode_arithmetic(mode) == irma_ieee754) {
if (is_Const(a)) {
tarval *tv = get_Const_tarval(a);
/* Remove unnecessary conversions */
/* TODO handle constants */
if (is_Conv(left) && is_Conv(right)) {
- ir_mode* mode = get_irn_mode(left);
- ir_node* op_left = get_Conv_op(left);
- ir_node* op_right = get_Conv_op(right);
- ir_mode* mode_left = get_irn_mode(op_left);
- ir_mode* mode_right = get_irn_mode(op_right);
+ ir_mode *mode = get_irn_mode(left);
+ ir_node *op_left = get_Conv_op(left);
+ ir_node *op_right = get_Conv_op(right);
+ ir_mode *mode_left = get_irn_mode(op_left);
+ ir_mode *mode_right = get_irn_mode(op_right);
if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)) {
- ir_graph* irg = current_ir_graph;
- ir_node* block = get_nodes_block(n);
- ir_node* new_left;
- ir_node* new_right;
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
if (mode_left == mode_right) {
- new_left = op_left;
- new_right = op_right;
+ left = op_left;
+ right = op_right;
+ changed |= 1;
} else if (smaller_mode(mode_left, mode_right)) {
- new_left = new_r_Conv(irg, block, op_left, mode_right);
- new_right = op_right;
+ left = new_r_Conv(irg, block, op_left, mode_right);
+ right = op_right;
+ changed |= 1;
} else if (smaller_mode(mode_right, mode_left)) {
- new_left = op_left;
- new_right = new_r_Conv(irg, block, op_right, mode_left);
- } else {
- goto no_remove_conv;
+ left = op_left;
+ right = new_r_Conv(irg, block, op_right, mode_left);
+ changed |= 1;
+ }
+ }
+ }
+
+ /* remove Casts */
+ if (is_Cast(left))
+ left = get_Cast_op(left);
+ if (is_Cast(right))
+ right = get_Cast_op(right);
+
+  /* remove the same operation from both sides, if possible */
+ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+ ir_opcode lop = get_irn_opcode(left);
+
+ if (lop == get_irn_opcode(right)) {
+ ir_node *ll, *lr, *rl, *rr;
+
+ /* same operation on both sides, try to remove */
+ switch (lop) {
+ case iro_Not:
+ case iro_Minus:
+ /* ~a CMP ~b => a CMP b, -a CMP -b ==> a CMP b */
+ left = get_unop_op(left);
+ right = get_unop_op(right);
+ changed |= 1;
+ break;
+ case iro_Add:
+ ll = get_Add_left(left);
+ lr = get_Add_right(left);
+ rl = get_Add_left(right);
+ rr = get_Add_right(right);
+
+ if (ll == rl) {
+ /* X + a CMP X + b ==> a CMP b */
+ left = lr;
+ right = rr;
+ changed |= 1;
+ } else if (ll == rr) {
+ /* X + a CMP b + X ==> a CMP b */
+ left = lr;
+ right = rl;
+ changed |= 1;
+ } else if (lr == rl) {
+ /* a + X CMP X + b ==> a CMP b */
+ left = ll;
+ right = rr;
+ changed |= 1;
+ } else if (lr == rr) {
+ /* a + X CMP b + X ==> a CMP b */
+ left = ll;
+ right = rl;
+ changed |= 1;
+ }
+ break;
+ case iro_Sub:
+ ll = get_Sub_left(left);
+ lr = get_Sub_right(left);
+ rl = get_Sub_left(right);
+ rr = get_Sub_right(right);
+
+ if (ll == rl) {
+ /* X - a CMP X - b ==> a CMP b */
+ left = lr;
+ right = rr;
+ changed |= 1;
+ } else if (lr == rr) {
+ /* a - X CMP b - X ==> a CMP b */
+ left = ll;
+ right = rl;
+ changed |= 1;
+ }
+ break;
+ case iro_Rot:
+ if (get_Rot_right(left) == get_Rot_right(right)) {
+ /* a ROT X CMP b ROT X */
+ left = get_Rot_left(left);
+ right = get_Rot_left(right);
+ changed |= 1;
+ }
+ break;
}
- left = new_left;
- right = new_right;
- set_Cmp_left( n, left);
- set_Cmp_right(n, right);
-no_remove_conv:;
+
}
}
if (get_irn_mode(left) == mode_b) {
- ir_graph* irg = current_ir_graph;
- ir_node* block = get_nodes_block(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
switch (proj_nr) {
case pn_Cmp_Le: return new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
/*
* First step: normalize the compare op
- * by placing the constant on the right site
+ * by placing the constant on the right side
* or moving the lower address node to the left.
* We ignore the case that both are constants
* this case should be optimized away.
*/
- if (get_irn_op(right) == op_Const) {
+ if (is_Const(right)) {
c = right;
- } else if (get_irn_op(left) == op_Const) {
+ } else if (is_Const(left)) {
c = left;
left = right;
right = c;
* be optimized, see this:
* -MININT < 0 =/=> MININT > 0 !!!
*/
- if (get_opt_constant_folding() && get_irn_op(left) == op_Minus &&
+ if (is_Minus(left) &&
(!mode_overflow_on_unary_Minus(mode) ||
(mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
- left = get_Minus_op(left);
tv = tarval_neg(tv);
if (tv != tarval_bad) {
+ left = get_Minus_op(left);
proj_nr = get_inversed_pnc(proj_nr);
changed |= 2;
}
+ } else if (is_Not(left) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)) {
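+    /* ~x CMP c  ==>  x CMP ~c, only valid for == and != */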
+ tv = tarval_not(tv);
+
+ if (tv != tarval_bad) {
+ left = get_Not_op(left);
+ changed |= 2;
+ }
}
/* for integer modes, we have more */
if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
/* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
- if (classify_tarval(tv) == TV_CLASSIFY_NULL && get_irn_op(left) == op_Sub) {
- right = get_Sub_right(left);
+ if (classify_tarval(tv) == TV_CLASSIFY_NULL && is_Sub(left)) {
+      right = get_Sub_right(left);
left = get_Sub_left(left);
tv = value_of(right);
- if (tv != tarval_bad) {
- changed = 1;
- }
+ changed = 1;
}
if (tv != tarval_bad) {
*
* if C is a single Bit constant.
*/
- if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) &&
- (get_irn_op(left) == op_And)) {
+ if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_And(left)) {
if (tarval_is_single_bit(tv)) {
/* check for Constant's match. We have to check the tarvals here,
   because our const might have been changed */
right = new_Const(mode, tv);
/* create a new compare */
- n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block,
- left, right);
+ n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block, left, right);
set_Proj_pred(proj, n);
set_Proj_proj(proj, proj_nr);
ir_node *t = get_Mux_true(n);
/*
- * Note: normalization puts the constant on the right site,
+ * Note: normalization puts the constant on the right side,
* so we check only one case.
*
* Note further that these optimization work even for floating point
if (get_opt_constant_folding()) {
/* neither constants nor Tuple values can be evaluated */
if (iro != iro_Const && (get_irn_mode(n) != mode_T)) {
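+      /* enable floating point constant folding only if the graph allows non-strict fp algebra */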
+ unsigned fp_model = get_irg_fp_model(current_ir_graph);
+ int old_fp_mode = tarval_enable_fp_ops((fp_model & fp_strict_algebraic) == 0);
/* try to evaluate */
tv = computed_value(n);
if (tv != tarval_bad) {
if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
set_Const_type(nw, old_tp);
DBG_OPT_CSTEVAL(oldn, nw);
+ tarval_enable_fp_ops(old_fp_mode);
return nw;
}
+ tarval_enable_fp_ops(old_fp_mode);
}
}
(iro == iro_Block) ) /* Flags tested local. */
n = equivalent_node(n);
- optimize_preds(n); /* do node specific optimizations of nodes predecessors. */
-
/* Common Subexpression Elimination.
*
* Checks whether n is already available.
if (get_opt_constant_folding()) {
/* neither constants nor Tuple values can be evaluated */
if (iro != iro_Const && get_irn_mode(n) != mode_T) {
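+      /* as above: respect the graph's fp model when folding constants */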
+ unsigned fp_model = get_irg_fp_model(current_ir_graph);
+ int old_fp_mode = tarval_enable_fp_ops((fp_model & fp_strict_algebraic) == 0);
/* try to evaluate */
tv = computed_value(n);
if (tv != tarval_bad) {
set_Const_type(n, old_tp);
DBG_OPT_CSTEVAL(oldn, n);
+ tarval_enable_fp_ops(old_fp_mode);
return n;
}
+ tarval_enable_fp_ops(old_fp_mode);
}
}
(iro == iro_Block) ) /* Flags tested local. */
n = equivalent_node(n);
- optimize_preds(n); /* do node specific optimizations of nodes predecessors. */
-
/** common subexpression elimination **/
/* Checks whether n is already available. */
/* The block input is used to distinguish different subexpressions. Right