/* Make types visible to allow most efficient access */
#include "entity_t.h"
+/**
+ * Returns the tarval of a Const node or tarval_bad for all other nodes.
+ */
+static tarval *default_value_of(const ir_node *n) {
+ if (is_Const(n))
+ return get_Const_tarval(n); /* might return tarval_bad */
+ else
+ return tarval_bad;
+}
+
+value_of_func value_of_ptr = default_value_of;
+
+void set_value_of_func(value_of_func func) {
+ if (func != NULL)
+ value_of_ptr = func;
+ else
+ value_of_ptr = default_value_of;
+}
+
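+/*
+ * Usage sketch (hypothetical caller, not part of this change): an analysis
+ * that can evaluate more nodes than just Const may install its own
+ * callback and restore the default afterwards:
+ *
+ *   set_value_of_func(my_value_of);   my_value_of matches value_of_func
+ *   ... run the analysis ...
+ *   set_value_of_func(NULL);          NULL reinstalls default_value_of
+ */
+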
/**
* Return the value of a Constant.
*/
} /* computed_value_Shrs */
/**
- * Return the value of a Rot.
+ * Return the value of a Rotl.
*/
-static tarval *computed_value_Rot(ir_node *n) {
- ir_node *a = get_Rot_left(n);
- ir_node *b = get_Rot_right(n);
+static tarval *computed_value_Rotl(ir_node *n) {
+ ir_node *a = get_Rotl_left(n);
+ ir_node *b = get_Rotl_right(n);
tarval *ta = value_of(a);
tarval *tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)) {
- return tarval_rot (ta, tb);
+ return tarval_rotl(ta, tb);
}
return tarval_bad;
-} /* computed_value_Rot */
+} /* computed_value_Rotl */
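+
+/*
+ * Worked example (illustration only): for a Rotl with ta = 0x12345678 in a
+ * 32-bit mode and tb = 8, the node folds to tarval_rotl(ta, tb) = 0x34567812.
+ */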
/**
* Return the value of a Conv.
CASE(Shl);
CASE(Shr);
CASE(Shrs);
- CASE(Rot);
+ CASE(Rotl);
CASE(Carry);
CASE(Borrow);
CASE(Conv);
ir_node *left, *right;
ir_mode *mode = get_irn_mode(n);
- /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
- if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
- return n;
-
n = equivalent_node_neutral_zero(n);
if (n != oldn)
return n;
+ /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
+ if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
+ return n;
+
left = get_Add_left(n);
right = get_Add_right(n);
#define equivalent_node_Shl equivalent_node_left_zero
#define equivalent_node_Shr equivalent_node_left_zero
#define equivalent_node_Shrs equivalent_node_left_zero
-#define equivalent_node_Rot equivalent_node_left_zero
+#define equivalent_node_Rotl equivalent_node_left_zero
/**
* Optimize a - 0 and (a + x) - x (for modes with wrap-around).
ir_mode *n_mode = get_irn_mode(n);
ir_mode *a_mode = get_irn_mode(a);
+restart:
if (n_mode == a_mode) { /* No Conv necessary */
if (get_Conv_strict(n)) {
/* special case: the predecessor might also be a Conv */
if (is_Conv(a)) {
if (! get_Conv_strict(a)) {
/* first one is not strict, kick it */
- set_Conv_op(n, get_Conv_op(a));
- return n;
+ a = get_Conv_op(a);
+ a_mode = get_irn_mode(a);
+ set_Conv_op(n, a);
+ goto restart;
}
/* else both are strict conv, second is superfluous */
- } else if(is_Proj(a)) {
+ } else if (is_Proj(a)) {
ir_node *pred = get_Proj_pred(a);
- if(is_Load(pred)) {
+ if (is_Load(pred)) {
/* loads always return with the exact precision of n_mode */
assert(get_Load_mode(pred) == n_mode);
return a;
ir_node *b = get_Conv_op(a);
ir_mode *b_mode = get_irn_mode(b);
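+ /* Illustrative example (not part of the original comment): a strict
+ Conv to mode_F (a) followed by a strict Conv mode_F -> mode_D (n):
+ the widening Conv cannot round away more precision than the
+ narrowing one already did, so the strict bit of n is redundant. */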
+ if (get_Conv_strict(n) && get_Conv_strict(a)) {
+ /* both are strict conv */
+ if (smaller_mode(a_mode, n_mode)) {
+ /* both are strict, but the first is smaller, so
+ the second cannot remove more precision; drop the
+ strict bit */
+ set_Conv_strict(n, 0);
+ }
+ }
if (n_mode == b_mode) {
- if (n_mode == mode_b) {
- n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
- } else if (mode_is_int(n_mode)) {
- if (get_mode_size_bits(b_mode) <= get_mode_size_bits(a_mode)) {
- n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
+ if (! get_Conv_strict(n) && ! get_Conv_strict(a)) {
+ if (n_mode == mode_b) {
+ n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
+ } else if (get_mode_arithmetic(n_mode) == get_mode_arithmetic(a_mode)) {
+ if (smaller_mode(b_mode, a_mode)) {
+ n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
+ }
+ }
+ }
+ if (is_Conv(b)) {
+ if (smaller_mode(b_mode, a_mode)) {
+ if (get_Conv_strict(n))
+ set_Conv_strict(b, 1);
+ n = b; /* ConvA(ConvB(ConvA(...))) == ConvA(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
}
}
for (i = 0; i < n_preds; ++i) {
first_val = get_Phi_pred(n, i);
if ( (first_val != n) /* not self pointer */
-#if 1
+#if 0
+ /* BEWARE: when this #if is changed to 1, Phis will ignore their Bad
+ * predecessors. Then, Phi nodes in dead code might be removed, causing
+ * nodes to point to themselves (Adds, for instance).
+ * This is really bad and causes endless recursion on several
+ * code paths, so we do NOT optimize such code.
+ * This is not as bad as it sounds: optimize_cf() removes bad control flow
+ * (and bad Phi predecessors), so live code is optimized later.
+ */
&& (! is_Bad(first_val))
#endif
) { /* value not dead */
ir_node *scnd_val = get_Phi_pred(n, i);
if ( (scnd_val != n)
&& (scnd_val != first_val)
-#if 1
+#if 0
+ /* see above */
&& (! is_Bad(scnd_val))
#endif
) {
CASE(Shl);
CASE(Shr);
CASE(Shrs);
- CASE(Rot);
+ CASE(Rotl);
CASE(Not);
CASE(Minus);
CASE(Mul);
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
break;
- case iro_Rot:
- if (get_Rot_right(left) == get_Rot_right(right)) {
- /* a ROT X CMP b ROT X ==> a CMP b */
- left = get_Rot_left(left);
- right = get_Rot_left(right);
+ case iro_Rotl:
+ if (get_Rotl_right(left) == get_Rotl_right(right)) {
+ /* a ROTL X CMP b ROTL X ==> a CMP b */
+ left = get_Rotl_left(left);
+ right = get_Rotl_left(right);
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
} /* transform_node_Or_bf_store */
/**
- * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot
+ * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rotl
*/
-static ir_node *transform_node_Or_Rot(ir_node *or) {
+static ir_node *transform_node_Or_Rotl(ir_node *or) {
ir_mode *mode = get_irn_mode(or);
ir_node *shl, *shr, *block;
ir_node *irn, *x, *c1, *c2, *v, *sub, *n;
/* yet, condition met */
block = get_irn_n(or, -1);
- n = new_r_Rot(current_ir_graph, block, x, c1, mode);
+ n = new_r_Rotl(current_ir_graph, block, x, c1, mode);
- DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROT);
+ DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
} else if (is_Sub(c1)) {
v = c2;
block = get_nodes_block(or);
/* a rotate right is not supported, so use a rotate left */
- n = new_r_Rot(current_ir_graph, block, x, sub, mode);
+ n = new_r_Rotl(current_ir_graph, block, x, sub, mode);
- DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
+ DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
} else if (is_Sub(c2)) {
v = c1;
block = get_irn_n(or, -1);
/* a Rot Left */
- n = new_r_Rot(current_ir_graph, block, x, v, mode);
+ n = new_r_Rotl(current_ir_graph, block, x, v, mode);
- DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
+ DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
}
return or;
-} /* transform_node_Or_Rot */
+} /* transform_node_Or_Rotl */
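+
+/*
+ * Worked example (illustration only): in a 32-bit mode,
+ * Or(Shl(x, Const 3), Shr(x, Const 29)) matches with c1 = 3 and
+ * c2 = 32 - 3, so the whole Or is replaced by Rotl(x, Const 3).
+ */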
/**
* Transform an Or.
HANDLE_BINOP_PHI(tarval_or, a, b, c, mode);
n = transform_node_Or_bf_store(n);
- n = transform_node_Or_Rot(n);
+ n = transform_node_Or_Rotl(n);
if (n != oldn)
return n;
static ir_node *transform_node(ir_node *n);
/**
- * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl, Rot.
+ * Optimize ((a >> c1) >> c2), works for Shr, Shrs, Shl, Rotl.
*
* Should be moved to reassociation?
*/
static ir_node *transform_node_shift(ir_node *n) {
ir_node *left, *right;
- tarval *tv1, *tv2, *res;
ir_mode *mode;
- int modulo_shf, flag;
+ tarval *tv1, *tv2, *res;
+ ir_node *in[2], *irn, *block;
left = get_binop_left(n);
if (tv2 == tarval_bad)
return n;
- res = tarval_add(tv1, tv2);
+ res = tarval_add(tv1, tv2);
+ mode = get_irn_mode(n);
/* beware: a simple replacement works only, if res < modulo shift */
+ if (!is_Rotl(n)) {
+ int modulo_shf = get_mode_modulo_shift(mode);
+ assert(modulo_shf >= (int) get_mode_size_bits(mode));
+ if (modulo_shf > 0) {
+ tarval *modulo = new_tarval_from_long(modulo_shf,
+ get_tarval_mode(res));
+
+ /* shifting too much */
+ if (!(tarval_cmp(res, modulo) & pn_Cmp_Lt)) {
+ if (is_Shrs(n)) {
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *block = get_nodes_block(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *cnst = new_Const(mode_Iu, new_tarval_from_long(get_mode_size_bits(mode)-1, mode_Iu));
+ return new_rd_Shrs(dbgi, irg, block, get_binop_left(left),
+ cnst, mode);
+ }
+
+ return new_Const(mode, get_mode_null(mode));
+ }
+ }
+ } else {
+ res = tarval_mod(res, new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(res)));
+ }
+
+ /* ok, we can replace it */
+ block = get_nodes_block(n);
+
+ in[0] = get_binop_left(left);
+ in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);
+
+ irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);
+
+ DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);
+
+ return transform_node(irn);
+} /* transform_node_shift */
+
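+/*
+ * Worked examples (illustration only) for transform_node_shift() in a
+ * 32-bit mode:
+ *   Shl(Shl(x, 3), 5)     becomes  Shl(x, 8)
+ *   Shr(Shr(x, 20), 20)   becomes  Const 0      (40 exceeds the modulo shift)
+ *   Shrs(Shrs(x, 20), 20) becomes  Shrs(x, 31)  (keeps replicating the sign bit)
+ *   Rotl(Rotl(x, 20), 20) becomes  Rotl(x, 8)   (rotation count taken mod 32)
+ */
+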
+/**
+ * normalisation: (x & c1) >> c2 to (x >> c2) & (c1 >> c2)
+ * (we can use And, Or, Eor instead of & and
+ * Shl, Shr, Shrs, Rotl instead of >>,
+ * with a special case for Or/Eor + Shrs)
+ */
+static ir_node *transform_node_bitop_shift(ir_node *n) {
+ ir_node *left;
+ ir_node *right = get_binop_right(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *bitop_left;
+ ir_node *bitop_right;
+ ir_op *op_left;
+ ir_graph *irg;
+ ir_node *block;
+ dbg_info *dbgi;
+ ir_node *new_shift;
+ ir_node *new_bitop;
+ ir_node *new_const;
+ tarval *tv1;
+ tarval *tv2;
+ tarval *tv_shift;
+
+ assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n));
+
+ if (!is_Const(right))
+ return n;
+
+ left = get_binop_left(n);
+ op_left = get_irn_op(left);
+ if (op_left != op_And && op_left != op_Or && op_left != op_Eor)
+ return n;
+
+ /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
+ if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) {
+ /* TODO: test if the sign bit is affected */
+ return n;
+ }
+
+ bitop_right = get_binop_right(left);
+ if (!is_Const(bitop_right))
+ return n;
+
+ bitop_left = get_binop_left(left);
+
+ irg = get_irn_irg(n);
+ block = get_nodes_block(n);
+ dbgi = get_irn_dbg_info(n);
+ tv1 = get_Const_tarval(bitop_right);
+ tv2 = get_Const_tarval(right);
+
+ assert(get_tarval_mode(tv1) == mode);
+
+ if (is_Shl(n)) {
+ new_shift = new_rd_Shl(dbgi, irg, block, bitop_left, right, mode);
+ tv_shift = tarval_shl(tv1, tv2);
+ } else if (is_Shr(n)) {
+ new_shift = new_rd_Shr(dbgi, irg, block, bitop_left, right, mode);
+ tv_shift = tarval_shr(tv1, tv2);
+ } else if (is_Shrs(n)) {
+ new_shift = new_rd_Shrs(dbgi, irg, block, bitop_left, right, mode);
+ tv_shift = tarval_shrs(tv1, tv2);
+ } else {
+ assert(is_Rotl(n));
+ new_shift = new_rd_Rotl(dbgi, irg, block, bitop_left, right, mode);
+ tv_shift = tarval_rotl(tv1, tv2);
+ }
+
+ assert(get_tarval_mode(tv_shift) == mode);
+ new_const = new_Const(mode, tv_shift);
+
+ if (op_left == op_And) {
+ new_bitop = new_rd_And(dbgi, irg, block, new_shift, new_const, mode);
+ } else if (op_left == op_Or) {
+ new_bitop = new_rd_Or(dbgi, irg, block, new_shift, new_const, mode);
+ } else {
+ assert(op_left == op_Eor);
+ new_bitop = new_rd_Eor(dbgi, irg, block, new_shift, new_const, mode);
+ }
+
+ return new_bitop;
+}
+
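+/*
+ * Worked example (illustration only): in a 32-bit mode,
+ * Shr(And(x, Const 0xF0), Const 4) is normalised to
+ * And(Shr(x, Const 4), Const 0x0F), so the shift moves past the bitop
+ * and can combine with other shifts below it.
+ */
+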
+/**
+ * normalisation:
+ *   (x << c1) >> c2  <=>  (x >> (c2-c1)) & ((-1 << c1) >> c2)  if c2 >= c1
+ *                    <=>  (x << (c1-c2)) & ((-1 << c1) >> c2)  if c2 <  c1
+ *   (x >> c2) << c1  <=>  (x >> (c2-c1)) & ((-1 >> c2) << c1)  if c2 >= c1
+ *                    <=>  (x << (c1-c2)) & ((-1 >> c2) << c1)  if c2 <  c1
+ *   (x >>s c2) << c1 is handled like (x >> c2) << c1, but only if c1 >= c2
+ */
+static ir_node *transform_node_shl_shr(ir_node *n) {
+ ir_node *left;
+ ir_node *right = get_binop_right(n);
+ ir_node *x;
+ ir_graph *irg;
+ ir_node *block;
+ ir_mode *mode;
+ dbg_info *dbgi;
+ ir_node *new_const;
+ ir_node *new_shift;
+ ir_node *new_and;
+ tarval *tv_shl;
+ tarval *tv_shr;
+ tarval *tv_shift;
+ tarval *tv_mask;
+ pn_Cmp pnc;
+ int need_shrs = 0;
+
+ assert(is_Shl(n) || is_Shr(n) || is_Shrs(n));
+
+ if (!is_Const(right))
+ return n;
+
+ left = get_binop_left(n);
mode = get_irn_mode(n);
+ if (is_Shl(n) && (is_Shr(left) || is_Shrs(left))) {
+ ir_node *shr_right = get_binop_right(left);
- flag = 0;
+ if (!is_Const(shr_right))
+ return n;
- modulo_shf = get_mode_modulo_shift(mode);
- if (modulo_shf > 0) {
- tarval *modulo = new_tarval_from_long(modulo_shf, get_tarval_mode(res));
+ x = get_binop_left(left);
+ tv_shr = get_Const_tarval(shr_right);
+ tv_shl = get_Const_tarval(right);
- if (tarval_cmp(res, modulo) & pn_Cmp_Lt)
- flag = 1;
- } else
- flag = 1;
+ if (is_Shrs(left)) {
+ /* shrs variant only allowed if c1 >= c2 */
+ if (! (tarval_cmp(tv_shl, tv_shr) & pn_Cmp_Ge))
+ return n;
- if (flag) {
- /* ok, we can replace it */
- ir_node *in[2], *irn, *block = get_irn_n(n, -1);
+ tv_mask = tarval_shrs(get_mode_all_one(mode), tv_shr);
+ need_shrs = 1;
+ } else {
+ tv_mask = tarval_shr(get_mode_all_one(mode), tv_shr);
+ }
+ tv_mask = tarval_shl(tv_mask, tv_shl);
+ } else if (is_Shr(n) && is_Shl(left)) {
+ ir_node *shl_right = get_Shl_right(left);
+
+ if (!is_Const(shl_right))
+ return n;
- in[0] = get_binop_left(left);
- in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);
+ x = get_Shl_left(left);
+ tv_shr = get_Const_tarval(right);
+ tv_shl = get_Const_tarval(shl_right);
- irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);
+ tv_mask = tarval_shl(get_mode_all_one(mode), tv_shl);
+ tv_mask = tarval_shr(tv_mask, tv_shr);
+ } else {
+ return n;
+ }
- DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);
+ assert(get_tarval_mode(tv_shl) == get_tarval_mode(tv_shr));
+ assert(tv_mask != tarval_bad);
+ assert(get_tarval_mode(tv_mask) == mode);
- return transform_node(irn);
+ irg = get_irn_irg(n);
+ block = get_nodes_block(n);
+ dbgi = get_irn_dbg_info(n);
+
+ pnc = tarval_cmp(tv_shl, tv_shr);
+ if (pnc == pn_Cmp_Lt || pnc == pn_Cmp_Eq) {
+ tv_shift = tarval_sub(tv_shr, tv_shl);
+ new_const = new_Const(get_tarval_mode(tv_shift), tv_shift);
+ if (need_shrs) {
+ new_shift = new_rd_Shrs(dbgi, irg, block, x, new_const, mode);
+ } else {
+ new_shift = new_rd_Shr(dbgi, irg, block, x, new_const, mode);
+ }
+ } else {
+ assert(pnc == pn_Cmp_Gt);
+ tv_shift = tarval_sub(tv_shl, tv_shr);
+ new_const = new_Const(get_tarval_mode(tv_shift), tv_shift);
+ new_shift = new_rd_Shl(dbgi, irg, block, x, new_const, mode);
}
- return n;
-} /* transform_node_shift */
+
+ new_const = new_Const(mode, tv_mask);
+ new_and = new_rd_And(dbgi, irg, block, new_shift, new_const, mode);
+
+ return new_and;
+}
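+
+/*
+ * Worked example (illustration only): in a 32-bit mode,
+ * Shr(Shl(x, Const 4), Const 2) has shl count 4 > shr count 2, so it is
+ * rewritten to And(Shl(x, Const 2), Const 0x3FFFFFFC), the mask being
+ * (-1 << 4) >> 2.
+ */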
/**
* Transform a Shr.
*/
static ir_node *transform_node_Shr(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Shr_left(n);
- ir_node *b = get_Shr_right(n);
- ir_mode *mode = get_irn_mode(n);
+ ir_node *left = get_Shr_left(n);
+ ir_node *right = get_Shr_right(n);
+ ir_mode *mode = get_irn_mode(n);
+
+ HANDLE_BINOP_PHI(tarval_shr, left, right, c, mode);
+ n = transform_node_shift(n);
+
+ if (is_Shr(n))
+ n = transform_node_shl_shr(n);
+ if (is_Shr(n))
+ n = transform_node_bitop_shift(n);
- HANDLE_BINOP_PHI(tarval_shr, a, b, c, mode);
- return transform_node_shift(n);
+ return n;
} /* transform_node_Shr */
/**
ir_mode *mode = get_irn_mode(n);
HANDLE_BINOP_PHI(tarval_shrs, a, b, c, mode);
- return transform_node_shift(n);
+ n = transform_node_shift(n);
+
+ if (is_Shrs(n))
+ n = transform_node_bitop_shift(n);
+
+ return n;
} /* transform_node_Shrs */
/**
ir_mode *mode = get_irn_mode(n);
HANDLE_BINOP_PHI(tarval_shl, a, b, c, mode);
- return transform_node_shift(n);
+ n = transform_node_shift(n);
+
+ if (is_Shl(n))
+ n = transform_node_shl_shr(n);
+ if (is_Shl(n))
+ n = transform_node_bitop_shift(n);
+
+ return n;
} /* transform_node_Shl */
/**
- * Transform a Rot.
+ * Transform a Rotl.
*/
-static ir_node *transform_node_Rot(ir_node *n) {
+static ir_node *transform_node_Rotl(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Rot_left(n);
- ir_node *b = get_Rot_right(n);
+ ir_node *a = get_Rotl_left(n);
+ ir_node *b = get_Rotl_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_rot, a, b, c, mode);
- return transform_node_shift(n);
-} /* transform_node_Rot */
+ HANDLE_BINOP_PHI(tarval_rotl, a, b, c, mode);
+ n = transform_node_shift(n);
+
+ if (is_Rotl(n))
+ n = transform_node_bitop_shift(n);
+
+ return n;
+} /* transform_node_Rotl */
/**
* Transform a Conv.
CASE(Shr);
CASE(Shrs);
CASE(Shl);
- CASE(Rot);
+ CASE(Rotl);
CASE(Conv);
CASE(End);
CASE(Mux);
/*
* Calculate a hash value of a node.
*/
-unsigned ir_node_hash(ir_node *node) {
+unsigned ir_node_hash(const ir_node *node) {
unsigned h;
int i, irn_arity;
}
} /* update_value_table */
-/**
- * Return the canonical node computing the same value as n.
- *
- * @param value_table The value table
- * @param n The node to lookup
- *
- * Looks up the node in a hash table.
- *
- * For Const nodes this is performed in the constructor, too. Const
- * nodes are extremely time critical because of their frequent use in
- * constant string arrays.
- */
-static INLINE ir_node *identify(pset *value_table, ir_node *n) {
- ir_node *o = NULL;
-
- if (!value_table) return n;
-
- normalize_node(n);
-
- o = pset_find(value_table, n, ir_node_hash(n));
- if (o == NULL)
- return n;
-
- update_known_irn(o, n);
- DBG_OPT_CSE(n, o);
-
- return o;
-} /* identify */
-
-/**
- * During construction we set the op_pin_state_pinned flag in the graph right when the
- * optimization is performed. The flag turning on procedure global cse could
- * be changed between two allocations. This way we are safe.
- *
- * @param value_table The value table
- * @param n The node to lookup
- */
-static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
- ir_node *old = n;
-
- n = identify(value_table, n);
- if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n))
- set_irg_pinned(current_ir_graph, op_pin_state_floats);
- return n;
-} /* identify_cons */
-
/*
* Return the canonical node computing the same value as n.
* Looks up the node in a hash table, enters it in the table
return o;
} /* identify_remember */
+/**
+ * During construction we set the op_pin_state_pinned flag in the graph right
+ * when the optimization is performed. The flag that turns on procedure-global
+ * CSE could be changed between two allocations; this way we are safe.
+ *
+ * @param value_table The value table
+ * @param n The node to look up
+ */
+static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
+ ir_node *old = n;
+
+ n = identify_remember(value_table, n);
+ if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n))
+ set_irg_pinned(current_ir_graph, op_pin_state_floats);
+ return n;
+} /* identify_cons */
+
/* Add a node to the identities value table. */
void add_identities(pset *value_table, ir_node *node) {
if (get_opt_cse() && is_no_Block(node))
}
/* remove unnecessary nodes */
- if (get_opt_constant_folding() ||
+ if (get_opt_algebraic_simplification() ||
(iro == iro_Phi) || /* always optimize these nodes. */
(iro == iro_Id) ||
(iro == iro_Proj) ||
/* Some more constant expression evaluation that does not allow to
free the node. */
iro = get_irn_opcode(n);
- if (get_opt_constant_folding() ||
+ if (get_opt_algebraic_simplification() ||
(iro == iro_Cond) ||
(iro == iro_Proj)) /* Flags tested local. */
n = transform_node(n);
/* Remove nodes with dead (Bad) input.
Run always for transformation induced Bads. */
- n = gigo (n);
+ n = gigo(n);
/* Now we have a legal, useful node. Enter it in hash table for CSE */
if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
subexpressions within a block. */
if (get_opt_cse()) {
- n = identify(current_ir_graph->value_table, n);
+ n = identify_remember(current_ir_graph->value_table, n);
}
/* Some more constant expression evaluation. */