} /* computed_value_Shrs */
/**
- * Return the value of a Rot.
+ * Return the value of a Rotl.
*/
-static tarval *computed_value_Rot(ir_node *n) {
- ir_node *a = get_Rot_left(n);
- ir_node *b = get_Rot_right(n);
+static tarval *computed_value_Rotl(ir_node *n) {
+ ir_node *a = get_Rotl_left(n);
+ ir_node *b = get_Rotl_right(n);
tarval *ta = value_of(a);
tarval *tb = value_of(b);
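+	/* fold only if both operands are constant; e.g. Rotl(0x12, 8) in a 32-bit mode yields 0x1200 */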
if ((ta != tarval_bad) && (tb != tarval_bad)) {
- return tarval_rot (ta, tb);
+ return tarval_rotl(ta, tb);
}
return tarval_bad;
-} /* computed_value_Rot */
+} /* computed_value_Rotl */
/**
 * Return the value of a Confirm,
 * if it has the form Confirm(x, '=', Const).
*/
static tarval *computed_value_Confirm(ir_node *n) {
- return get_Confirm_cmp(n) == pn_Cmp_Eq ?
- value_of(get_Confirm_bound(n)) : tarval_bad;
+ /*
+ * Beware: we might produce Phi(Confirm(x == true), Confirm(x == false)).
+ * Do NOT optimize them away (CondEval wants them), so wait until
+ * remove_confirm is activated.
+ */
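+	/* e.g. in the true branch of if (x == 3), Confirm(x, '=', 3) is known to have the value 3 */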
+ if (get_opt_remove_confirm()) {
+ return get_Confirm_cmp(n) == pn_Cmp_Eq ?
+ value_of(get_Confirm_bound(n)) : tarval_bad;
+ }
+ return tarval_bad;
} /* computed_value_Confirm */
/**
CASE(Shl);
CASE(Shr);
CASE(Shrs);
- CASE(Rot);
+ CASE(Rotl);
CASE(Carry);
CASE(Borrow);
CASE(Conv);
/* (a ^ b) ^ a -> b */
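+	/* a ^ a == 0 and 0 ^ b == b, so the outer a cancels */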
n = ab;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
} else if (ab == b) {
/* (a ^ b) ^ b -> a */
n = aa;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
}
}
if (is_Eor(b)) {
/* a ^ (a ^ b) -> b */
n = bb;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
} else if (bb == a) {
/* a ^ (b ^ a) -> b */
n = ba;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
}
}
ir_node *left, *right;
ir_mode *mode = get_irn_mode(n);
- /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
- if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
- return n;
-
n = equivalent_node_neutral_zero(n);
if (n != oldn)
return n;
+ /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
+ if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
+ return n;
+
left = get_Add_left(n);
right = get_Add_right(n);
#define equivalent_node_Shl equivalent_node_left_zero
#define equivalent_node_Shr equivalent_node_left_zero
#define equivalent_node_Shrs equivalent_node_left_zero
-#define equivalent_node_Rot equivalent_node_left_zero
+#define equivalent_node_Rotl equivalent_node_left_zero
/**
* Optimize a - 0 and (a + x) - x (for modes with wrap-around).
ir_mode *n_mode = get_irn_mode(n);
ir_mode *a_mode = get_irn_mode(a);
+restart:
if (n_mode == a_mode) { /* No Conv necessary */
if (get_Conv_strict(n)) {
			/* special case: the predecessor might also be a Conv */
if (is_Conv(a)) {
if (! get_Conv_strict(a)) {
/* first one is not strict, kick it */
- set_Conv_op(n, get_Conv_op(a));
- return n;
+ a = get_Conv_op(a);
+ a_mode = get_irn_mode(a);
+ set_Conv_op(n, a);
+ goto restart;
}
- /* else both are strict conv, second is superflous */
- } else if(is_Proj(a)) {
+ /* else both are strict conv, second is superfluous */
+ } else if (is_Proj(a)) {
ir_node *pred = get_Proj_pred(a);
- if(is_Load(pred)) {
+ if (is_Load(pred)) {
/* loads always return with the exact precision of n_mode */
assert(get_Load_mode(pred) == n_mode);
return a;
ir_node *b = get_Conv_op(a);
ir_mode *b_mode = get_irn_mode(b);
+ if (get_Conv_strict(n) && get_Conv_strict(a)) {
+ /* both are strict conv */
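+				/* e.g. a strict Conv to float followed by a strict Conv to double:
+				   the widening Conv cannot round again */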
+ if (smaller_mode(a_mode, n_mode)) {
+					/* both are strict, but the first is smaller, so the
+					   second cannot remove more precision; drop the strict bit */
+ set_Conv_strict(n, 0);
+ }
+ }
if (n_mode == b_mode) {
- if (n_mode == mode_b) {
- n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
- } else if (mode_is_int(n_mode)) {
- if (get_mode_size_bits(b_mode) <= get_mode_size_bits(a_mode)) {
- n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
+ if (! get_Conv_strict(n) && ! get_Conv_strict(a)) {
+ if (n_mode == mode_b) {
+ n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
+ } else if (get_mode_arithmetic(n_mode) == get_mode_arithmetic(a_mode)) {
+ if (smaller_mode(b_mode, a_mode)) {
+ n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
+ }
+ }
+ }
+ if (is_Conv(b)) {
+ if (smaller_mode(b_mode, a_mode)) {
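+				/* the widening from b_mode to a_mode is lossless, so
+				   converting back down just restores b */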
+ if (get_Conv_strict(n))
+ set_Conv_strict(b, 1);
+ n = b; /* ConvA(ConvB(ConvA(...))) == ConvA(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
}
}
for (i = 0; i < n_preds; ++i) {
first_val = get_Phi_pred(n, i);
if ( (first_val != n) /* not self pointer */
-#if 1
+#if 0
+	/* BEWARE: when this is changed to 1, Phis will ignore their Bad
+	 * predecessors. Then, Phi nodes in dead code might be removed, creating
+	 * nodes that point to themselves (Adds, for instance).
+	 * This is really bad and causes endless recursion on several
+	 * code paths, so we do NOT optimize such code.
+	 * This is not as bad as it sounds: optimize_cf() removes bad control flow
+	 * (and bad Phi predecessors), so live code is optimized later.
+	 */
&& (! is_Bad(first_val))
#endif
) { /* value not dead */
ir_node *scnd_val = get_Phi_pred(n, i);
if ( (scnd_val != n)
&& (scnd_val != first_val)
-#if 1
+#if 0
+ /* see above */
&& (! is_Bad(scnd_val))
#endif
) {
*/
n = pred;
}
- if (pnc == pn_Cmp_Eq) {
- ir_node *bound = get_Confirm_bound(n);
-
- /*
- * Optimize a rare case:
- * Confirm(x, '=', Constlike) ==> Constlike
- */
- if (is_irn_constlike(bound)) {
- DBG_OPT_CONFIRM(n, bound);
- return bound;
- }
- }
- return get_opt_remove_confirm() ? get_Confirm_value(n) : n;
+ if (get_opt_remove_confirm())
+ return get_Confirm_value(n);
+ return n;
}
/**
CASE(Shl);
CASE(Shr);
CASE(Shrs);
- CASE(Rot);
+ CASE(Rotl);
CASE(Not);
CASE(Minus);
CASE(Mul);
}
}
}
+
return n;
} /* transform_node_AddSub */
b = get_Add_right(n);
mode = get_irn_mode(n);
+
+ if (mode_is_reference(mode)) {
+ ir_mode *lmode = get_irn_mode(a);
+
+ if (is_Const(b) && is_Const_null(b) && mode_is_int(lmode)) {
+ /* an Add(a, NULL) is a hidden Conv */
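+			/* e.g. a + (ref)NULL: the integer a is merely reinterpreted
+			   in the reference mode, which is exactly a Conv */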
+ dbg_info *dbg = get_irn_dbg_info(n);
+ return new_rd_Conv(dbg, current_ir_graph, get_nodes_block(n), a, mode);
+ }
+ }
+
HANDLE_BINOP_PHI(tarval_add, a, b, c, mode);
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_num(mode)) {
	/* the following code leads to endless recursion when Muls are replaced by a simple instruction chain */
if (!is_arch_dep_running() && a == b && mode_is_int(mode)) {
- ir_node *block = get_irn_n(n, -1);
+ ir_node *block = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
mode = get_irn_mode(n);
+ if (mode_is_int(mode)) {
+ ir_mode *lmode = get_irn_mode(a);
+
+ if (is_Const(b) && is_Const_null(b) && mode_is_reference(lmode)) {
+ /* a Sub(a, NULL) is a hidden Conv */
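+			/* e.g. a - (ref)NULL: the result is the pointer's numeric value,
+			   which is exactly a Conv to the int mode */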
+ dbg_info *dbg = get_irn_dbg_info(n);
+ return new_rd_Conv(dbg, current_ir_graph, get_nodes_block(n), a, mode);
+ }
+ }
+
restart:
HANDLE_BINOP_PHI(tarval_sub, a, b, c, mode);
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
break;
- case iro_Rot:
- if (get_Rot_right(left) == get_Rot_right(right)) {
- /* a ROT X CMP b ROT X ==> a CMP b */
- left = get_Rot_left(left);
- right = get_Rot_left(right);
+ case iro_Rotl:
+ if (get_Rotl_right(left) == get_Rotl_right(right)) {
+ /* a ROTL X CMP b ROTL X ==> a CMP b */
+ left = get_Rotl_left(left);
+ right = get_Rotl_left(right);
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
} /* transform_node_Or_bf_store */
/**
- * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot
+ * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rotl
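+ * E.g. for a 32-bit x, Or(Shl(x, 7), Shr(x, 25)) is Rotl(x, 7).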
*/
-static ir_node *transform_node_Or_Rot(ir_node *or) {
+static ir_node *transform_node_Or_Rotl(ir_node *or) {
ir_mode *mode = get_irn_mode(or);
ir_node *shl, *shr, *block;
ir_node *irn, *x, *c1, *c2, *v, *sub, *n;
	/* at this point, all conditions are met */
block = get_irn_n(or, -1);
- n = new_r_Rot(current_ir_graph, block, x, c1, mode);
+ n = new_r_Rotl(current_ir_graph, block, x, c1, mode);
- DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROT);
+ DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
} else if (is_Sub(c1)) {
v = c2;
block = get_nodes_block(or);
		/* a rotate right is not supported, so use a Rotl: Rotr(x, v) == Rotl(x, bits - v) */
- n = new_r_Rot(current_ir_graph, block, x, sub, mode);
+ n = new_r_Rotl(current_ir_graph, block, x, sub, mode);
- DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
+ DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
} else if (is_Sub(c2)) {
v = c1;
block = get_irn_n(or, -1);
/* a Rot Left */
- n = new_r_Rot(current_ir_graph, block, x, v, mode);
+ n = new_r_Rotl(current_ir_graph, block, x, v, mode);
- DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
+ DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
}
return or;
-} /* transform_node_Or_Rot */
+} /* transform_node_Or_Rotl */
/**
* Transform an Or.
HANDLE_BINOP_PHI(tarval_or, a, b, c, mode);
n = transform_node_Or_bf_store(n);
- n = transform_node_Or_Rot(n);
+ n = transform_node_Or_Rotl(n);
if (n != oldn)
return n;
static ir_node *transform_node(ir_node *n);
/**
- * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl, Rot.
- * Optimize ((a >> c1) >> c2); works for Shr, Shrs, Shl, Rotl.
*
* Should be moved to reassociation?
*/
} /* transform_node_Shl */
/**
- * Transform a Rot.
+ * Transform a Rotl.
*/
-static ir_node *transform_node_Rot(ir_node *n) {
+static ir_node *transform_node_Rotl(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Rot_left(n);
- ir_node *b = get_Rot_right(n);
+ ir_node *a = get_Rotl_left(n);
+ ir_node *b = get_Rotl_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_rot, a, b, c, mode);
+ HANDLE_BINOP_PHI(tarval_rotl, a, b, c, mode);
return transform_node_shift(n);
-} /* transform_node_Rot */
+} /* transform_node_Rotl */
/**
* Transform a Conv.
static ir_node *transform_node_Mux(ir_node *n) {
ir_node *oldn = n, *sel = get_Mux_sel(n);
ir_mode *mode = get_irn_mode(n);
+ ir_node *t = get_Mux_true(n);
+ ir_node *f = get_Mux_false(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *conds[1], *vals[2];
+ /* first normalization step: move a possible zero to the false case */
+ if (is_Proj(sel)) {
+ ir_node *cmp = get_Proj_pred(sel);
+
+ if (is_Cmp(cmp)) {
+ if (is_Const(t) && is_Const_null(t)) {
+			/* Psi(x, 0, y) => Psi(not(x), y, 0) */
+ pn_Cmp pnc = get_Proj_proj(sel);
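+				/* build the negated condition so the constant 0 ends up on the false input */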
+ sel = new_r_Proj(irg, get_nodes_block(cmp), cmp, mode_b,
+ get_negated_pnc(pnc, get_irn_mode(get_Cmp_left(cmp))));
+ conds[0] = sel;
+ vals[0] = f;
+ vals[1] = t;
+ n = new_rd_Psi(get_irn_dbg_info(n), irg, get_nodes_block(n), 1, conds, vals, mode);
+ t = vals[0];
+ f = vals[1];
+ }
+ }
+ }
+
+	/* note: after normalization, a constant 0 can only be on the false (default) input */
if (mode == mode_b) {
- ir_node *t = get_Mux_true(n);
- ir_node *f = get_Mux_false(n);
dbg_info *dbg = get_irn_dbg_info(n);
- ir_node *block = get_irn_n(n, -1);
+ ir_node *block = get_nodes_block(n);
ir_graph *irg = current_ir_graph;
if (is_Const(t)) {
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
return n;
}
- } else {
- ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
- assert(tv_t == tarval_b_false);
- if (is_Const(f)) {
- /* Muxb(sel, false, true) = Not(sel) */
- assert(get_Const_tarval(f) == tarval_b_true);
- DBG_OPT_ALGSIM0(oldn, not_sel, FS_OPT_MUX_NOT_BOOL);
- return not_sel;
- } else {
- /* Muxb(sel, false, x) = And(Not(sel), x) */
- n = new_rd_And(dbg, irg, block, not_sel, f, mode_b);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ANDNOT_BOOL);
- return n;
- }
}
} else if (is_Const(f)) {
tarval *tv_f = get_Const_tarval(f);
}
}
- if (is_Proj(sel) && !mode_honor_signed_zeros(mode)) {
+	/* more normalization: try to normalize Mux(x, C1, C2) with |C1 - C2| == 1 into Psi(x, 1, 0) + min(C1, C2) */
+ if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
+ tarval *a = get_Const_tarval(t);
+ tarval *b = get_Const_tarval(f);
+ tarval *null = get_tarval_null(mode);
+ tarval *diff, *min;
+
+ if (tarval_cmp(a, b) & pn_Cmp_Gt) {
+ diff = tarval_sub(a, b);
+ min = b;
+ } else {
+ diff = tarval_sub(b, a);
+ min = a;
+ }
+
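+		/* e.g. Mux(x, 4, 3): diff == 1, min == 3, so build Psi(x, 1, 0) + 3 */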
+ if (diff == get_tarval_one(mode) && min != null) {
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_graph *irg = current_ir_graph;
+
+ conds[0] = sel;
+ vals[0] = new_Const(mode, tarval_sub(a, min));
+ vals[1] = new_Const(mode, tarval_sub(b, min));
+ n = new_rd_Psi(dbg, irg, block, 1, conds, vals, mode);
+ n = new_rd_Add(dbg, irg, block, n, new_Const(mode, min), mode);
+ return n;
+ }
+ }
+
+ if (is_Proj(sel)) {
ir_node *cmp = get_Proj_pred(sel);
long pn = get_Proj_proj(sel);
- ir_node *f = get_Mux_false(n);
- ir_node *t = get_Mux_true(n);
/*
	 * Note: normalization puts the constant on the right side.
	 *
	 * Note further that these optimizations work even for floating point
* with NaN's because -NaN == NaN.
- * However, if +0 and -0 is handled differently, we cannot use the first
- * one.
+ * However, if +0 and -0 is handled differently, we cannot use the Abs/-Abs
+ * transformations.
*/
if (is_Cmp(cmp)) {
ir_node *cmp_r = get_Cmp_right(cmp);
if (is_Const(cmp_r) && is_Const_null(cmp_r)) {
- ir_node *block = get_irn_n(n, -1);
+ ir_node *block = get_nodes_block(n);
+ ir_node *cmp_l = get_Cmp_left(cmp);
- if (is_negated_value(f, t)) {
- ir_node *cmp_left = get_Cmp_left(cmp);
+ if (!mode_honor_signed_zeros(mode) && is_negated_value(f, t)) {
+ /* f = -t */
- /* Psi(a >= 0, a, -a) = Psi(a <= 0, -a, a) ==> Abs(a) */
- if ( (cmp_left == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))
- || (cmp_left == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)))
+ if ( (cmp_l == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))
+ || (cmp_l == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)))
{
+ /* Psi(a >/>= 0, a, -a) = Psi(a </<= 0, -a, a) ==> Abs(a) */
n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block,
- cmp_left, mode);
+ cmp_l, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
- /* Psi(a <= 0, a, -a) = Psi(a >= 0, -a, a) ==> -Abs(a) */
- } else if ((cmp_left == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))
- || (cmp_left == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)))
+ } else if ((cmp_l == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))
+ || (cmp_l == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)))
{
+ /* Psi(a </<= 0, a, -a) = Psi(a >/>= 0, -a, a) ==> -Abs(a) */
n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block,
- cmp_left, mode);
+ cmp_l, mode);
n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
block, n, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
}
}
+
+ if (mode_is_int(mode)) {
+ /* integer only */
+ if ((pn == pn_Cmp_Lg || pn == pn_Cmp_Eq) && is_And(cmp_l)) {
+				/* Psi((a & b) !=/== 0, c, 0) */
+ ir_node *and_r = get_And_right(cmp_l);
+ ir_node *and_l;
+
+ if (and_r == t && f == cmp_r) {
+ if (is_Const(t) && tarval_is_single_bit(get_Const_tarval(t))) {
+ if (pn == pn_Cmp_Lg) {
+ /* Psi((a & 2^C) != 0, 2^C, 0) */
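+							/* e.g. Psi((a & 8) != 0, 8, 0) is just a & 8 itself */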
+ n = cmp_l;
+ } else {
+ /* Psi((a & 2^C) == 0, 2^C, 0) */
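+							/* the Eor flips the single bit: a & 2^C becomes 0, and 0 becomes 2^C */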
+ n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
+ block, cmp_l, t, mode);
+ }
+ return n;
+ }
+ }
+ if (is_Shl(and_r)) {
+ ir_node *shl_l = get_Shl_left(and_r);
+ if (is_Const(shl_l) && is_Const_one(shl_l)) {
+ if (and_r == t && f == cmp_r) {
+ if (pn == pn_Cmp_Lg) {
+								/* Psi((a & (1 << n)) != 0, (1 << n), 0) */
+								n = cmp_l;
+							} else {
+								/* Psi((a & (1 << n)) == 0, (1 << n), 0) */
+ n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
+ block, cmp_l, t, mode);
+ }
+ return n;
+ }
+ }
+ }
+ and_l = get_And_left(cmp_l);
+ if (is_Shl(and_l)) {
+ ir_node *shl_l = get_Shl_left(and_l);
+ if (is_Const(shl_l) && is_Const_one(shl_l)) {
+ if (and_l == t && f == cmp_r) {
+ if (pn == pn_Cmp_Lg) {
+								/* Psi(((1 << n) & a) != 0, (1 << n), 0) */
+								n = cmp_l;
+							} else {
+								/* Psi(((1 << n) & a) == 0, (1 << n), 0) */
+ n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
+ block, cmp_l, t, mode);
+ }
+ return n;
+ }
+ }
+ }
+ }
+ }
}
}
}
CASE(Shr);
CASE(Shrs);
CASE(Shl);
- CASE(Rot);
+ CASE(Rotl);
CASE(Conv);
CASE(End);
CASE(Mux);
/*
* Calculate a hash value of a node.
*/
-unsigned ir_node_hash(ir_node *node) {
+unsigned ir_node_hash(const ir_node *node) {
unsigned h;
int i, irn_arity;
}
} /* update_value_table */
-/**
- * Return the canonical node computing the same value as n.
- *
- * @param value_table The value table
- * @param n The node to lookup
- *
- * Looks up the node in a hash table.
- *
- * For Const nodes this is performed in the constructor, too. Const
- * nodes are extremely time critical because of their frequent use in
- * constant string arrays.
- */
-static INLINE ir_node *identify(pset *value_table, ir_node *n) {
- ir_node *o = NULL;
-
- if (!value_table) return n;
-
- normalize_node(n);
-
- o = pset_find(value_table, n, ir_node_hash(n));
- if (o == NULL)
- return n;
-
- update_known_irn(o, n);
- DBG_OPT_CSE(n, o);
-
- return o;
-} /* identify */
-
-/**
- * During construction we set the op_pin_state_pinned flag in the graph right when the
- * optimization is performed. The flag turning on procedure global cse could
- * be changed between two allocations. This way we are safe.
- *
- * @param value_table The value table
- * @param n The node to lookup
- */
-static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
- ir_node *old = n;
-
- n = identify(value_table, n);
- if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n))
- set_irg_pinned(current_ir_graph, op_pin_state_floats);
- return n;
-} /* identify_cons */
-
/*
* Return the canonical node computing the same value as n.
* Looks up the node in a hash table, enters it in the table
return o;
} /* identify_remember */
+/**
+ * During construction we set the op_pin_state_pinned flag in the graph right
+ * when the optimization is performed. The flag turning on procedure-global
+ * CSE could be changed between two allocations. This way we are safe: if the
+ * CSE partner lies in a different MacroBlock, the graph is marked floating.
+ *
+ * @param value_table The value table
+ * @param n The node to lookup
+ */
+static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
+ ir_node *old = n;
+
+ n = identify_remember(value_table, n);
+ if (n != old && get_irn_MacroBlock(old) != get_irn_MacroBlock(n))
+ set_irg_pinned(current_ir_graph, op_pin_state_floats);
+ return n;
+} /* identify_cons */
+
/* Add a node to the identities value table. */
void add_identities(pset *value_table, ir_node *node) {
if (get_opt_cse() && is_no_Block(node))
}
/* remove unnecessary nodes */
- if (get_opt_constant_folding() ||
+ if (get_opt_algebraic_simplification() ||
(iro == iro_Phi) || /* always optimize these nodes. */
(iro == iro_Id) ||
(iro == iro_Proj) ||
/* Some more constant expression evaluation that does not allow to
free the node. */
iro = get_irn_opcode(n);
- if (get_opt_constant_folding() ||
+ if (get_opt_algebraic_simplification() ||
(iro == iro_Cond) ||
(iro == iro_Proj)) /* Flags tested local. */
n = transform_node(n);
/* Remove nodes with dead (Bad) input.
Run always for transformation induced Bads. */
- n = gigo (n);
+ n = gigo(n);
/* Now we have a legal, useful node. Enter it in hash table for CSE */
if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
subexpressions within a block. */
if (get_opt_cse()) {
- n = identify(current_ir_graph->value_table, n);
+ n = identify_remember(current_ir_graph->value_table, n);
}
/* Some more constant expression evaluation. */