static ir_node *equivalent_node_Block(ir_node *n)
{
ir_node *oldn = n;
- int n_preds = get_Block_n_cfgpreds(n);
+ int n_preds;
- /* The Block constructor does not call optimize, but mature_immBlock
- calls the optimization. */
+ /* don't optimize dead blocks */
+ if (is_Block_dead(n))
+ return n;
+
+ n_preds = get_Block_n_cfgpreds(n);
+
+ /* The Block constructor does not call optimize, but mature_immBlock()
+ calls the optimization. */
assert(get_Block_matured(n));
/* Straightening: a single entry Block following a single exit Block
/**
* Eor is commutative and has neutral 0.
*/
-#define equivalent_node_Eor equivalent_node_neutral_zero
+static ir_node *equivalent_node_Eor(ir_node *n)
+{
+ ir_node *oldn = n;
+ ir_node *a;
+ ir_node *b;
+
+ n = equivalent_node_neutral_zero(n);
+ if (n != oldn) return n;
+
+ a = get_Eor_left(n);
+ b = get_Eor_right(n);
+
+ if (is_Eor(a)) {
+ ir_node *aa = get_Eor_left(a);
+ ir_node *ab = get_Eor_right(a);
+
+ if (aa == b) {
+ /* (a ^ b) ^ a -> b */
+ n = ab;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
+ } else if (ab == b) {
+ /* (a ^ b) ^ b -> a */
+ n = aa;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
+ }
+ }
+ if (is_Eor(b)) {
+ ir_node *ba = get_Eor_left(b);
+ ir_node *bb = get_Eor_right(b);
+
+ if (ba == a) {
+ /* a ^ (a ^ b) -> b */
+ n = bb;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
+ } else if (bb == a) {
+ /* a ^ (b ^ a) -> b */
+ n = ba;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
+ }
+ }
+
+ return n;
+}
/*
* Optimize a - 0 and (a - x) + x (for modes with wrap-around).
if (is_Const(b)) {
tarval *tv = get_Const_tarval(b);
+ int rem;
+ /*
+ * Floating point constant folding might be disabled here to
+ * prevent rounding.
+ * However, as we check for exact result, doing it is safe.
+ * Switch it on.
+ */
+ rem = tarval_enable_fp_ops(1);
tv = tarval_quo(get_mode_one(mode), tv);
+ (void)tarval_enable_fp_ops(rem);
/* Do the transformation if the result is either exact or we are not
using strict rules. */
break;
}
- /* remove Casts */
- if (is_Cast(left))
- left = get_Cast_op(left);
- if (is_Cast(right))
- right = get_Cast_op(right);
+ /* remove Casts of both sides */
+ left = skip_Cast(left);
+ right = skip_Cast(right);
/* Remove unnecessary conversions */
/* TODO handle constants */
}
}
- /* remove operation of both sides if possible */
+ /* remove operation on both sides if possible */
if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
/*
* The following operations are NOT safe for floating point operations, for instance
/** returns 1 if a == -b */
static int is_negated_value(ir_node *a, ir_node *b) {
- if(is_Minus(a) && get_Minus_op(a) == b)
+ if (is_Minus(a) && get_Minus_op(a) == b)
return 1;
- if(is_Minus(b) && get_Minus_op(b) == a)
+ if (is_Minus(b) && get_Minus_op(b) == a)
return 1;
- if(is_Sub(a) && is_Sub(b)) {
+ if (is_Sub(a) && is_Sub(b)) {
ir_node *a_left = get_Sub_left(a);
ir_node *a_right = get_Sub_right(a);
ir_node *b_left = get_Sub_left(b);
ir_node *b_right = get_Sub_right(b);
- if(a_left == b_right && a_right == b_left)
+ if (a_left == b_right && a_right == b_left)
return 1;
}
tarval *tv_t = get_Const_tarval(t);
if (tv_t == tarval_b_true) {
if (is_Const(f)) {
+ /* Muxb(sel, true, false) = sel */
assert(get_Const_tarval(f) == tarval_b_false);
+ DBG_OPT_ALGSIM0(oldn, sel, FS_OPT_MUX_BOOL);
return sel;
} else {
- return new_rd_Or(dbg, irg, block, sel, f, mode_b);
+ /* Muxb(sel, true, x) = Or(sel, x) */
+ n = new_rd_Or(dbg, irg, block, sel, f, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
+ return n;
}
} else {
ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
assert(tv_t == tarval_b_false);
if (is_Const(f)) {
+ /* Muxb(sel, false, true) = Not(sel) */
assert(get_Const_tarval(f) == tarval_b_true);
+ DBG_OPT_ALGSIM0(oldn, not_sel, FS_OPT_MUX_NOT_BOOL);
return not_sel;
} else {
- return new_rd_And(dbg, irg, block, not_sel, f, mode_b);
+ /* Muxb(sel, false, x) = And(Not(sel), x) */
+ n = new_rd_And(dbg, irg, block, not_sel, f, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ANDNOT_BOOL);
+ return n;
}
}
} else if (is_Const(f)) {
tarval *tv_f = get_Const_tarval(f);
if (tv_f == tarval_b_true) {
+ /* Muxb(sel, x, true) = Or(Not(sel), x) */
ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
- return new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+ n = new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
+ return n;
} else {
+ /* Muxb(sel, x, false) = And(sel, x) */
assert(tv_f == tarval_b_false);
- return new_rd_And(dbg, irg, block, sel, t, mode_b);
+ n = new_rd_And(dbg, irg, block, sel, t, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
+ return n;
}
}
}
if (is_Const(cmp_r) && is_Const_null(cmp_r)) {
ir_node *block = get_irn_n(n, -1);
- if(is_negated_value(f, t)) {
+ if (is_negated_value(f, t)) {
ir_node *cmp_left = get_Cmp_left(cmp);
/* Psi(a >= 0, a, -a) = Psi(a <= 0, -a, a) ==> Abs(a) */
static int node_cmp_attr_SymConst(ir_node *a, ir_node *b) {
const symconst_attr *pa = get_irn_symconst_attr(a);
const symconst_attr *pb = get_irn_symconst_attr(b);
- return (pa->num != pb->num)
+ return (pa->kind != pb->kind)
|| (pa->sym.type_p != pb->sym.type_p)
|| (pa->tp != pb->tp);
} /* node_cmp_attr_SymConst */