static ir_node *equivalent_node_Block(ir_node *n)
{
ir_node *oldn = n;
- int n_preds = get_Block_n_cfgpreds(n);
+ int n_preds;
- /* The Block constructor does not call optimize, but mature_immBlock
- calls the optimization. */
+ /* don't optimize dead blocks */
+ if (is_Block_dead(n))
+ return n;
+
+ n_preds = get_Block_n_cfgpreds(n);
+
+ /* The Block constructor does not call optimize, but mature_immBlock()
+ calls the optimization. */
assert(get_Block_matured(n));
/* Straightening: a single entry Block following a single exit Block
/**
 * Eor is commutative and has neutral 0.
 */
-#define equivalent_node_Eor equivalent_node_neutral_zero
+static ir_node *equivalent_node_Eor(ir_node *n)
+{
+ ir_node *oldn = n;
+ ir_node *a;
+ ir_node *b;
+
+ /* First apply the neutral-element rule (x ^ 0 -> x); if that already
+ replaced the node we are done. */
+ n = equivalent_node_neutral_zero(n);
+ if (n != oldn) return n;
+
+ a = get_Eor_left(n);
+ b = get_Eor_right(n);
+
+ /* Look one level deep on either operand for a cancelling pair:
+ x ^ y ^ x == y. Only direct Eor children are inspected here. */
+ if (is_Eor(a)) {
+ ir_node *aa = get_Eor_left(a);
+ ir_node *ab = get_Eor_right(a);
+
+ if (aa == b) {
+ /* (a ^ b) ^ a -> b */
+ n = ab;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
+ } else if (ab == b) {
+ /* (a ^ b) ^ b -> a */
+ n = aa;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
+ }
+ }
+ if (is_Eor(b)) {
+ ir_node *ba = get_Eor_left(b);
+ ir_node *bb = get_Eor_right(b);
+
+ if (ba == a) {
+ /* a ^ (a ^ b) -> b */
+ n = bb;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
+ } else if (bb == a) {
+ /* a ^ (b ^ a) -> b */
+ n = ba;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_EOR_A_B_A);
+ return n;
+ }
+ }
+
+ return n;
+}
/*
* Optimize a - 0 and (a - x) + x (for modes with wrap-around).
* themselves.
*/
static ir_node *equivalent_node_Sync(ir_node *n) {
- int i, n_preds;
-
- ir_node *oldn = n;
- ir_node *first_val = NULL; /* to shutup gcc */
-
- if (!get_opt_normalize()) return n;
+ int arity = get_Sync_n_preds(n);
+ int i;
- n_preds = get_Sync_n_preds(n);
+ /* In-place cleanup: drop Bad and duplicate predecessors, then fold
+ degenerate Syncs. i only advances when the pred at i is kept. */
+ for (i = 0; i < arity;) {
+ ir_node *pred = get_Sync_pred(n, i);
+ int j;
- /* Find first non-self-referencing input */
- for (i = 0; i < n_preds; ++i) {
- first_val = get_Sync_pred(n, i);
- if ((first_val != n) /* not self pointer */ &&
- (! is_Bad(first_val))
- ) { /* value not dead */
- break; /* then found first value. */
+ /* Remove Bad predecessors */
+ if (is_Bad(pred)) {
+ del_Sync_n(n, i);
+ --arity;
+ continue;
}
- }
-
- if (i >= n_preds)
- /* A totally Bad or self-referencing Sync (we didn't break the above loop) */
- return new_Bad();
- /* search the rest of inputs, determine if any of these
- are non-self-referencing */
- while (++i < n_preds) {
- ir_node *scnd_val = get_Sync_pred(n, i);
- if ((scnd_val != n) &&
- (scnd_val != first_val) &&
- (! is_Bad(scnd_val))
- )
- break;
+ /* Remove duplicate predecessors */
+ /* scan earlier indices: if pred already occurs at some j < i,
+ delete the copy at i; otherwise keep it and advance i */
+ for (j = 0;; ++j) {
+ if (j >= i) {
+ ++i;
+ break;
+ }
+ if (get_Sync_pred(n, j) == pred) {
+ del_Sync_n(n, i);
+ --arity;
+ break;
+ }
+ }
}
- if (i >= n_preds) {
- /* Fold, if no multiple distinct non-self-referencing inputs */
- n = first_val;
- DBG_OPT_SYNC(oldn, n);
- }
+ /* no preds left -> dead; a single pred makes the Sync redundant.
+ NOTE(review): the old fold path fired DBG_OPT_SYNC(); the arity==1
+ return here does not -- confirm that is intentional. */
+ if (arity == 0) return new_Bad();
+ if (arity == 1) return get_Sync_pred(n, 0);
return n;
} /* equivalent_node_Sync */
if (is_Const(b)) {
tarval *tv = get_Const_tarval(b);
+ int rem;
+ /*
+ * Floating point constant folding might be disabled here to
+ * prevent rounding.
+ * However, as we check for exact result, doing it is safe.
+ * Switch it on.
+ */
+ rem = tarval_enable_fp_ops(1);
tv = tarval_quo(get_mode_one(mode), tv);
+ (void)tarval_enable_fp_ops(rem);
/* Do the transformation if the result is either exact or we are not
using strict rules. */
/*
* We can replace the Abs by -x here.
- * We even could add a new Confirm here.
+ * We could even add a new Confirm here
+ * (if not two's complement)
*
* Note that -x would create a new node, so we could
* not run it in the equivalent_node() context.
break;
}
- /* remove Casts */
- if (is_Cast(left))
- left = get_Cast_op(left);
- if (is_Cast(right))
- right = get_Cast_op(right);
+ /* remove Casts of both sides */
+ left = skip_Cast(left);
+ right = skip_Cast(right);
/* Remove unnecessary conversions */
/* TODO handle constants */
}
}
- /* remove operation of both sides if possible */
+ /* remove operation on both sides if possible */
if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
/*
* The following operations are NOT safe for floating point operations, for instance
/** returns 1 if a == -b */
static int is_negated_value(ir_node *a, ir_node *b) {
- if(is_Minus(a) && get_Minus_op(a) == b)
+ if (is_Minus(a) && get_Minus_op(a) == b)
return 1;
- if(is_Minus(b) && get_Minus_op(b) == a)
+ if (is_Minus(b) && get_Minus_op(b) == a)
return 1;
- if(is_Sub(a) && is_Sub(b)) {
+ if (is_Sub(a) && is_Sub(b)) {
ir_node *a_left = get_Sub_left(a);
ir_node *a_right = get_Sub_right(a);
ir_node *b_left = get_Sub_left(b);
ir_node *b_right = get_Sub_right(b);
- if(a_left == b_right && a_right == b_left)
+ if (a_left == b_right && a_right == b_left)
return 1;
}
tarval *tv_t = get_Const_tarval(t);
if (tv_t == tarval_b_true) {
if (is_Const(f)) {
+ /* Muxb(sel, true, false) = sel */
assert(get_Const_tarval(f) == tarval_b_false);
+ DBG_OPT_ALGSIM0(oldn, sel, FS_OPT_MUX_BOOL);
return sel;
} else {
- return new_rd_Or(dbg, irg, block, sel, f, mode_b);
+ /* Muxb(sel, true, x) = Or(sel, x) */
+ n = new_rd_Or(dbg, irg, block, sel, f, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
+ return n;
}
} else {
ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
assert(tv_t == tarval_b_false);
if (is_Const(f)) {
+ /* Muxb(sel, false, true) = Not(sel) */
assert(get_Const_tarval(f) == tarval_b_true);
+ DBG_OPT_ALGSIM0(oldn, not_sel, FS_OPT_MUX_NOT_BOOL);
return not_sel;
} else {
- return new_rd_And(dbg, irg, block, not_sel, f, mode_b);
+ /* Muxb(sel, false, x) = And(Not(sel), x) */
+ n = new_rd_And(dbg, irg, block, not_sel, f, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ANDNOT_BOOL);
+ return n;
}
}
} else if (is_Const(f)) {
tarval *tv_f = get_Const_tarval(f);
if (tv_f == tarval_b_true) {
+ /* Muxb(sel, x, true) = Or(Not(sel), x) */
ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
- return new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+ n = new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
+ return n;
} else {
+ /* Muxb(sel, x, false) = And(sel, x) */
assert(tv_f == tarval_b_false);
- return new_rd_And(dbg, irg, block, sel, t, mode_b);
+ n = new_rd_And(dbg, irg, block, sel, t, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
+ return n;
}
}
}
if (is_Const(cmp_r) && is_Const_null(cmp_r)) {
ir_node *block = get_irn_n(n, -1);
- if(is_negated_value(f, t)) {
+ if (is_negated_value(f, t)) {
ir_node *cmp_left = get_Cmp_left(cmp);
/* Psi(a >= 0, a, -a) = Psi(a <= 0, -a, a) ==> Abs(a) */
* of the other sync to our own inputs
*/
static ir_node *transform_node_Sync(ir_node *n) {
- int i, arity;
+ int arity = get_Sync_n_preds(n);
+ int i;
+
+ for (i = 0; i < arity;) {
+ ir_node *pred = get_Sync_pred(n, i);
+ int pred_arity;
+ int j;
- arity = get_irn_arity(n);
- for(i = 0; i < get_irn_arity(n); /*empty*/) {
- int i2, arity2;
- ir_node *in = get_irn_n(n, i);
- if(!is_Sync(in)) {
+ if (!is_Sync(pred)) {
++i;
continue;
}
- /* set sync input 0 instead of the sync */
- set_irn_n(n, i, get_irn_n(in, 0));
- /* so we check this input again for syncs */
-
- /* append all other inputs of the sync to our sync */
- arity2 = get_irn_arity(in);
- for(i2 = 1; i2 < arity2; ++i2) {
- ir_node *in_in = get_irn_n(in, i2);
- add_irn_n(n, in_in);
- /* increase arity so we also check the new inputs for syncs */
- arity++;
+ del_Sync_n(n, i);
+ --arity;
+
+ pred_arity = get_Sync_n_preds(pred);
+ for (j = 0; j < pred_arity; ++j) {
+ ir_node *pred_pred = get_Sync_pred(pred, j);
+ int k;
+
+ for (k = 0;; ++k) {
+ if (k >= arity) {
+ add_irn_n(n, pred_pred);
+ ++arity;
+ break;
+ }
+ if (get_Sync_pred(n, k) == pred_pred) break;
+ }
}
}
static int node_cmp_attr_SymConst(ir_node *a, ir_node *b) {
const symconst_attr *pa = get_irn_symconst_attr(a);
const symconst_attr *pb = get_irn_symconst_attr(b);
+ /* Returns non-zero iff the attributes differ (0 means "equal").
+ Compare the kind tag rather than num: sym is a union whose valid
+ member depends on kind, so comparing num across different kinds
+ compared unrelated data -- presumably the bug this hunk fixes
+ (TODO confirm against symconst_attr's declaration). */
- return (pa->num != pb->num)
+ return (pa->kind != pb->kind)
|| (pa->sym.type_p != pb->sym.type_p)
|| (pa->tp != pb->tp);
} /* node_cmp_attr_SymConst */