if ((ta != tarval_bad) && (tb != tarval_bad))
return tarval_add(ta, tb);
+ /* x+~x => -1 */
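+ /* (~x == -x-1 in two's complement, so x + ~x is always all-one) */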
+ if ((is_Not(a) && get_Not_op(a) == b)
+ || (is_Not(b) && get_Not_op(b) == a)) {
+ return get_mode_all_one(get_irn_mode(n));
+ }
+
return tarval_bad;
} /* computed_value_Add */
if ((ta != tarval_bad) && (tb != tarval_bad)) {
return tarval_and (ta, tb);
- } else {
- if (tarval_is_null(ta)) return ta;
- if (tarval_is_null(tb)) return tb;
}
+
+ if (tarval_is_null(ta)) return ta;
+ if (tarval_is_null(tb)) return tb;
+
+ /* x&~x => 0 */
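+ /* (no bit can be set in both x and ~x) */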
+ if ((is_Not(a) && get_Not_op(a) == b)
+ || (is_Not(b) && get_Not_op(b) == a)) {
+ return get_mode_null(get_irn_mode(n));
+ }
+
return tarval_bad;
} /* computed_value_And */
if ((ta != tarval_bad) && (tb != tarval_bad)) {
return tarval_or (ta, tb);
- } else {
- if (tarval_is_all_one(ta)) return ta;
- if (tarval_is_all_one(tb)) return tb;
+ }
+
+ if (tarval_is_all_one(ta)) return ta;
+ if (tarval_is_all_one(tb)) return tb;
+
+ /* x|~x => -1 */
+ if ((is_Not(a) && get_Not_op(a) == b)
+ || (is_Not(b) && get_Not_op(b) == a)) {
+ return get_mode_all_one(get_irn_mode(n));
}
return tarval_bad;
} /* computed_value_Or */
if (a == b)
return get_mode_null(get_irn_mode(n));
+ /* x^~x => -1 */
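+ /* (x and ~x differ in every bit, so the Eor sets all bits) */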
+ if ((is_Not(a) && get_Not_op(a) == b)
+ || (is_Not(b) && get_Not_op(b) == a)) {
+ return get_mode_all_one(get_irn_mode(n));
+ }
ta = value_of(a);
tb = value_of(b);
return tarval_bad;
} /* computed_value_Not */
+/**
+ * Tests whether a shift shifts more bits than available in the mode
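+ * (e.g. a 32-bit value shifted by a constant 40; counts that the
+ * mode's modulo shift wraps around are not considered oversize)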
+ */
+static bool is_oversize_shift(const ir_node *n)
+{
+ ir_node *count = get_binop_right(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_tarval *tv = value_of(count);
+ long modulo_shift;
+ long shiftval;
+ if (tv == tarval_bad)
+ return false;
+ if (!tarval_is_long(tv))
+ return false;
+ shiftval = get_tarval_long(tv);
+ modulo_shift = get_mode_modulo_shift(mode);
+ if (shiftval < 0 || (modulo_shift > 0 && shiftval >= modulo_shift))
+ return false;
+
+ return shiftval >= (long)get_mode_size_bits(mode);
+}
+
/**
* Return the value of a Shl.
*/
if ((ta != tarval_bad) && (tb != tarval_bad)) {
return tarval_shl(ta, tb);
}
+
+ if (is_oversize_shift(n))
+ return get_mode_null(get_irn_mode(n));
+
return tarval_bad;
} /* computed_value_Shl */
if ((ta != tarval_bad) && (tb != tarval_bad)) {
return tarval_shr(ta, tb);
}
+ if (is_oversize_shift(n))
+ return get_mode_null(get_irn_mode(n));
+
return tarval_bad;
} /* computed_value_Shr */
return tarval_bad;
} /* computed_value_Rotl */
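+/**
+ * Returns true if the value of @p node is known to become zero when
+ * converted to @p dest_mode: either a Shl that moves all bits beyond
+ * the destination width, or an And whose mask converts to zero.
+ */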
+bool ir_zero_when_converted(const ir_node *node, ir_mode *dest_mode)
+{
+ ir_mode *mode = get_irn_mode(node);
+ if (get_mode_arithmetic(mode) != irma_twos_complement
+ || get_mode_arithmetic(dest_mode) != irma_twos_complement)
+ return false;
+
+ if (is_Shl(node)) {
+ ir_node *count = get_Shl_right(node);
+ if (is_Const(count)) {
+ ir_tarval *tv = get_Const_tarval(count);
+ if (tarval_is_long(tv)) {
+ long shiftval = get_tarval_long(tv);
+ long destbits = get_mode_size_bits(dest_mode);
+ if (shiftval >= destbits
+ && shiftval < (long)get_mode_modulo_shift(mode))
+ return true;
+ }
+ }
+ }
+ if (is_And(node)) {
+ ir_node *right = get_And_right(node);
+ if (is_Const(right)) {
+ ir_tarval *tv = get_Const_tarval(right);
+ ir_tarval *conved = tarval_convert_to(tv, dest_mode);
+ return tarval_is_null(conved);
+ }
+ }
+ return false;
+}
+
/**
* Return the value of a Conv.
*/
static ir_tarval *computed_value_Conv(const ir_node *n)
{
- ir_node *a = get_Conv_op(n);
- ir_tarval *ta = value_of(a);
+ ir_node *a = get_Conv_op(n);
+ ir_tarval *ta = value_of(a);
+ ir_mode *mode = get_irn_mode(n);
if (ta != tarval_bad)
return tarval_convert_to(ta, get_irn_mode(n));
+ if (ir_zero_when_converted(a, mode))
+ return get_mode_null(mode);
+
return tarval_bad;
} /* computed_value_Conv */
return possible;
}
-/**
- * Return the value of a Cmp.
- *
- * The basic idea here is to determine which relations are possible and which
- * one are definitely impossible.
- */
-static ir_tarval *computed_value_Cmp(const ir_node *cmp)
+static ir_tarval *compute_cmp(const ir_node *cmp)
{
ir_node *left = get_Cmp_left(cmp);
ir_node *right = get_Cmp_right(cmp);
return computed_value_Cmp_Confirm(cmp, left, right, relation);
}
+/**
+ * Return the value of a Cmp.
+ *
+ * The basic idea here is to determine which relations are possible and which
+ * one are definitely impossible.
+ */
+static ir_tarval *computed_value_Cmp(const ir_node *cmp)
+{
+ /* we can't construct Constb after lowering mode_b nodes */
+ if (is_irg_state(get_irn_irg(cmp), IR_GRAPH_STATE_MODEB_LOWERED))
+ return tarval_bad;
+
+ return compute_cmp(cmp);
+}
+
/**
* Calculate the value of an integer Div.
* Special case: 0 / b
#undef CASE
} /* firm_set_default_computed_value */
-/**
- * Returns a equivalent block for another block.
- * If the block has only one predecessor, this is
- * the equivalent one. If the only predecessor of a block is
- * the block itself, this is a dead block.
- *
- * If both predecessors of a block are the branches of a binary
- * Cond, the equivalent block is Cond's block.
- *
- * If all predecessors of a block are bad or lies in a dead
- * block, the current block is dead as well.
- *
- * Note, that blocks are NEVER turned into Bad's, instead
- * the dead_block flag is set. So, never test for is_Bad(block),
- * always use is_dead_Block(block).
- */
-static ir_node *equivalent_node_Block(ir_node *n)
-{
- ir_node *oldn = n;
- int n_preds;
- ir_graph *irg;
-
- /* don't optimize dead or labeled blocks */
- if (is_Block_dead(n) || has_Block_entity(n))
- return n;
-
- n_preds = get_Block_n_cfgpreds(n);
-
- /* The Block constructor does not call optimize, but mature_immBlock()
- calls the optimization. */
- assert(get_Block_matured(n));
-
- irg = get_irn_irg(n);
-
- /* Straightening: a single entry Block following a single exit Block
- can be merged, if it is not the Start block. */
- /* !!! Beware, all Phi-nodes of n must have been optimized away.
- This should be true, as the block is matured before optimize is called.
- But what about Phi-cycles with the Phi0/Id that could not be resolved?
- Remaining Phi nodes are just Ids. */
- if (n_preds == 1) {
- ir_node *pred = skip_Proj(get_Block_cfgpred(n, 0));
-
- if (is_Jmp(pred)) {
- ir_node *predblock = get_nodes_block(pred);
- if (predblock == oldn) {
- /* Jmp jumps into the block it is in -- deal self cycle. */
- n = set_Block_dead(n);
- DBG_OPT_DEAD_BLOCK(oldn, n);
- } else {
- n = predblock;
- DBG_OPT_STG(oldn, n);
- }
- } else if (is_Cond(pred)) {
- ir_node *predblock = get_nodes_block(pred);
- if (predblock == oldn) {
- /* Jmp jumps into the block it is in -- deal self cycle. */
- n = set_Block_dead(n);
- DBG_OPT_DEAD_BLOCK(oldn, n);
- }
- }
- } else if (n_preds == 2) {
- /* Test whether Cond jumps twice to this block
- * The more general case which more than 2 predecessors is handles
- * in optimize_cf(), we handle only this special case for speed here.
- */
- ir_node *a = get_Block_cfgpred(n, 0);
- ir_node *b = get_Block_cfgpred(n, 1);
-
- if (is_Proj(a) && is_Proj(b)) {
- ir_node *cond = get_Proj_pred(a);
-
- if (cond == get_Proj_pred(b) && is_Cond(cond) &&
- get_irn_mode(get_Cond_selector(cond)) == mode_b) {
- /* Also a single entry Block following a single exit Block. Phis have
- twice the same operand and will be optimized away. */
- n = get_nodes_block(cond);
- DBG_OPT_IFSIM1(oldn, a, b, n);
- }
- }
- } else if (get_opt_unreachable_code() &&
- (n != get_irg_start_block(irg)) &&
- (n != get_irg_end_block(irg))) {
- int i;
-
- /* If all inputs are dead, this block is dead too, except if it is
- the start or end block. This is one step of unreachable code
- elimination */
- for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
- ir_node *pred = get_Block_cfgpred(n, i);
- ir_node *pred_blk;
-
- if (is_Bad(pred)) continue;
- pred_blk = get_nodes_block(skip_Proj(pred));
-
- if (is_Block_dead(pred_blk)) continue;
-
- if (pred_blk != n) {
- /* really found a living input */
- break;
- }
- }
- if (i < 0) {
- n = set_Block_dead(n);
- DBG_OPT_DEAD_BLOCK(oldn, n);
- }
- }
-
- return n;
-} /* equivalent_node_Block */
-
-/**
- * Returns a equivalent node for a Jmp, a Bad :-)
- * Of course this only happens if the Block of the Jmp is dead.
- */
-static ir_node *equivalent_node_Jmp(ir_node *n)
-{
- ir_node *oldn = n;
-
- /* unreachable code elimination */
- if (is_Block_dead(get_nodes_block(n))) {
- ir_graph *irg = get_irn_irg(n);
- n = get_irg_bad(irg);
- DBG_OPT_DEAD_BLOCK(oldn, n);
- }
- return n;
-} /* equivalent_node_Jmp */
-
-/** Raise is handled in the same way as Jmp. */
-#define equivalent_node_Raise equivalent_node_Jmp
-
-
-/* We do not evaluate Cond here as we replace it by a new node, a Jmp.
- See transform_node_Proj_Cond(). */
-
/**
* Optimize operations that are commutative and have neutral 0,
* so a op 0 = 0 op a = a.
/**
- * Optimize an "self-inverse unary op", ie op(op(n)) = n.
+ * Optimize an "self-inverse unary op", i.e. op(op(n)) = n.
*
* @todo
* -(-a) == a, but might overflow two times.
ir_node *convop = get_Conv_op(a);
ir_mode *convopmode = get_irn_mode(convop);
if (!mode_is_signed(convopmode)) {
- if (tarval_is_all_one(tarval_convert_to(tv, convopmode))) {
- /* Conv(X) & all_one(mode(X)) = Conv(X) */
+ /* Check Conv(all_one) & Const = all_one */
+ ir_tarval *one = get_mode_all_one(convopmode);
+ ir_tarval *conv = tarval_convert_to(one, mode);
+ ir_tarval *and = tarval_and(conv, tv);
+
+ if (tarval_is_all_one(and)) {
+ /* Conv(X) & Const = X */
n = a;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
return n;
int i, n_preds;
ir_node *oldn = n;
- ir_node *block;
ir_node *first_val = NULL; /* to shut up gcc */
if (!get_opt_optimize() &&
n_preds = get_Phi_n_preds(n);
- block = get_nodes_block(n);
- /* Control dead */
- if (is_Block_dead(block)) {
- ir_graph *irg = get_irn_irg(n);
- return get_irg_bad(irg);
- }
-
- if (n_preds == 0) return n; /* Phi of dead Region without predecessors. */
+ /* Phi of dead Region without predecessors. */
+ if (n_preds == 0)
+ return n;
/* Find first non-self-referencing input */
for (i = 0; i < n_preds; ++i) {
first_val = get_Phi_pred(n, i);
- if ( (first_val != n) /* not self pointer */
-#if 0
- /* BEWARE: when the if is changed to 1, Phis will ignore their Bad
- * predecessors. Then, Phi nodes in unreachable code might be removed,
- * causing nodes pointing to themselev (Adds for instance).
- * This is really bad and causes endless recursion on several
- * code pathes, so we do NOT optimize such code.
- * This is not that bad as it sounds, optimize_cf() removes bad control flow
- * (and bad Phi predecessors), so live code is optimized later.
- */
- && (! is_Bad(get_Block_cfgpred(block, i)))
-#endif
- ) { /* value not dead */
- break; /* then found first value. */
+ /* not self pointer */
+ if (first_val != n) {
+ /* then found first value. */
+ break;
}
}
- if (i >= n_preds) {
- ir_graph *irg = get_irn_irg(n);
- /* A totally Bad or self-referencing Phi (we didn't break the above loop) */
- return get_irg_bad(irg);
- }
-
/* search for rest of inputs, determine if any of these
are non-self-referencing */
while (++i < n_preds) {
ir_node *scnd_val = get_Phi_pred(n, i);
- if ( (scnd_val != n)
- && (scnd_val != first_val)
-#if 0
- /* see above */
- && (! is_Bad(get_Block_cfgpred(block, i)))
-#endif
- ) {
+ if (scnd_val != n && scnd_val != first_val) {
break;
}
}
return n;
} /* equivalent_node_Phi */
-/**
- * Several optimizations:
- * - fold Sync-nodes, iff they have only one predecessor except
- * themselves.
- */
-static ir_node *equivalent_node_Sync(ir_node *n)
-{
- int arity = get_Sync_n_preds(n);
- int i;
-
- for (i = 0; i < arity;) {
- ir_node *pred = get_Sync_pred(n, i);
- int j;
-
- /* Remove Bad predecessors */
- if (is_Bad(pred)) {
- del_Sync_n(n, i);
- --arity;
- continue;
- }
-
- /* Remove duplicate predecessors */
- for (j = 0;; ++j) {
- if (j >= i) {
- ++i;
- break;
- }
- if (get_Sync_pred(n, j) == pred) {
- del_Sync_n(n, i);
- --arity;
- break;
- }
- }
- }
-
- if (arity == 0) {
- ir_graph *irg = get_irn_irg(n);
- return get_irg_bad(irg);
- }
- if (arity == 1) return get_Sync_pred(n, 0);
- return n;
-} /* equivalent_node_Sync */
-
/**
* Optimize Proj(Tuple).
*/
proj = get_CopyB_mem(copyb);
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
break;
-
- case pn_CopyB_X_except: {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(irg);
- break;
- }
}
}
return proj;
DBG_OPT_EXC_REM(proj);
proj = get_Bound_mem(bound);
break;
- case pn_Bound_X_except: {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(irg);
- break;
- }
case pn_Bound_res:
proj = idx;
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
return proj;
} /* equivalent_node_Proj_Bound */
-/**
- * Optimize an Exception Proj(Load) with a non-null address.
- */
-static ir_node *equivalent_node_Proj_Load(ir_node *proj)
-{
- if (get_opt_ldst_only_null_ptr_exceptions()) {
- if (get_irn_mode(proj) == mode_X) {
- ir_node *load = get_Proj_pred(proj);
-
- /* get the Load address */
- const ir_node *addr = get_Load_ptr(load);
- const ir_node *confirm;
-
- if (value_not_null(addr, &confirm)) {
- if (get_Proj_proj(proj) == pn_Load_X_except) {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
- }
- }
- }
- }
- return proj;
-} /* equivalent_node_Proj_Load */
-
-/**
- * Optimize an Exception Proj(Store) with a non-null address.
- */
-static ir_node *equivalent_node_Proj_Store(ir_node *proj)
-{
- if (get_opt_ldst_only_null_ptr_exceptions()) {
- if (get_irn_mode(proj) == mode_X) {
- ir_node *store = get_Proj_pred(proj);
-
- /* get the load/store address */
- const ir_node *addr = get_Store_ptr(store);
- const ir_node *confirm;
-
- if (value_not_null(addr, &confirm)) {
- if (get_Proj_proj(proj) == pn_Store_X_except) {
- ir_graph *irg = get_irn_irg(proj);
- DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
- }
- }
- }
- }
- return proj;
-} /* equivalent_node_Proj_Store */
-
/**
* Does all optimizations on nodes that must be done on its Projs
* because of creating new nodes.
static ir_node *equivalent_node_Proj(ir_node *proj)
{
ir_node *n = get_Proj_pred(proj);
-
- if (get_irn_mode(proj) == mode_X) {
- if (is_Block_dead(get_nodes_block(n))) {
- /* Remove dead control flow -- early gigo(). */
- ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
- }
- }
if (n->op->ops.equivalent_node_Proj)
return n->op->ops.equivalent_node_Proj(proj);
return proj;
ir_node *n_t, *n_f;
ir_tarval *ts = value_of(sel);
+ if (ts == tarval_bad && is_Cmp(sel)) {
+ /* try again with a direct call to compute_cmp, as we don't care
+ * about the MODEB_LOWERED flag here */
+ ts = compute_cmp(sel);
+ }
+
/* Mux(true, f, t) == t */
if (ts == tarval_b_true) {
n = get_Mux_true(n);
break
switch (code) {
- CASE(Block);
- CASE(Jmp);
- CASE(Raise);
CASE(Eor);
CASE(Add);
CASE(Shl);
CASE(And);
CASE(Conv);
CASE(Phi);
- CASE(Sync);
CASE_PROJ(Tuple);
CASE_PROJ(Div);
CASE_PROJ(CopyB);
CASE_PROJ(Bound);
- CASE_PROJ(Load);
- CASE_PROJ(Store);
CASE(Proj);
CASE(Id);
CASE(Mux);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
return n;
}
- if (op == b) {
- /* ~x + x = -1 */
- n = new_r_Const(irg, get_mode_minus_one(mode));
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
- return n;
- }
- }
- if (is_Not(b)) {
- ir_node *op = get_Not_op(b);
-
- if (op == a) {
- /* x + ~x = -1 */
- n = new_r_Const(irg, get_mode_minus_one(mode));
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
- return n;
- }
}
}
}
b_vrp = vrp_get_info(b);
if (a_vrp && b_vrp) {
- ir_tarval *c = tarval_and(a_vrp->bits_not_set, b_vrp->bits_not_set);
+ ir_tarval *vrp_val = tarval_and(a_vrp->bits_not_set, b_vrp->bits_not_set);
- if (tarval_is_null(c)) {
+ if (tarval_is_null(vrp_val)) {
dbg_info *dbgi = get_irn_dbg_info(n);
return new_rd_Or(dbgi, get_nodes_block(n), a, b, mode);
}
}
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
+#if 0
} else if (is_Mul(b)) { /* a - (b * C) -> a + (b * -C) */
ir_node *m_right = get_Mul_right(b);
if (is_Const(m_right)) {
return n;
}
}
+#endif
}
/* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
}
if (get_mode_arithmetic(mode) == irma_twos_complement) {
+ /* c - ~X = X + (c+1) */
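+ /* (~X == -X-1 in two's complement, hence c - ~X == X + (c+1)) */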
if (is_Const(a) && is_Not(b)) {
- /* c - ~X = X + (c+1) */
ir_tarval *tv = get_Const_tarval(a);
tv = tarval_add(tv, get_mode_one(mode));
return n;
}
}
+ /* x-(x&y) = x & ~y */
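+ /* (every bit of x&y is also set in x, so the Sub borrows nothing
+ * and just clears those bits: x - (x&y) == x & ~y) */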
+ if (is_And(b)) {
+ ir_node *and_left = get_And_left(b);
+ ir_node *and_right = get_And_right(b);
+ if (and_right == a) {
+ ir_node *tmp = and_left;
+ and_left = and_right;
+ and_right = tmp;
+ }
+ if (and_left == a) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *notn = new_rd_Not(dbgi, block, and_right, mode);
+ ir_node *and = new_rd_And(dbgi, block, a, notn, mode);
+ return and;
+ }
+ }
}
return n;
} /* transform_node_Sub */
/* skip a potential Pin */
mem = skip_Pin(mem);
- turn_into_tuple(n, pn_Div_max);
+ turn_into_tuple(n, pn_Div_max+1);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Div_X_except, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Div_X_except, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Div_res, value);
}
return n;
/* skip a potential Pin */
mem = skip_Pin(mem);
- turn_into_tuple(n, pn_Mod_max);
+ turn_into_tuple(n, pn_Mod_max+1);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
- set_Tuple_pred(n, pn_Mod_X_except, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Mod_X_except, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Mod_res, value);
}
return n;
{
ir_node *a = get_Cond_selector(n);
- ir_tarval *ta = value_of(a);
ir_graph *irg = get_irn_irg(n);
+ ir_tarval *ta;
ir_node *jmp;
/* we need block info which is not available in floating irgs */
if (get_irg_pinned(irg) == op_pin_state_floats)
return n;
- if ((ta != tarval_bad) &&
- (get_irn_mode(a) == mode_b) &&
- (get_opt_unreachable_code())) {
+ /* we do not handle switches here */
+ if (get_irn_mode(a) != mode_b)
+ return n;
+
+ ta = value_of(a);
+ if (ta == tarval_bad && is_Cmp(a)) {
+ /* try again with a direct call to compute_cmp, as we don't care
+ * about the MODEB_LOWERED flag here */
+ ta = compute_cmp(a);
+ }
+
+ if (ta != tarval_bad && get_irn_mode(a) == mode_b) {
/* It's a boolean Cond, branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
ir_node *blk = get_nodes_block(n);
jmp = new_r_Jmp(blk);
- turn_into_tuple(n, pn_Cond_max);
+ turn_into_tuple(n, pn_Cond_max+1);
if (ta == tarval_b_true) {
- set_Tuple_pred(n, pn_Cond_false, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Cond_false, new_r_Bad(irg, mode_X));
set_Tuple_pred(n, pn_Cond_true, jmp);
} else {
set_Tuple_pred(n, pn_Cond_false, jmp);
- set_Tuple_pred(n, pn_Cond_true, get_irg_bad(irg));
+ set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(irg), blk);
+ clear_irg_state(irg, IR_GRAPH_STATE_NO_UNREACHABLE_CODE);
}
return n;
} /* transform_node_Cond */
return n;
}
-int ir_is_equality_cmp_0(const ir_node *node)
-{
- ir_relation relation = get_Cmp_relation(node);
- ir_node *left = get_Cmp_left(node);
- ir_node *right = get_Cmp_right(node);
- ir_mode *mode = get_irn_mode(left);
-
- /* this probably makes no sense if unordered is involved */
- assert(!mode_is_float(mode));
-
- if (!is_Const(right) || !is_Const_null(right))
- return false;
- if (relation == ir_relation_equal)
- return true;
- if (mode_is_signed(mode)) {
- return relation == ir_relation_less_greater;
- } else {
- return relation == ir_relation_greater;
- }
-}
-
/**
* Create a 0 constant of given mode.
*/
return cnst;
}
+static bool is_shiftop(const ir_node *n)
+{
+ return is_Shl(n) || is_Shr(n) || is_Shrs(n) || is_Rotl(n);
+}
+
+/**
+ * normalisation: (x & c1) >> c2 to (x >> c2) & (c1 >> c2)
+ * (we can use:
+ * - and, or, xor instead of &
+ * - Shl, Shr, Shrs, rotl instead of >>,
+ * with a special case for Or/Xor + Shrs)
+ *
+ * This normalisation is good for things like x-(x&y) esp. in 186.crafty.
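+ *
+ * E.g. (x & 0xF0) >> 4 becomes (x >> 4) & 0x0F.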
+ */
+static ir_node *transform_node_shift_bitop(ir_node *n)
+{
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *right = get_binop_right(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *left;
+ ir_node *bitop_left;
+ ir_node *bitop_right;
+ ir_op *op_left;
+ ir_node *block;
+ dbg_info *dbgi;
+ ir_node *new_shift;
+ ir_node *new_bitop;
+ ir_node *new_const;
+ ir_tarval *tv1;
+ ir_tarval *tv2;
+ ir_tarval *tv_shift;
+
+ if (is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
+ return n;
+
+ assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n));
+
+ if (!is_Const(right))
+ return n;
+
+ left = get_binop_left(n);
+ op_left = get_irn_op(left);
+ if (op_left != op_And && op_left != op_Or && op_left != op_Eor)
+ return n;
+
+ /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
+ if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) {
+ /* TODO: test if the sign bit is affected */
+ return n;
+ }
+
+ bitop_right = get_binop_right(left);
+ if (!is_Const(bitop_right))
+ return n;
+
+ bitop_left = get_binop_left(left);
+
+ block = get_nodes_block(n);
+ dbgi = get_irn_dbg_info(n);
+ tv1 = get_Const_tarval(bitop_right);
+ tv2 = get_Const_tarval(right);
+
+ assert(get_tarval_mode(tv1) == mode);
+
+ if (is_Shl(n)) {
+ new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode);
+ tv_shift = tarval_shl(tv1, tv2);
+ } else if (is_Shr(n)) {
+ new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode);
+ tv_shift = tarval_shr(tv1, tv2);
+ } else if (is_Shrs(n)) {
+ new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode);
+ tv_shift = tarval_shrs(tv1, tv2);
+ } else {
+ assert(is_Rotl(n));
+ new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode);
+ tv_shift = tarval_rotl(tv1, tv2);
+ }
+
+ assert(get_tarval_mode(tv_shift) == mode);
+ irg = get_irn_irg(n);
+ new_const = new_r_Const(irg, tv_shift);
+
+ if (op_left == op_And) {
+ new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
+ } else if (op_left == op_Or) {
+ new_bitop = new_rd_Or(dbgi, block, new_shift, new_const, mode);
+ } else {
+ assert(op_left == op_Eor);
+ new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode);
+ }
+
+ return new_bitop;
+}
+
+/**
+ * normalisation: (x >> c1) & c2 to (x & (c2<<c1)) >> c1
+ * (we can use:
+ * - and, or, xor instead of &
+ * - Shl, Shr, Shrs, rotl instead of >>,
+ * with a special case for Or/Xor + Shrs)
+ *
+ * This normalisation is usually good for the backend since << C can often be
+ * matched as address-mode.
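+ *
+ * E.g. (x >> 4) & 0x0F becomes (x & 0xF0) >> 4.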
+ */
+static ir_node *transform_node_bitop_shift(ir_node *n)
+{
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *left = get_binop_left(n);
+ ir_node *right = get_binop_right(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *shift_left;
+ ir_node *shift_right;
+ ir_node *block;
+ dbg_info *dbg_bitop;
+ dbg_info *dbg_shift;
+ ir_node *new_bitop;
+ ir_node *new_shift;
+ ir_node *new_const;
+ ir_tarval *tv1;
+ ir_tarval *tv2;
+ ir_tarval *tv_bitop;
+
+ if (!is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
+ return n;
+
+ assert(is_And(n) || is_Or(n) || is_Eor(n));
+ if (!is_Const(right) || !is_shiftop(left))
+ return n;
+
+ shift_left = get_binop_left(left);
+ shift_right = get_binop_right(left);
+ if (!is_Const(shift_right))
+ return n;
+
+ /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
+ if (is_Shrs(left)) {
+ /* TODO this could be improved */
+ return n;
+ }
+
+ irg = get_irn_irg(n);
+ block = get_nodes_block(n);
+ dbg_bitop = get_irn_dbg_info(n);
+ dbg_shift = get_irn_dbg_info(left);
+ tv1 = get_Const_tarval(shift_right);
+ tv2 = get_Const_tarval(right);
+ assert(get_tarval_mode(tv2) == mode);
+
+ if (is_Shl(left)) {
+ tv_bitop = tarval_shr(tv2, tv1);
+ } else if (is_Shr(left)) {
+ if (is_Or(n) || is_Eor(n)) {
+ /*
+ * TODO this can be improved by checking whether
+ * the left shift produces an overflow
+ */
+ return n;
+ }
+ tv_bitop = tarval_shl(tv2, tv1);
+ } else {
+ assert(is_Rotl(left));
+ tv_bitop = tarval_rotl(tv2, tarval_neg(tv1));
+ }
+ new_const = new_r_Const(irg, tv_bitop);
+
+ if (is_And(n)) {
+ new_bitop = new_rd_And(dbg_bitop, block, shift_left, new_const, mode);
+ } else if (is_Or(n)) {
+ new_bitop = new_rd_Or(dbg_bitop, block, shift_left, new_const, mode);
+ } else {
+ assert(is_Eor(n));
+ new_bitop = new_rd_Eor(dbg_bitop, block, shift_left, new_const, mode);
+ }
+
+ if (is_Shl(left)) {
+ new_shift = new_rd_Shl(dbg_shift, block, new_bitop, shift_right, mode);
+ } else if (is_Shr(left)) {
+ new_shift = new_rd_Shr(dbg_shift, block, new_bitop, shift_right, mode);
+ } else {
+ assert(is_Rotl(left));
+ new_shift = new_rd_Rotl(dbg_shift, block, new_bitop, shift_right, mode);
+ }
+
+ return new_shift;
+}
+
/**
* Transform an And.
*/
vrp_attr *a_vrp, *b_vrp;
if (is_Cmp(a) && is_Cmp(b)) {
- ir_node *a_left = get_Cmp_left(a);
- ir_node *a_right = get_Cmp_left(a);
- ir_node *b_left = get_Cmp_left(b);
- ir_node *b_right = get_Cmp_right(b);
+ ir_node *a_left = get_Cmp_left(a);
+ ir_node *a_right = get_Cmp_right(a);
+ ir_node *b_left = get_Cmp_left(b);
+ ir_node *b_right = get_Cmp_right(b);
+ ir_relation a_relation = get_Cmp_relation(a);
+ ir_relation b_relation = get_Cmp_relation(b);
/* we can combine the relations of two compares with the same
* operands */
if (a_left == b_left && a_right == b_right) {
dbg_info *dbgi = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ir_relation a_relation = get_Cmp_relation(a);
- ir_relation b_relation = get_Cmp_relation(b);
ir_relation new_relation = a_relation & b_relation;
return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
}
- /* Cmp(a, 0) and Cmp(b,0) can be optimized to Cmp(a|b, 0) */
- if (ir_is_equality_cmp_0(a) && ir_is_equality_cmp_0(b)
- && (get_Cmp_relation(a) & ir_relation_equal) == (get_Cmp_relation(b) & ir_relation_equal)) {
- dbg_info *dbgi = get_irn_dbg_info(n);
- ir_node *block = get_nodes_block(n);
- ir_relation relation = get_Cmp_relation(a);
- ir_mode *mode = get_irn_mode(a_left);
- ir_node *n_b_left = get_irn_mode(b_left) != mode ?
- new_rd_Conv(dbgi, block, b_left, mode) : b_left;
- ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
- ir_graph *irg = get_irn_irg(n);
- ir_node *zero = create_zero_const(irg, mode);
- return new_rd_Cmp(dbgi, block, or, zero, relation);
+ /* Cmp(a==b) and Cmp(c==d) can be optimized to Cmp((a^b)|(c^d)==0) */
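+ /* (a^b is zero exactly when a == b, so the Or of both Eors is zero
+ * exactly when both equalities hold) */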
+ if (a_relation == b_relation && a_relation == ir_relation_equal
+ && !mode_is_float(get_irn_mode(a_left))
+ && !mode_is_float(get_irn_mode(b_left))) {
+ if (values_in_mode(get_irn_mode(a_left), get_irn_mode(b_left))) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(a_left);
+ ir_mode *b_mode = get_irn_mode(b_left);
+ ir_node *xora = new_rd_Eor(dbgi, block, a_left, a_right, a_mode);
+ ir_node *xorb = new_rd_Eor(dbgi, block, b_left, b_right, b_mode);
+ ir_node *conv = new_rd_Conv(dbgi, block, xora, b_mode);
+ ir_node *or = new_rd_Or(dbgi, block, conv, xorb, b_mode);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *zero = create_zero_const(irg, b_mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal);
+ }
+ if (values_in_mode(get_irn_mode(b_left), get_irn_mode(a_left))) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(a_left);
+ ir_mode *b_mode = get_irn_mode(b_left);
+ ir_node *xora = new_rd_Eor(dbgi, block, a_left, a_right, a_mode);
+ ir_node *xorb = new_rd_Eor(dbgi, block, b_left, b_right, b_mode);
+ ir_node *conv = new_rd_Conv(dbgi, block, xorb, a_mode);
+ ir_node *or = new_rd_Or(dbgi, block, xora, conv, a_mode);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *zero = create_zero_const(irg, a_mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal);
+ }
}
}
}
n = transform_bitwise_distributive(n, transform_node_And);
+ if (is_And(n))
+ n = transform_node_bitop_shift(n);
return n;
} /* transform_node_And */
}
n = transform_bitwise_distributive(n, transform_node_Eor);
+ if (is_Eor(n))
+ n = transform_node_bitop_shift(n);
+
return n;
} /* transform_node_Eor */
if (get_Proj_proj(proj) == pn_Load_X_except) {
ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
} else {
ir_node *blk = get_nodes_block(load);
return new_r_Jmp(blk);
if (get_Proj_proj(proj) == pn_Store_X_except) {
ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
} else {
ir_node *blk = get_nodes_block(store);
return new_r_Jmp(blk);
ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
case pn_Div_M: {
ir_graph *irg = get_irn_irg(proj);
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
- return get_irg_bad(irg);
+ return new_r_Bad(irg, mode_X);
}
case pn_Mod_M: {
ir_node *n = get_Proj_pred(proj);
ir_node *b = get_Cond_selector(n);
- if (!get_opt_unreachable_code())
- return n;
-
if (mode_is_int(get_irn_mode(b))) {
ir_tarval *tb = value_of(b);
} else {
ir_graph *irg = get_irn_irg(proj);
/* this case will NEVER be taken, kill it */
- return get_irg_bad(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_NO_UNREACHABLE_CODE);
+ return new_r_Bad(irg, mode_X);
}
}
} else {
ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
- if ((cmp_result & ir_relation_greater) == cmp_result && (cmp_result2
- & ir_relation_less) == cmp_result2) {
+ if ((cmp_result & ir_relation_greater) == cmp_result
+ && (cmp_result2 & ir_relation_less) == cmp_result2) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_NO_UNREACHABLE_CODE);
+ return new_r_Bad(irg, mode_X);
}
} else if (b_vrp->range_type == VRP_ANTIRANGE) {
ir_relation cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
ir_relation cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
- if ((cmp_result & ir_relation_less_equal) == cmp_result && (cmp_result2
- & ir_relation_greater_equal) == cmp_result2) {
+ if ((cmp_result & ir_relation_less_equal) == cmp_result
+ && (cmp_result2 & ir_relation_greater_equal) == cmp_result2) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_NO_UNREACHABLE_CODE);
+ return new_r_Bad(irg, mode_X);
}
}
b_vrp->bits_set
) == ir_relation_equal)) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_NO_UNREACHABLE_CODE);
+ return new_r_Bad(irg, mode_X);
}
if (!(tarval_cmp(
tarval_not(b_vrp->bits_not_set))
== ir_relation_equal)) {
ir_graph *irg = get_irn_irg(proj);
- return get_irg_bad(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_NO_UNREACHABLE_CODE);
+ return new_r_Bad(irg, mode_X);
}
-
-
}
}
}
}
/* Remove unnecessary conversions */
- /* TODO handle conv+constant */
if (is_Conv(left) && is_Conv(right)) {
- ir_node *op_left = get_Conv_op(left);
- ir_node *op_right = get_Conv_op(right);
- ir_mode *mode_left = get_irn_mode(op_left);
- ir_mode *mode_right = get_irn_mode(op_right);
+ ir_node *op_left = get_Conv_op(left);
+ ir_node *op_right = get_Conv_op(right);
+ ir_mode *mode_left = get_irn_mode(op_left);
+ ir_mode *mode_right = get_irn_mode(op_right);
if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)
&& mode_left != mode_b && mode_right != mode_b) {
- ir_node *block = get_nodes_block(n);
+ ir_node *block = get_nodes_block(n);
if (mode_left == mode_right) {
- left = op_left;
- right = op_right;
+ left = op_left;
+ right = op_right;
changed = true;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV);
} else if (smaller_mode(mode_left, mode_right)) {
- left = new_r_Conv(block, op_left, mode_right);
- right = op_right;
+ left = new_r_Conv(block, op_left, mode_right);
+ right = op_right;
changed = true;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
} else if (smaller_mode(mode_right, mode_left)) {
- left = op_left;
- right = new_r_Conv(block, op_right, mode_left);
+ left = op_left;
+ right = new_r_Conv(block, op_right, mode_left);
+ changed = true;
+ DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
+ }
+ mode = get_irn_mode(left);
+ }
+ }
+ if (is_Conv(left) && is_Const(right)) {
+ ir_node *op_left = get_Conv_op(left);
+ ir_mode *mode_left = get_irn_mode(op_left);
+ if (smaller_mode(mode_left, mode) && mode_left != mode_b) {
+ ir_tarval *tv = get_Const_tarval(right);
+ tarval_int_overflow_mode_t last_mode
+ = tarval_get_integer_overflow_mode();
+ ir_tarval *new_tv;
+ tarval_set_integer_overflow_mode(TV_OVERFLOW_BAD);
+ new_tv = tarval_convert_to(tv, mode_left);
+ tarval_set_integer_overflow_mode(last_mode);
+ if (new_tv != tarval_bad) {
+ ir_graph *irg = get_irn_irg(n);
+ left = op_left;
+ right = new_r_Const(irg, new_tv);
+ mode = get_irn_mode(left);
changed = true;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
}
}
/* for integer modes, we have more */
- if (mode_is_int(mode)) {
+ if (mode_is_int(mode) && !is_Const(left)) {
/* c > 0 : a < c ==> a <= (c-1) a >= c ==> a > (c-1) */
if ((relation == ir_relation_less || relation == ir_relation_greater_equal) &&
tarval_cmp(tv, get_mode_null(mode)) == ir_relation_greater) {
/* the following reassociations work only for == and != */
if (relation == ir_relation_equal || relation == ir_relation_less_greater) {
-
-#if 0 /* Might be not that good in general */
- /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
- if (tarval_is_null(tv) && is_Sub(left)) {
- right = get_Sub_right(left);
- left = get_Sub_left(left);
-
- tv = value_of(right);
- changed = 1;
- DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_C);
- }
-#endif
-
if (tv != tarval_bad) {
/* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */
if (is_Sub(left)) {
DBG_OPT_EXC_REM(proj);
proj = new_r_Jmp(get_nodes_block(copyb));
break;
- case pn_CopyB_X_except:
+ case pn_CopyB_X_except: {
+ ir_graph *irg = get_irn_irg(proj);
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(get_irn_irg(proj));
+ proj = new_r_Bad(irg, mode_X);
break;
+ }
default:
break;
}
break;
case pn_Bound_X_except:
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(get_irn_irg(proj));
+ proj = new_r_Bad(get_irn_irg(proj), mode_X);
break;
case pn_Bound_res:
proj = idx;
} /* transform_node_Proj */
/**
- * Move Confirms down through Phi nodes.
+ * Tests whether a block is unreachable.
+ * Note that this only returns true when
+ * IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE is set.
+ * This is important, as you easily end up producing invalid constructs in the
+ * unreachable code when optimizing away edges into the unreachable code.
+ * So only set this flag when you iterate localopts to the fixpoint.
+ * Once the fixpoint is reached, all unreachable code is dead
+ * (= can't be reached by firm edges) and you won't see the invalid constructs
+ * anymore.
*/
+static bool is_block_unreachable(const ir_node *block)
+{
+ const ir_graph *irg = get_irn_irg(block);
+ if (!is_irg_state(irg, IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE))
+ return false;
+ return get_Block_dom_depth(block) < 0;
+}
+
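+/**
+ * Exchanges block inputs that come from unreachable predecessor blocks
+ * with Bad control flow.
+ */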
+static ir_node *transform_node_Block(ir_node *block)
+{
+ ir_graph *irg = get_irn_irg(block);
+ int arity = get_irn_arity(block);
+ ir_node *bad = NULL;
+ int i;
+
+ if (!is_irg_state(irg, IR_GRAPH_STATE_OPTIMIZE_UNREACHABLE_CODE))
+ return block;
+
+ for (i = 0; i < arity; ++i) {
+ ir_node *const pred = get_Block_cfgpred(block, i);
+ if (is_Bad(pred) || !is_block_unreachable(get_nodes_block(pred)))
+ continue;
+ if (bad == NULL)
+ bad = new_r_Bad(irg, mode_X);
+ set_irn_n(block, i, bad);
+ }
+
+ return block;
+}
+
static ir_node *transform_node_Phi(ir_node *phi)
{
- int i, n;
- ir_mode *mode = get_irn_mode(phi);
+ int n = get_irn_arity(phi);
+ ir_mode *mode = get_irn_mode(phi);
+ ir_node *block = get_nodes_block(phi);
+ ir_graph *irg = get_irn_irg(phi);
+ ir_node *bad = NULL;
+ int i;
- if (mode_is_reference(mode)) {
+ /* Set phi-operands for bad-block inputs to bad */
+ for (i = 0; i < n; ++i) {
+ if (!is_Bad(get_Phi_pred(phi, i))) {
+ ir_node *pred = get_Block_cfgpred(block, i);
+ if (is_Bad(pred) || is_block_unreachable(get_nodes_block(pred))) {
+ if (bad == NULL)
+ bad = new_r_Bad(irg, mode);
+ set_irn_n(phi, i, bad);
+ }
+ }
+ }
+
+ /* Move Pin nodes down through Phi nodes. */
+ if (mode == mode_M) {
+ n = get_irn_arity(phi);
+
+ /* Beware of Phi0 */
+ if (n > 0) {
+ ir_node **in;
+ ir_node *new_phi;
+ bool has_pin = false;
+
+ NEW_ARR_A(ir_node *, in, n);
+
+ for (i = 0; i < n; ++i) {
+ ir_node *pred = get_irn_n(phi, i);
+
+ if (is_Pin(pred)) {
+ in[i] = get_Pin_op(pred);
+ has_pin = true;
+ } else if (is_Bad(pred)) {
+ in[i] = pred;
+ } else {
+ return phi;
+ }
+ }
+
+ if (!has_pin)
+ return phi;
+
+ /* Move the Pin nodes "behind" the Phi. */
+ block = get_irn_n(phi, -1);
+ new_phi = new_r_Phi(block, n, in, mode_M);
+ return new_r_Pin(block, new_phi);
+ }
+ }
+ /* Move Confirms down through Phi nodes. */
+ else if (mode_is_reference(mode)) {
n = get_irn_arity(phi);
/* Beware of Phi0 */
ir_node *pred = get_irn_n(phi, 0);
ir_node *bound, *new_phi, *block, **in;
ir_relation relation;
+ bool has_confirm = false;
if (! is_Confirm(pred))
return phi;
for (i = 1; i < n; ++i) {
pred = get_irn_n(phi, i);
- if (! is_Confirm(pred) ||
- get_Confirm_bound(pred) != bound ||
- get_Confirm_relation(pred) != relation)
+ if (is_Confirm(pred) &&
+ get_Confirm_bound(pred) == bound &&
+ get_Confirm_relation(pred) == relation) {
+ in[i] = get_Confirm_value(pred);
+ has_confirm = true;
+ } else if (is_Bad(pred)) {
+ in[i] = pred;
+ } else {
return phi;
- in[i] = get_Confirm_value(pred);
+ }
}
+
+ if (!has_confirm)
+ return phi;
+
/* move the Confirm nodes "behind" the Phi */
block = get_irn_n(phi, -1);
new_phi = new_r_Phi(block, n, in, get_irn_mode(phi));
return n;
} /* transform_node_Or_Rotl */
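+/**
+ * Returns true if the given Cmp surely tests for inequality;
+ * for unsigned values, x > 0 is the same test as x != 0.
+ */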
+static bool is_cmp_unequal(const ir_node *node)
+{
+ ir_relation relation = get_Cmp_relation(node);
+ ir_node *left = get_Cmp_left(node);
+ ir_node *right = get_Cmp_right(node);
+ ir_mode *mode = get_irn_mode(left);
+
+ if (relation == ir_relation_less_greater)
+ return true;
+
+ if (!mode_is_signed(mode) && is_Const(right) && is_Const_null(right))
+ return relation == ir_relation_greater;
+ return false;
+}
+
+/**
+ * returns true for Cmp(x == 0) or Cmp(x != 0)
+ */
+static bool is_cmp_equality_zero(const ir_node *node)
+{
+ ir_relation relation;
+ ir_node *right = get_Cmp_right(node);
+
+ if (!is_Const(right) || !is_Const_null(right))
+ return false;
+ relation = get_Cmp_relation(node);
+ return relation == ir_relation_equal
+ || relation == ir_relation_less_greater
+ || (!mode_is_signed(get_irn_mode(right))
+ && relation == ir_relation_greater);
+}
+
/**
* Transform an Or.
*/
/* we can combine the relations of two compares with the same operands */
if (is_Cmp(a) && is_Cmp(b)) {
ir_node *a_left = get_Cmp_left(a);
- ir_node *a_right = get_Cmp_left(a);
+ ir_node *a_right = get_Cmp_right(a);
ir_node *b_left = get_Cmp_left(b);
ir_node *b_right = get_Cmp_right(b);
if (a_left == b_left && a_right == b_right) {
ir_relation new_relation = a_relation | b_relation;
return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
}
+ /* Cmp(a!=b) or Cmp(c!=d) => Cmp((a^b)|(c^d) != 0) */
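+ /* (a^b is non-zero exactly when a != b, so the Or is non-zero when
+ * at least one inequality holds) */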
+ if (is_cmp_unequal(a) && is_cmp_unequal(b)
+ && !mode_is_float(get_irn_mode(a_left))
+ && !mode_is_float(get_irn_mode(b_left))) {
+ if (values_in_mode(get_irn_mode(a_left), get_irn_mode(b_left))) {
+ ir_graph *irg = get_irn_irg(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(a_left);
+ ir_mode *b_mode = get_irn_mode(b_left);
+ ir_node *xora = new_rd_Eor(dbgi, block, a_left, a_right, a_mode);
+ ir_node *xorb = new_rd_Eor(dbgi, block, b_left, b_right, b_mode);
+ ir_node *conv = new_rd_Conv(dbgi, block, xora, b_mode);
+ ir_node *or = new_rd_Or(dbgi, block, conv, xorb, b_mode);
+ ir_node *zero = create_zero_const(irg, b_mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_less_greater);
+ }
+ if (values_in_mode(get_irn_mode(b_left), get_irn_mode(a_left))) {
+ ir_graph *irg = get_irn_irg(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(a_left);
+ ir_mode *b_mode = get_irn_mode(b_left);
+ ir_node *xora = new_rd_Eor(dbgi, block, a_left, a_right, a_mode);
+ ir_node *xorb = new_rd_Eor(dbgi, block, b_left, b_right, b_mode);
+ ir_node *conv = new_rd_Conv(dbgi, block, xorb, a_mode);
+ ir_node *or = new_rd_Or(dbgi, block, xora, conv, a_mode);
+ ir_node *zero = create_zero_const(irg, a_mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_less_greater);
+ }
+ }
}
mode = get_irn_mode(n);
return n;
n = transform_bitwise_distributive(n, transform_node_Or);
+ if (is_Or(n))
+ n = transform_node_bitop_shift(n);
return n;
} /* transform_node_Or */
{
ir_node *left, *right;
ir_mode *mode;
+ ir_mode *count_mode;
ir_tarval *tv1, *tv2, *res;
ir_node *in[2], *irn, *block;
ir_graph *irg;
+ int modulo_shf;
left = get_binop_left(n);
return n;
right = get_binop_right(n);
- tv1 = value_of(right);
+ tv1 = value_of(right);
if (tv1 == tarval_bad)
return n;
if (tv2 == tarval_bad)
return n;
- res = tarval_add(tv1, tv2);
- mode = get_irn_mode(n);
- irg = get_irn_irg(n);
+ count_mode = get_tarval_mode(tv1);
+ if (get_tarval_mode(tv2) != count_mode) {
+ /* TODO: search bigger mode or something and convert... */
+ return n;
+ }
- /* beware: a simple replacement works only, if res < modulo shift */
- if (!is_Rotl(n)) {
- int modulo_shf = get_mode_modulo_shift(mode);
- if (modulo_shf > 0) {
- ir_tarval *modulo = new_tarval_from_long(modulo_shf,
- get_tarval_mode(res));
+ mode = get_irn_mode(n);
+ modulo_shf = get_mode_modulo_shift(mode);
- assert(modulo_shf >= (int) get_mode_size_bits(mode));
+ if (modulo_shf > 0) {
+ ir_tarval *modulo_mask = new_tarval_from_long(modulo_shf-1, count_mode);
- /* shifting too much */
- if (!(tarval_cmp(res, modulo) & ir_relation_less)) {
- if (is_Shrs(n)) {
- ir_node *block = get_nodes_block(n);
- dbg_info *dbgi = get_irn_dbg_info(n);
- ir_mode *smode = get_irn_mode(right);
- ir_node *cnst = new_r_Const_long(irg, smode, get_mode_size_bits(mode) - 1);
- return new_rd_Shrs(dbgi, block, get_binop_left(left), cnst, mode);
- }
+ /* I'm not so sure what happens in one's complement... */
+ assert(get_mode_arithmetic(count_mode) == irma_twos_complement);
+ /* modulo shifts should always be a power of 2 (otherwise modulo_mask
+ * above will be invalid) */
+ assert(modulo_shf<=0 || is_po2(modulo_shf));
+
+ tv1 = tarval_and(tv1, modulo_mask);
+ tv2 = tarval_and(tv2, modulo_mask);
+ }
+ res = tarval_add(tv1, tv2);
+ irg = get_irn_irg(n);
+
+ /* beware: a simple replacement works only, if res < modulo shift */
+ if (is_Rotl(n)) {
+ int bits = get_mode_size_bits(mode);
+ ir_tarval *modulo = new_tarval_from_long(bits, count_mode);
+ res = tarval_mod(res, modulo);
+ } else {
+ long bits = get_mode_size_bits(mode);
+ ir_tarval *mode_size = new_tarval_from_long(bits, count_mode);
- return new_r_Const(irg, get_mode_null(mode));
+ /* shifting too much */
+ if (!(tarval_cmp(res, mode_size) & ir_relation_less)) {
+ if (is_Shrs(n)) {
+ ir_node *block = get_nodes_block(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_mode *smode = get_irn_mode(right);
+ ir_node *cnst = new_r_Const_long(irg, smode, get_mode_size_bits(mode) - 1);
+ return new_rd_Shrs(dbgi, block, get_binop_left(left), cnst, mode);
}
+
+ return new_r_Const(irg, get_mode_null(mode));
}
- } else {
- res = tarval_mod(res, new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(res)));
}
/* ok, we can replace it */
+ assert(modulo_shf == 0 || modulo_shf >= (int) get_mode_size_bits(mode));
block = get_nodes_block(n);
in[0] = get_binop_left(left);
DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);
return transform_node(irn);
-} /* transform_node_shift */
+}
/**
- * normalisation: (x & c1) >> c2 to (x >> c2) & (c1 >> c2)
- * (we can use:
- * - and, or, xor instead of &
- * - Shl, Shr, Shrs, rotl instead of >>
- * (with a special case for Or/Xor + Shrs)
- */
-static ir_node *transform_node_bitop_shift(ir_node *n)
-{
- ir_node *left;
- ir_node *right = get_binop_right(n);
- ir_mode *mode = get_irn_mode(n);
- ir_node *bitop_left;
- ir_node *bitop_right;
- ir_op *op_left;
- ir_node *block;
- dbg_info *dbgi;
- ir_graph *irg;
- ir_node *new_shift;
- ir_node *new_bitop;
- ir_node *new_const;
- ir_tarval *tv1;
- ir_tarval *tv2;
- ir_tarval *tv_shift;
-
- assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n));
-
- if (!is_Const(right))
- return n;
-
- left = get_binop_left(n);
- op_left = get_irn_op(left);
- if (op_left != op_And && op_left != op_Or && op_left != op_Eor)
- return n;
-
- /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
- if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) {
- /* TODO: test if sign bit is affectes */
- return n;
- }
-
- bitop_right = get_binop_right(left);
- if (!is_Const(bitop_right))
- return n;
-
- bitop_left = get_binop_left(left);
-
- block = get_nodes_block(n);
- dbgi = get_irn_dbg_info(n);
- tv1 = get_Const_tarval(bitop_right);
- tv2 = get_Const_tarval(right);
-
- assert(get_tarval_mode(tv1) == mode);
-
- if (is_Shl(n)) {
- new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode);
- tv_shift = tarval_shl(tv1, tv2);
- } else if (is_Shr(n)) {
- new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode);
- tv_shift = tarval_shr(tv1, tv2);
- } else if (is_Shrs(n)) {
- new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode);
- tv_shift = tarval_shrs(tv1, tv2);
- } else {
- assert(is_Rotl(n));
- new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode);
- tv_shift = tarval_rotl(tv1, tv2);
- }
-
- assert(get_tarval_mode(tv_shift) == mode);
- irg = get_irn_irg(n);
- new_const = new_r_Const(irg, tv_shift);
-
- if (op_left == op_And) {
- new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
- } else if (op_left == op_Or) {
- new_bitop = new_rd_Or(dbgi, block, new_shift, new_const, mode);
- } else {
- assert(op_left == op_Eor);
- new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode);
- }
-
- return new_bitop;
-}
-
-/**
- * normalisation:
- * (x << c1) >> c2 <=> x OP (c2-c1) & ((-1 << c1) >> c2)
- * also:
- * (x >> c1) << c2 <=> x OP (c2-c1) & ((-1 >> c1) << c2)
- * (also with x >>s c1 when c1>=c2)
+ * normalisation:
+ * (x << c1) >> c2 <=> x OP (c2-c1) & ((-1 << c1) >> c2)
+ * also:
+ * (x >> c1) << c2 <=> x OP (c2-c1) & ((-1 >> c1) << c2)
+ * (also with x >>s c1 when c1>=c2)
*/
static ir_node *transform_node_shl_shr(ir_node *n)
{
* then we can use that to minimize the value of Add(x, const) or
* Sub(Const, x). In particular this often avoids 1 instruction in some
* backends for the Shift(x, Sub(Const, y)) case because it can be replaced
- * by Shift(x, Minus(y)) which doesnt't need an explicit Const constructed.
+ * by Shift(x, Minus(y)) which does not need an explicit Const constructed.
*/
static ir_node *transform_node_shift_modulo(ir_node *n,
new_shift_func new_shift)
if (is_Shr(n))
n = transform_node_shl_shr(n);
if (is_Shr(n))
- n = transform_node_bitop_shift(n);
+ n = transform_node_shift_bitop(n);
return n;
} /* transform_node_Shr */
ir_node *b = get_Shrs_right(n);
ir_mode *mode = get_irn_mode(n);
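+ /* an oversize Shrs only replicates the sign bit, so it equals a
+ * shift by bits-1 */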
+ if (is_oversize_shift(n)) {
+ ir_node *block = get_nodes_block(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_mode *cmode = get_irn_mode(b);
+ long val = get_mode_size_bits(cmode)-1;
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *cnst = new_r_Const_long(irg, cmode, val);
+ return new_rd_Shrs(dbgi, block, a, cnst, mode);
+ }
+
HANDLE_BINOP_PHI((eval_func) tarval_shrs, a, b, c, mode);
n = transform_node_shift(n);
if (is_Shrs(n))
n = transform_node_shift_modulo(n, new_rd_Shrs);
if (is_Shrs(n))
- n = transform_node_bitop_shift(n);
+ n = transform_node_shift_bitop(n);
return n;
} /* transform_node_Shrs */
if (is_Shl(n))
n = transform_node_shl_shr(n);
if (is_Shl(n))
- n = transform_node_bitop_shift(n);
+ n = transform_node_shift_bitop(n);
return n;
} /* transform_node_Shl */
n = transform_node_shift(n);
if (is_Rotl(n))
- n = transform_node_bitop_shift(n);
+ n = transform_node_shift_bitop(n);
return n;
} /* transform_node_Rotl */
for (i = j = 0; i < n_keepalives; ++i) {
ir_node *ka = get_End_keepalive(n, i);
- if (is_Block(ka)) {
- if (! is_Block_dead(ka)) {
- in[j++] = ka;
- }
- continue;
- } else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka))) {
+ ir_node *block;
+ /* no need to keep Bad */
+ if (is_Bad(ka))
continue;
- } else if (is_Bad(ka)) {
- /* no need to keep Bad */
+ /* do not keep unreachable code */
+ block = is_Block(ka) ? ka : get_nodes_block(ka);
+ if (is_block_unreachable(block))
continue;
- }
in[j++] = ka;
}
if (j != n_keepalives)
return false;
}
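+/**
+ * Skips Conv nodes that only widen the value, yielding the original
+ * narrower node.
+ */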
+static const ir_node *skip_upconv(const ir_node *node)
+{
+ while (is_Conv(node)) {
+ ir_mode *mode = get_irn_mode(node);
+ const ir_node *op = get_Conv_op(node);
+ ir_mode *op_mode = get_irn_mode(op);
+ if (!smaller_mode(op_mode, mode))
+ break;
+ node = op;
+ }
+ return node;
+}
+
+int ir_mux_is_abs(const ir_node *sel, const ir_node *mux_true,
+ const ir_node *mux_false)
+{
+ ir_node *cmp_left;
+ ir_node *cmp_right;
+ ir_mode *mode;
+ ir_relation relation;
+
+ if (!is_Cmp(sel))
+ return 0;
+
+ /*
+ * Note further that these optimizations work even for floating point
+ * with NaNs because -NaN == NaN.
+ * However, if +0 and -0 are handled differently, we cannot use the Abs/-Abs
+ * transformations.
+ */
+ mode = get_irn_mode(mux_true);
+ if (mode_honor_signed_zeros(mode))
+ return 0;
+
+ /* must be <, <=, >=, > */
+ relation = get_Cmp_relation(sel);
+ if ((relation & ir_relation_less_greater) == 0)
+ return 0;
+
+ if (!ir_is_negated_value(mux_true, mux_false))
+ return 0;
+
+ mux_true = skip_upconv(mux_true);
+ mux_false = skip_upconv(mux_false);
+
+ /* must be x cmp 0 */
+ cmp_right = get_Cmp_right(sel);
+ if (!is_Const(cmp_right) || !is_Const_null(cmp_right))
+ return 0;
+
+ cmp_left = get_Cmp_left(sel);
+ if (cmp_left == mux_false) {
+ if (relation & ir_relation_less) {
+ return 1;
+ } else {
+ assert(relation & ir_relation_greater);
+ return -1;
+ }
+ } else if (cmp_left == mux_true) {
+ if (relation & ir_relation_less) {
+ return -1;
+ } else {
+ assert(relation & ir_relation_greater);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+ir_node *ir_get_abs_op(const ir_node *sel, ir_node *mux_true,
+ ir_node *mux_false)
+{
+ ir_node *cmp_left = get_Cmp_left(sel);
+ return cmp_left == skip_upconv(mux_false) ? mux_false : mux_true;
+}
+
/**
* Optimize a Mux into some simpler cases.
*/
static ir_node *transform_node_Mux(ir_node *n)
{
- ir_node *oldn = n, *sel = get_Mux_sel(n);
- ir_mode *mode = get_irn_mode(n);
- ir_node *t = get_Mux_true(n);
- ir_node *f = get_Mux_false(n);
- ir_graph *irg = get_irn_irg(n);
+ ir_node *oldn = n;
+ ir_node *sel = get_Mux_sel(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *t = get_Mux_true(n);
+ ir_node *f = get_Mux_false(n);
+ ir_graph *irg = get_irn_irg(n);
- if (is_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX))
- return n;
-
- if (is_Mux(t)) {
- ir_node* block = get_nodes_block(n);
- ir_node* c0 = sel;
- ir_node* c1 = get_Mux_sel(t);
- ir_node* t1 = get_Mux_true(t);
- ir_node* f1 = get_Mux_false(t);
- if (f == f1) {
- /* Mux(cond0, Mux(cond1, x, y), y) -> typical if (cond0 && cond1) x else y */
- ir_node* and_ = new_r_And(block, c0, c1, mode_b);
- ir_node* new_mux = new_r_Mux(block, and_, f1, t1, mode);
- n = new_mux;
- sel = and_;
- f = f1;
- t = t1;
- DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
- } else if (f == t1) {
- /* Mux(cond0, Mux(cond1, x, y), x) */
- ir_node* not_c1 = new_r_Not(block, c1, mode_b);
- ir_node* and_ = new_r_And(block, c0, not_c1, mode_b);
- ir_node* new_mux = new_r_Mux(block, and_, t1, f1, mode);
- n = new_mux;
- sel = and_;
- f = t1;
- t = f1;
- DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
- }
- } else if (is_Mux(f)) {
- ir_node* block = get_nodes_block(n);
- ir_node* c0 = sel;
- ir_node* c1 = get_Mux_sel(f);
- ir_node* t1 = get_Mux_true(f);
- ir_node* f1 = get_Mux_false(f);
- if (t == t1) {
- /* Mux(cond0, x, Mux(cond1, x, y)) -> typical if (cond0 || cond1) x else y */
- ir_node* or_ = new_r_Or(block, c0, c1, mode_b);
- ir_node* new_mux = new_r_Mux(block, or_, f1, t1, mode);
- n = new_mux;
- sel = or_;
- f = f1;
- t = t1;
- DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
- } else if (t == f1) {
- /* Mux(cond0, x, Mux(cond1, y, x)) */
- ir_node* not_c1 = new_r_Not(block, c1, mode_b);
- ir_node* or_ = new_r_Or(block, c0, not_c1, mode_b);
- ir_node* new_mux = new_r_Mux(block, or_, t1, f1, mode);
- n = new_mux;
- sel = or_;
- f = t1;
- t = f1;
- DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
+ /* implement integer abs: abs(x) = x^(x >>s 31) - (x >>s 31) */
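+ /* ((x >>s 31) is 0 for x >= 0 and all-one for x < 0, so the Eor
+ * yields x or ~x, and subtracting the -1 mask turns ~x into
+ * ~x+1 == -x) */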
+ if (get_mode_arithmetic(mode) == irma_twos_complement) {
+ int abs = ir_mux_is_abs(sel, t, f);
+ if (abs != 0) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_node *op = ir_get_abs_op(sel, t, f);
+ int bits = get_mode_size_bits(mode);
+ ir_node *shiftconst = new_r_Const_long(irg, mode_Iu, bits-1);
+ ir_node *sext = new_rd_Shrs(dbgi, block, op, shiftconst, mode);
+ ir_node *xorn = new_rd_Eor(dbgi, block, op, sext, mode);
+ ir_node *res;
+ if (abs > 0) {
+ res = new_rd_Sub(dbgi, block, xorn, sext, mode);
+ } else {
+ res = new_rd_Sub(dbgi, block, sext, xorn, mode);
+ }
+ return res;
}
}
n = new_rd_Mux(get_irn_dbg_info(n), get_nodes_block(n), sel, f, t, mode);
}
- /* note: after normalization, false can only happen on default */
- if (mode == mode_b) {
- dbg_info *dbg = get_irn_dbg_info(n);
- ir_node *block = get_nodes_block(n);
+ /* the following optimisations create new mode_b nodes, so only do them
+ * before mode_b lowering */
+ if (!is_irg_state(irg, IR_GRAPH_STATE_MODEB_LOWERED)) {
+ if (is_Mux(t)) {
+ ir_node* block = get_nodes_block(n);
+ ir_node* c0 = sel;
+ ir_node* c1 = get_Mux_sel(t);
+ ir_node* t1 = get_Mux_true(t);
+ ir_node* f1 = get_Mux_false(t);
+ if (f == f1) {
+ /* Mux(cond0, Mux(cond1, x, y), y) => Mux(cond0 && cond1, x, y) */
+ ir_node* and_ = new_r_And(block, c0, c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, and_, f1, t1, mode);
+ n = new_mux;
+ sel = and_;
+ f = f1;
+ t = t1;
+ DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
+ } else if (f == t1) {
+ /* Mux(cond0, Mux(cond1, x, y), x) */
+ ir_node* not_c1 = new_r_Not(block, c1, mode_b);
+ ir_node* and_ = new_r_And(block, c0, not_c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, and_, t1, f1, mode);
+ n = new_mux;
+ sel = and_;
+ f = t1;
+ t = f1;
+ DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
+ }
+ } else if (is_Mux(f)) {
+ ir_node* block = get_nodes_block(n);
+ ir_node* c0 = sel;
+ ir_node* c1 = get_Mux_sel(f);
+ ir_node* t1 = get_Mux_true(f);
+ ir_node* f1 = get_Mux_false(f);
+ if (t == t1) {
+ /* Mux(cond0, x, Mux(cond1, x, y)) -> typical if (cond0 || cond1) x else y */
+ ir_node* or_ = new_r_Or(block, c0, c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, or_, f1, t1, mode);
+ n = new_mux;
+ sel = or_;
+ f = f1;
+ t = t1;
+ DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
+ } else if (t == f1) {
+ /* Mux(cond0, x, Mux(cond1, y, x)) */
+ ir_node* not_c1 = new_r_Not(block, c1, mode_b);
+ ir_node* or_ = new_r_Or(block, c0, not_c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, or_, t1, f1, mode);
+ n = new_mux;
+ sel = or_;
+ f = t1;
+ t = f1;
+ DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
+ }
+ }
- if (is_Const(t)) {
- ir_tarval *tv_t = get_Const_tarval(t);
- if (tv_t == tarval_b_true) {
- if (is_Const(f)) {
- /* Muxb(sel, true, false) = sel */
- assert(get_Const_tarval(f) == tarval_b_false);
- DBG_OPT_ALGSIM0(oldn, sel, FS_OPT_MUX_BOOL);
- return sel;
+ /* note: after normalization, false can only happen on default */
+ if (mode == mode_b) {
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+
+ if (is_Const(t)) {
+ ir_tarval *tv_t = get_Const_tarval(t);
+ if (tv_t == tarval_b_true) {
+ if (is_Const(f)) {
+ /* Muxb(sel, true, false) = sel */
+ assert(get_Const_tarval(f) == tarval_b_false);
+ DBG_OPT_ALGSIM0(oldn, sel, FS_OPT_MUX_BOOL);
+ return sel;
+ } else {
+ /* Muxb(sel, true, x) = Or(sel, x) */
+ n = new_rd_Or(dbg, block, sel, f, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
+ return n;
+ }
+ }
+ } else if (is_Const(f)) {
+ ir_tarval *tv_f = get_Const_tarval(f);
+ if (tv_f == tarval_b_true) {
+ /* Muxb(sel, x, true) = Or(Not(sel), x) */
+ ir_node* not_sel = new_rd_Not(dbg, block, sel, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
+ n = new_rd_Or(dbg, block, not_sel, t, mode_b);
+ return n;
} else {
- /* Muxb(sel, true, x) = Or(sel, x) */
- n = new_rd_Or(dbg, block, sel, f, mode_b);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
+ /* Muxb(sel, x, false) = And(sel, x) */
+ assert(tv_f == tarval_b_false);
+ n = new_rd_And(dbg, block, sel, t, mode_b);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
return n;
}
}
- } else if (is_Const(f)) {
- ir_tarval *tv_f = get_Const_tarval(f);
- if (tv_f == tarval_b_true) {
- /* Muxb(sel, x, true) = Or(Not(sel), x) */
- ir_node* not_sel = new_rd_Not(dbg, block, sel, mode_b);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
- n = new_rd_Or(dbg, block, not_sel, t, mode_b);
+ }
+
+ /* more normalization: Mux(sel, 0, 1) is simply a conv from the mode_b
+ * value to integer. */
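+ /* Sketch: r = cond ? 1 : 0; becomes r = (int)cond;
+ * r = cond ? 0 : 1; becomes r = (int)!cond; */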
+ if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
+ ir_tarval *a = get_Const_tarval(t);
+ ir_tarval *b = get_Const_tarval(f);
+
+ if (tarval_is_one(a) && tarval_is_null(b)) {
+ ir_node *block = get_nodes_block(n);
+ ir_node *conv = new_r_Conv(block, sel, mode);
+ n = conv;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
return n;
- } else {
- /* Muxb(sel, x, false) = And(sel, x) */
- assert(tv_f == tarval_b_false);
- n = new_rd_And(dbg, block, sel, t, mode_b);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
+ } else if (tarval_is_null(a) && tarval_is_one(b)) {
+ ir_node *block = get_nodes_block(n);
+ ir_node *not_ = new_r_Not(block, sel, mode_b);
+ ir_node *conv = new_r_Conv(block, not_, mode);
+ n = conv;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
return n;
}
}
}
- /* more normalization: Mux(sel, 0, 1) is simply a conv from the mode_b
- * value to integer. */
- if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
- ir_tarval *a = get_Const_tarval(t);
- ir_tarval *b = get_Const_tarval(f);
+ if (is_Cmp(sel) && mode_is_int(mode) && is_cmp_equality_zero(sel)) {
+ ir_relation relation = get_Cmp_relation(sel);
+ ir_node *cmp_r = get_Cmp_right(sel);
+ ir_node *cmp_l = get_Cmp_left(sel);
+ ir_node *block = get_nodes_block(n);
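+
+ /* Source-level sketch of the bit-test foldings below:
+ * r = ((a & 4) != 0) ? 4 : 0; becomes r = a & 4;
+ * r = ((a & 4) == 0) ? 4 : 0; becomes r = (a & 4) ^ 4; */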
- if (tarval_is_one(a) && tarval_is_null(b)) {
- ir_node *block = get_nodes_block(n);
- ir_node *conv = new_r_Conv(block, sel, mode);
- n = conv;
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
- return n;
- } else if (tarval_is_null(a) && tarval_is_one(b)) {
- ir_node *block = get_nodes_block(n);
- ir_node *not_ = new_r_Not(block, sel, mode_b);
- ir_node *conv = new_r_Conv(block, not_, mode);
- n = conv;
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
- return n;
- }
- }
+ if (is_And(cmp_l) && f == cmp_r) {
+ ir_node *and_r = get_And_right(cmp_l);
+ ir_node *and_l;
- if (is_Cmp(sel)) {
- ir_node *cmp_r = get_Cmp_right(sel);
- if (is_Const(cmp_r) && is_Const_null(cmp_r)) {
- ir_node *block = get_nodes_block(n);
- ir_node *cmp_l = get_Cmp_left(sel);
-
- if (mode_is_int(mode)) {
- ir_relation relation = get_Cmp_relation(sel);
- /* integer only */
- if ((relation == ir_relation_less_greater || relation == ir_relation_equal) && is_And(cmp_l)) {
- /* Mux((a & b) != 0, c, 0) */
- ir_node *and_r = get_And_right(cmp_l);
- ir_node *and_l;
-
- if (and_r == t && f == cmp_r) {
- if (is_Const(t) && tarval_is_single_bit(get_Const_tarval(t))) {
- if (relation == ir_relation_less_greater) {
- /* Mux((a & 2^C) != 0, 2^C, 0) */
- n = cmp_l;
- DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
- } else {
- /* Mux((a & 2^C) == 0, 2^C, 0) */
- n = new_rd_Eor(get_irn_dbg_info(n),
- block, cmp_l, t, mode);
- DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
- }
- return n;
- }
- }
- if (is_Shl(and_r)) {
- ir_node *shl_l = get_Shl_left(and_r);
- if (is_Const(shl_l) && is_Const_one(shl_l)) {
- if (and_r == t && f == cmp_r) {
- if (relation == ir_relation_less_greater) {
- /* (a & (1 << n)) != 0, (1 << n), 0) */
- n = cmp_l;
- DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
- } else {
- /* (a & (1 << n)) == 0, (1 << n), 0) */
- n = new_rd_Eor(get_irn_dbg_info(n),
- block, cmp_l, t, mode);
- DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
- }
- return n;
- }
- }
- }
- and_l = get_And_left(cmp_l);
- if (is_Shl(and_l)) {
- ir_node *shl_l = get_Shl_left(and_l);
- if (is_Const(shl_l) && is_Const_one(shl_l)) {
- if (and_l == t && f == cmp_r) {
- if (relation == ir_relation_less_greater) {
- /* ((1 << n) & a) != 0, (1 << n), 0) */
- n = cmp_l;
- DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
- } else {
- /* ((1 << n) & a) == 0, (1 << n), 0) */
- n = new_rd_Eor(get_irn_dbg_info(n),
- block, cmp_l, t, mode);
- DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
- }
- return n;
- }
- }
- }
+ if (and_r == t && is_single_bit(and_r)) {
+ if (relation == ir_relation_equal) {
+ /* Mux((a & (1<<n)) == 0, (1<<n), 0) => (a & (1<<n)) ^ (1<<n) */
+ n = new_rd_Eor(get_irn_dbg_info(n),
+ block, cmp_l, t, mode);
+ DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
+ } else {
+ /* Mux((a & (1<<n)) != 0, (1<<n), 0) => a & (1<<n) */
+ n = cmp_l;
+ DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
}
+ return n;
+ }
+ and_l = get_And_left(cmp_l);
+ if (and_l == t && is_single_bit(and_l)) {
+ if (relation == ir_relation_equal) {
+ /* Mux(((1 << n) & a) == 0, (1 << n), 0) => ((1 << n) & a) ^ (1 << n) */
+ n = new_rd_Eor(get_irn_dbg_info(n),
+ block, cmp_l, t, mode);
+ DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
+ } else {
+ /* Mux(((1 << n) & a) != 0, (1 << n), 0) => (1 << n) & a */
+ n = cmp_l;
+ DBG_OPT_ALGSIM1(oldn, sel, sel, n, FS_OPT_MUX_TO_BITOP);
+ }
+ return n;
}
}
}
int pred_arity;
int j;
+ /* Remove Bad predecessors */
+ if (is_Bad(pred)) {
+ del_Sync_n(n, i);
+ --arity;
+ continue;
+ }
+
+ /* Remove duplicate predecessors */
+ for (j = 0; j < i; ++j) {
+ if (get_Sync_pred(n, j) == pred) {
+ del_Sync_n(n, i);
+ --arity;
+ break;
+ }
+ }
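+ /* j < i means a duplicate was found and entry i deleted; re-check index i */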
+ if (j < i)
+ continue;
+
if (!is_Sync(pred)) {
++i;
continue;
}
}
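+ /* After Bad/duplicate removal the Sync may be trivial, e.g.
+ * Sync(m, m, Bad) shrinks to Sync(m), which is simply m. */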
+ if (arity == 0) {
+ ir_graph *irg = get_irn_irg(n);
+ return new_r_Bad(irg, mode_M);
+ }
+ if (arity == 1) {
+ return get_Sync_pred(n, 0);
+ }
+
/* rehash the sync node */
add_identities(n);
+ return n;
+}
+
+static ir_node *transform_node_Load(ir_node *n)
+{
+ /* if our memory predecessor is a Load from the same address, reuse its
+ * result; if it is a Store to the same address, reuse the stored value */
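+ /* Illustration: in "x = *p; y = *p;" the second Load's memory input is
+ * the first Load's memory Proj, so it collapses to the first Load's
+ * result; in "*p = v; x = *p;" the Load collapses to v. */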
+ ir_node *mem = get_Load_mem(n);
+ ir_node *mem_pred;
+
+ if (!is_Proj(mem))
+ return n;
+ /* don't touch volatile loads */
+ if (get_Load_volatility(n) == volatility_is_volatile)
+ return n;
+ mem_pred = get_Proj_pred(mem);
+ if (is_Load(mem_pred)) {
+ ir_node *pred_load = mem_pred;
+
+ /* conservatively compare the 2 loads. TODO: This could be less strict
+ * with fixup code in some situations (like smaller/bigger modes) */
+ if (get_Load_ptr(pred_load) != get_Load_ptr(n))
+ return n;
+ if (get_Load_mode(pred_load) != get_Load_mode(n))
+ return n;
+ /* all combinations of aligned/unaligned pred/n should be fine so we do
+ * not compare the unaligned attribute */
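+ /* Reroute the Load's Projs through a Tuple; the entries correspond,
+ * in order, to pn_Load_M, pn_Load_res, pn_Load_X_regular and
+ * pn_Load_X_except (the replaced Load cannot fail anymore). */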
+ {
+ ir_node *block = get_nodes_block(n);
+ ir_node *jmp = new_r_Jmp(block);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *bad = new_r_Bad(irg, mode_X);
+ ir_mode *mode = get_Load_mode(n);
+ ir_node *res = new_r_Proj(pred_load, mode, pn_Load_res);
+ ir_node *in[] = { mem, res, jmp, bad };
+ ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
+ return tuple;
+ }
+ } else if (is_Store(mem_pred)) {
+ ir_node *pred_store = mem_pred;
+ ir_node *value = get_Store_value(pred_store);
+
+ if (get_Store_ptr(pred_store) != get_Load_ptr(n))
+ return n;
+ if (get_irn_mode(value) != get_Load_mode(n))
+ return n;
+ /* all combinations of aligned/unaligned pred/n should be fine so we do
+ * not compare the unaligned attribute */
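+ /* As above, but the previously stored value takes the place of the
+ * load result. */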
+ {
+ ir_node *block = get_nodes_block(n);
+ ir_node *jmp = new_r_Jmp(block);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *bad = new_r_Bad(irg, mode_X);
+ ir_node *res = value;
+ ir_node *in[] = { mem, res, jmp, bad };
+ ir_node *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
+ return tuple;
+ }
+ }
return n;
-} /* transform_node_Sync */
+}
/**
* optimize a trampoline Call into a direct Call
}
var = get_method_variadicity(mtp);
set_method_variadicity(ctp, var);
- if (var == variadicity_variadic) {
- set_method_first_variadic_param_index(ctp, get_method_first_variadic_param_index(mtp) + 1);
- }
/* When we resolve a trampoline, the function must be called by a this-call */
set_method_calling_convention(ctp, get_method_calling_convention(mtp) | cc_this_call);
set_method_additional_properties(ctp, get_method_additional_properties(mtp));
switch (code) {
CASE(Add);
CASE(And);
+ CASE(Block);
CASE(Call);
CASE(Cmp);
CASE(Conv);
CASE(Sync);
CASE_PROJ(Bound);
CASE_PROJ(CopyB);
- CASE_PROJ(Load);
CASE_PROJ(Store);
CASE_PROJ_EX(Cond);
CASE_PROJ_EX(Div);
+ CASE_PROJ_EX(Load);
CASE_PROJ_EX(Mod);
default:
- /* leave NULL */;
+ break;
}
return ops;
in a graph. */
#define N_IR_NODES 512
+/** Compares two exception attributes; returns 0 iff they are equal. */
+static int node_cmp_exception(const ir_node *a, const ir_node *b)
+{
+ const except_attr *ea = &a->attr.except;
+ const except_attr *eb = &b->attr.except;
+ return ea->pin_state != eb->pin_state;
+}
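+
+/* Note: every node_cmp_attr function returns nonzero for "different", which
+ * keeps the CSE machinery from identifying the two nodes. */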
+
/** Compares the attributes of two Const nodes. */
static int node_cmp_attr_Const(const ir_node *a, const ir_node *b)
{
{
const alloc_attr *pa = &a->attr.alloc;
const alloc_attr *pb = &b->attr.alloc;
- return (pa->where != pb->where) || (pa->type != pb->type);
+ if (pa->where != pb->where || pa->type != pb->type)
+ return 1;
+ return node_cmp_exception(a, b);
}
/** Compares the attributes of two Free nodes. */
{
const call_attr *pa = &a->attr.call;
const call_attr *pb = &b->attr.call;
- return (pa->type != pb->type)
- || (pa->tail_call != pb->tail_call);
+ if (pa->type != pb->type || pa->tail_call != pb->tail_call)
+ return 1;
+ return node_cmp_exception(a, b);
}
/** Compares the attributes of two Sel nodes. */
/* NEVER do CSE on volatile Loads */
return 1;
/* do not CSE Loads with different alignment. Be conservative. */
- if (get_Load_align(a) != get_Load_align(b))
+ if (get_Load_unaligned(a) != get_Load_unaligned(b))
return 1;
-
- return get_Load_mode(a) != get_Load_mode(b);
+ if (get_Load_mode(a) != get_Load_mode(b))
+ return 1;
+ return node_cmp_exception(a, b);
}
/** Compares the attributes of two Store nodes. */
static int node_cmp_attr_Store(const ir_node *a, const ir_node *b)
{
/* do not CSE Stores with different alignment. Be conservative. */
- if (get_Store_align(a) != get_Store_align(b))
+ if (get_Store_unaligned(a) != get_Store_unaligned(b))
return 1;
-
/* NEVER do CSE on volatile Stores */
- return (get_Store_volatility(a) == volatility_is_volatile ||
- get_Store_volatility(b) == volatility_is_volatile);
+ if (get_Store_volatility(a) == volatility_is_volatile ||
+ get_Store_volatility(b) == volatility_is_volatile)
+ return 1;
+ return node_cmp_exception(a, b);
}
-/** Compares two exception attributes */
-static int node_cmp_exception(const ir_node *a, const ir_node *b)
+static int node_cmp_attr_CopyB(const ir_node *a, const ir_node *b)
{
- const except_attr *ea = &a->attr.except;
- const except_attr *eb = &b->attr.except;
+ if (get_CopyB_type(a) != get_CopyB_type(b))
+ return 1;
- return ea->pin_state != eb->pin_state;
+ return node_cmp_exception(a, b);
}
-#define node_cmp_attr_Bound node_cmp_exception
+static int node_cmp_attr_Bound(const ir_node *a, const ir_node *b)
+{
+ return node_cmp_exception(a, b);
+}
/** Compares the attributes of two Div nodes. */
static int node_cmp_attr_Div(const ir_node *a, const ir_node *b)
{
const div_attr *ma = &a->attr.div;
const div_attr *mb = &b->attr.div;
- return ma->exc.pin_state != mb->exc.pin_state ||
- ma->resmode != mb->resmode ||
- ma->no_remainder != mb->no_remainder;
+ if (ma->resmode != mb->resmode || ma->no_remainder != mb->no_remainder)
+ return 1;
+ return node_cmp_exception(a, b);
}
/** Compares the attributes of two Mod nodes. */
{
const mod_attr *ma = &a->attr.mod;
const mod_attr *mb = &b->attr.mod;
- return ma->exc.pin_state != mb->exc.pin_state ||
- ma->resmode != mb->resmode;
+ if (ma->resmode != mb->resmode)
+ return 1;
+ return node_cmp_exception(a, b);
}
static int node_cmp_attr_Cmp(const ir_node *a, const ir_node *b)
/** Compares the attributes of two Builtin nodes. */
static int node_cmp_attr_Builtin(const ir_node *a, const ir_node *b)
{
- /* no need to compare the type, equal kind means equal type */
- return get_Builtin_kind(a) != get_Builtin_kind(b);
+ if (get_Builtin_kind(a) != get_Builtin_kind(b))
+ return 1;
+ if (get_Builtin_type(a) != get_Builtin_type(b))
+ return 1;
+ return node_cmp_exception(a, b);
}
/** Compares the attributes of two ASM nodes. */
/* Should we really check the constraints here? Should be better, but is strange. */
n = get_ASM_n_input_constraints(a);
if (n != get_ASM_n_input_constraints(b))
- return 0;
+ return 1;
ca = get_ASM_input_constraints(a);
cb = get_ASM_input_constraints(b);
for (i = 0; i < n; ++i) {
- if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
+ if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
+ || ca[i].mode != cb[i].mode)
return 1;
}
n = get_ASM_n_output_constraints(a);
if (n != get_ASM_n_output_constraints(b))
- return 0;
+ return 1;
ca = get_ASM_output_constraints(a);
cb = get_ASM_output_constraints(b);
for (i = 0; i < n; ++i) {
- if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint)
+ if (ca[i].pos != cb[i].pos || ca[i].constraint != cb[i].constraint
+ || ca[i].mode != cb[i].mode)
return 1;
}
n = get_ASM_n_clobbers(a);
if (n != get_ASM_n_clobbers(b))
- return 0;
+ return 1;
cla = get_ASM_clobbers(a);
clb = get_ASM_clobbers(b);
if (cla[i] != clb[i])
return 1;
}
- return 0;
+
+ return node_cmp_exception(a, b);
}
/** Compares the inexistent attributes of two Dummy nodes. */
{
(void) a;
(void) b;
+ /* Dummy nodes are never equal by definition */
return 1;
}
+static int node_cmp_attr_InstOf(const ir_node *a, const ir_node *b)
+{
+ if (get_InstOf_type(a) != get_InstOf_type(b))
+ return 1;
+ return node_cmp_exception(a, b);
+}
+
/**
* Set the default node attribute compare operation for an ir_op_ops.
*
break
switch (code) {
- CASE(Const);
- CASE(Proj);
+ CASE(ASM);
CASE(Alloc);
- CASE(Free);
- CASE(SymConst);
+ CASE(Bound);
+ CASE(Builtin);
CASE(Call);
- CASE(Sel);
- CASE(Phi);
- CASE(Cmp);
- CASE(Conv);
CASE(Cast);
- CASE(Load);
- CASE(Store);
+ CASE(Cmp);
CASE(Confirm);
- CASE(ASM);
+ CASE(Const);
+ CASE(Conv);
+ CASE(CopyB);
CASE(Div);
- CASE(Mod);
- CASE(Bound);
- CASE(Builtin);
CASE(Dummy);
- /* FIXME CopyB */
+ CASE(Free);
+ CASE(InstOf);
+ CASE(Load);
+ CASE(Mod);
+ CASE(Phi);
+ CASE(Proj);
+ CASE(Sel);
+ CASE(Store);
+ CASE(SymConst);
default:
/* leave NULL */
break;
/* for pinned nodes, the block inputs must be equal */
if (get_irn_n(a, -1) != get_irn_n(b, -1))
return 1;
- } else if (! get_opt_global_cse()) {
- /* for block-local CSE both nodes must be in the same Block */
- if (get_nodes_block(a) != get_nodes_block(b))
- return 1;
+ } else {
+ ir_node *block_a = get_nodes_block(a);
+ ir_node *block_b = get_nodes_block(b);
+ if (! get_opt_global_cse()) {
+ /* for block-local CSE both nodes must be in the same Block */
+ if (block_a != block_b)
+ return 1;
+ } else {
+ /* The optimistic approach would be to do nothing here.
+ * However, doing GCSE optimistically produces a lot of partially dead
+ * code, which appears to be worse in practice than the missed
+ * opportunities. So we use a very conservative variant here and only
+ * CSE if one value's block dominates the other's, i.e. the surviving
+ * node is available at both original positions. */
+ if (!block_dominates(block_a, block_b)
+ && !block_dominates(block_b, block_a))
+ return 1;
+ }
}
/* compare a->in[0..ins] with b->in[0..ins] */
if (nn != n) {
/* n is reachable again */
- edges_node_revival(nn, get_irn_irg(nn));
+ edges_node_revival(nn);
}
return nn;
current_ir_graph = rem;
} /* visit_all_identities */
-/**
- * Garbage in, garbage out. If a node has a dead input, i.e., the
- * Bad node is input to the node, return the Bad node.
- */
-static ir_node *gigo(ir_node *node)
-{
- int i, irn_arity;
- ir_op *op = get_irn_op(node);
-
- /* remove garbage blocks by looking at control flow that leaves the block
- and replacing the control flow by Bad. */
- if (get_irn_mode(node) == mode_X) {
- ir_node *block = get_nodes_block(skip_Proj(node));
- ir_graph *irg = get_irn_irg(block);
-
- /* Don't optimize nodes in immature blocks. */
- if (!get_Block_matured(block))
- return node;
- /* Don't optimize End, may have Bads. */
- if (op == op_End) return node;
-
- if (is_Block(block)) {
- if (is_Block_dead(block)) {
- /* control flow from dead block is dead */
- return get_irg_bad(irg);
- }
-
- for (i = get_irn_arity(block) - 1; i >= 0; --i) {
- if (!is_Bad(get_irn_n(block, i)))
- break;
- }
- if (i < 0) {
- ir_graph *irg = get_irn_irg(block);
- /* the start block is never dead */
- if (block != get_irg_start_block(irg)
- && block != get_irg_end_block(irg)) {
- /*
- * Do NOT kill control flow without setting
- * the block to dead of bad things can happen:
- * We get a Block that is not reachable be irg_block_walk()
- * but can be found by irg_walk()!
- */
- set_Block_dead(block);
- return get_irg_bad(irg);
- }
- }
- }
- }
-
- /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
- blocks predecessors is dead. */
- if (op != op_Block && op != op_Phi && op != op_Tuple && op != op_Anchor) {
- ir_graph *irg = get_irn_irg(node);
- irn_arity = get_irn_arity(node);
-
- /*
- * Beware: we can only read the block of a non-floating node.
- */
- if (is_irn_pinned_in_irg(node) &&
- is_Block_dead(get_nodes_block(skip_Proj(node))))
- return get_irg_bad(irg);
-
- for (i = 0; i < irn_arity; i++) {
- ir_node *pred = get_irn_n(node, i);
-
- if (is_Bad(pred))
- return get_irg_bad(irg);
-#if 0
- /* Propagating Unknowns here seems to be a bad idea, because
- sometimes we need a node as a input and did not want that
- it kills its user.
- However, it might be useful to move this into a later phase
- (if you think that optimizing such code is useful). */
- if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
- return new_r_Unknown(irg, get_irn_mode(node));
-#endif
- }
- }
-#if 0
- /* With this code we violate the agreement that local_optimize
- only leaves Bads in Block, Phi and Tuple nodes. */
- /* If Block has only Bads as predecessors it's garbage. */
- /* If Phi has only Bads as predecessors it's garbage. */
- if ((op == op_Block && get_Block_matured(node)) || op == op_Phi) {
- irn_arity = get_irn_arity(node);
- for (i = 0; i < irn_arity; i++) {
- if (!is_Bad(get_irn_n(node, i))) break;
- }
- if (i == irn_arity) node = get_irg_bad(irg);
- }
-#endif
- return node;
-} /* gigo */
-
/**
* These optimizations deallocate nodes from the obstack.
* It can only be called if it is guaranteed that no other nodes
/* Always optimize Phi nodes: part of the construction. */
if ((!get_opt_optimize()) && (iro != iro_Phi)) return n;
- /* Remove nodes with dead (Bad) input.
- Run always for transformation induced Bads. */
- n = gigo(n);
- if (n != oldn) {
- edges_node_deleted(oldn, irg);
-
- /* We found an existing, better node, so we can deallocate the old node. */
- irg_kill_node(irg, oldn);
- return n;
- }
-
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
/* neither constants nor Tuple values can be evaluated */
memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
/* note the inplace edges module */
- edges_node_deleted(n, irg);
+ edges_node_deleted(n);
/* evaluation was successful -- replace the node. */
irg_kill_node(irg, n);
n = identify_cons(n);
if (n != oldn) {
- edges_node_deleted(oldn, irg);
+ edges_node_deleted(oldn);
/* We found an existing, better node, so we can deallocate the old node. */
irg_kill_node(irg, oldn);
if (iro == iro_Deleted)
return n;
- /* Remove nodes with dead (Bad) input.
- Run always for transformation induced Bads. */
- n = gigo(n);
- if (is_Bad(n))
- return n;
-
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
/* neither constants nor Tuple values can be evaluated */
if (get_opt_global_cse())
set_irg_pinned(irg, op_pin_state_floats);
- if (get_irg_outs_state(irg) == outs_consistent)
- set_irg_outs_inconsistent(irg);
/* FIXME: Maybe we could also test whether optimizing the node can
change the control graph. */
- set_irg_doms_inconsistent(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE);
return optimize_in_place_2(n);
} /* optimize_in_place */
* @return
* The operations.
*/
-static ir_op_ops *firm_set_default_hash(ir_opcode code, ir_op_ops *ops)
+static ir_op_ops *firm_set_default_hash(unsigned code, ir_op_ops *ops)
{
#define CASE(a) \
case iro_##a: \
/*
* Sets the default operation for an ir_ops.
*/
-ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops)
+ir_op_ops *firm_set_default_operations(unsigned code, ir_op_ops *ops)
{
ops = firm_set_default_hash(code, ops);
ops = firm_set_default_computed_value(code, ops);