ir_tarval *tv;
if (a == b) {
- n = a; /* Or has it's own neutral element */
+ n = a; /* idempotence */
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_OR);
return n;
}
- /* constants are cormalized to right, check this site first */
+ /* constants are normalized to right, check this side first */
tv = value_of(b);
if (tarval_is_null(tv)) {
n = a;
ir_tarval *tv;
if (a == b) {
- n = a; /* And has it's own neutral element */
+ n = a; /* idempotence */
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_AND);
return n;
}
- /* constants are normalized to right, check this site first */
+ /* constants are normalized to right, check this side first */
tv = value_of(b);
if (tarval_is_all_one(tv)) {
n = a;
first_val = get_Phi_pred(n, i);
if ( (first_val != n) /* not self pointer */
#if 0
- /* BEWARE: when the if is changed to 1, Phi's will ignore it's Bad
- * predecessors. Then, Phi nodes in dead code might be removed, causing
- * nodes pointing to themself (Add's for instance).
- * This is really bad and causes endless recursions in several
- * code pathes, so we do NOT optimize such a code.
+ /* BEWARE: when the if is changed to 1, Phis will ignore their Bad
+ * predecessors. Then, Phi nodes in unreachable code might be removed,
+ * causing nodes pointing to themselves (Adds for instance).
+ * This is really bad and causes endless recursion in several
+ * code paths, so we do NOT optimize such code.
* This is not that bad as it sounds, optimize_cf() removes bad control flow
* (and bad Phi predecessors), so live code is optimized later.
*/
} /* equivalent_node_Proj_Store */
/**
- * Does all optimizations on nodes that must be done on it's Proj's
+ * Does all optimizations on nodes that must be done on its Projs
* because of creating new nodes.
*/
static ir_node *equivalent_node_Proj(ir_node *proj)
return n;
}
+/**
+ * Create a 0 constant of given mode.
+ */
+static ir_node *create_zero_const(ir_graph *irg, ir_mode *mode)
+{
+ ir_tarval *tv = get_mode_null(mode);
+ ir_node *cnst = new_r_Const(irg, tv);
+
+ return cnst;
+}
+
/**
* Transform an And.
*/
ir_mode *mode;
vrp_attr *a_vrp, *b_vrp;
- /* we can combine the relations of two compares with the same operands */
if (is_Cmp(a) && is_Cmp(b)) {
- ir_node *a_left = get_Cmp_left(a);
- ir_node *a_right = get_Cmp_left(a);
- ir_node *b_left = get_Cmp_left(b);
- ir_node *b_right = get_Cmp_right(b);
+ ir_node *a_left = get_Cmp_left(a);
+ ir_node *a_right = get_Cmp_right(a);
+ ir_node *b_left = get_Cmp_left(b);
+ ir_node *b_right = get_Cmp_right(b);
+ ir_relation a_relation = get_Cmp_relation(a);
+ ir_relation b_relation = get_Cmp_relation(b);
+ /* we can combine the relations of two compares with the same
+ * operands */
if (a_left == b_left && b_left == b_right) {
dbg_info *dbgi = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ir_relation a_relation = get_Cmp_relation(a);
- ir_relation b_relation = get_Cmp_relation(b);
ir_relation new_relation = a_relation & b_relation;
return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
}
+ /* Cmp(a==0) and Cmp(b==0) can be optimized to Cmp(a|b==0) */
+ if (is_Const(a_right) && is_Const_null(a_right)
+ && is_Const(b_right) && is_Const_null(b_right)
+ && a_relation == b_relation && a_relation == ir_relation_equal
+ && !mode_is_float(get_irn_mode(a_left))
+ && !mode_is_float(get_irn_mode(b_left))) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(a_left);
+ ir_node *n_b_left = get_irn_mode(b_left) != mode ?
+ new_rd_Conv(dbgi, block, b_left, mode) : b_left;
+ ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *zero = create_zero_const(irg, mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal);
+ }
}
mode = get_irn_mode(n);
return false;
}
-/**
- * Create a 0 constant of given mode.
- */
-static ir_node *create_zero_const(ir_graph *irg, ir_mode *mode)
-{
- ir_tarval *tv = get_mode_null(mode);
- ir_node *cnst = new_r_Const(irg, tv);
-
- return cnst;
-}
-
/**
* Normalizes and optimizes Cmp nodes.
*/
} /* transform_node_Proj_Bound */
/**
- * Does all optimizations on nodes that must be done on it's Proj's
+ * Does all optimizations on nodes that must be done on its Projs
* because of creating new nodes.
*/
static ir_node *transform_node_Proj(ir_node *proj)
return n;
} /* transform_node_Or_Rotl */
+static bool is_cmp_unequal_zero(const ir_node *node)
+{
+ ir_relation relation = get_Cmp_relation(node);
+ ir_node *left = get_Cmp_left(node);
+ ir_node *right = get_Cmp_right(node);
+ ir_mode *mode = get_irn_mode(left);
+
+ if (!is_Const(right) || !is_Const_null(right))
+ return false;
+ if (mode_is_signed(mode)) {
+ return relation == ir_relation_less_greater;
+ } else {
+ return relation == ir_relation_greater;
+ }
+}
+
/**
* Transform an Or.
*/
ir_relation new_relation = a_relation | b_relation;
return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
}
+ /* Cmp(a!=0) or Cmp(b!=0) => Cmp(a|b != 0) */
+ if (is_cmp_unequal_zero(a) && is_cmp_unequal_zero(b)
+ && !mode_is_float(get_irn_mode(a_left))
+ && !mode_is_float(get_irn_mode(b_left))) {
+ ir_graph *irg = get_irn_irg(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(a_left);
+ ir_node *n_b_left = get_irn_mode(b_left) != mode ?
+ new_rd_Conv(dbgi, block, b_left, mode) : b_left;
+ ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
+ ir_node *zero = create_zero_const(irg, mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_less_greater);
+ }
}
mode = get_irn_mode(n);
}
var = get_method_variadicity(mtp);
set_method_variadicity(ctp, var);
- if (var == variadicity_variadic) {
- set_method_first_variadic_param_index(ctp, get_method_first_variadic_param_index(mtp) + 1);
- }
/* When we resolve a trampoline, the function must be called by a this-call */
set_method_calling_convention(ctp, get_method_calling_convention(mtp) | cc_this_call);
set_method_additional_properties(ctp, get_method_additional_properties(mtp));
/* NEVER do CSE on volatile Loads */
return 1;
/* do not CSE Loads with different alignment. Be conservative. */
- if (get_Load_align(a) != get_Load_align(b))
+ if (get_Load_unaligned(a) != get_Load_unaligned(b))
return 1;
return get_Load_mode(a) != get_Load_mode(b);
static int node_cmp_attr_Store(const ir_node *a, const ir_node *b)
{
/* do not CSE Stores with different alignment. Be conservative. */
- if (get_Store_align(a) != get_Store_align(b))
+ if (get_Store_unaligned(a) != get_Store_unaligned(b))
return 1;
/* NEVER do CSE on volatile Stores */
if (nn != n) {
/* n is reachable again */
- edges_node_revival(nn, get_irn_irg(nn));
+ edges_node_revival(nn);
}
return nn;
#if 0
/* Propagating Unknowns here seems to be a bad idea, because
sometimes we need a node as a input and did not want that
- it kills it's user.
+ it kills its user.
However, it might be useful to move this into a later phase
(if you think that optimizing such code is useful). */
if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
Run always for transformation induced Bads. */
n = gigo(n);
if (n != oldn) {
- edges_node_deleted(oldn, irg);
+ edges_node_deleted(oldn);
/* We found an existing, better node, so we can deallocate the old node. */
irg_kill_node(irg, oldn);
size_t node_size;
/*
- * we MUST copy the node here temporary, because it's still
+ * we MUST copy the node here temporarily, because it's still
* needed for DBG_OPT_CSTEVAL
*/
node_size = offsetof(ir_node, attr) + n->op->attr_size;
memcpy(oldn->in, n->in, ARR_LEN(n->in) * sizeof(n->in[0]));
/* note the inplace edges module */
- edges_node_deleted(n, irg);
+ edges_node_deleted(n);
/* evaluation was successful -- replace the node. */
irg_kill_node(irg, n);
n = identify_cons(n);
if (n != oldn) {
- edges_node_deleted(oldn, irg);
+ edges_node_deleted(oldn);
/* We found an existing, better node, so we can deallocate the old node. */
irg_kill_node(irg, oldn);