if (mode_is_num(mode)) {
/* the following code leads to endless recursion when Muls are replaced by a simple instruction chain */
- if (!get_opt_arch_dep_running() && a == b && mode_is_int(mode)) {
+ if (!is_arch_dep_running() && a == b && mode_is_int(mode)) {
ir_node *block = get_irn_n(n, -1);
n = new_rd_Mul(
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
return n;
}
- if (! get_opt_reassociation()) {
+ if (! is_reassoc_running()) {
/* do NOT execute this code if reassociation is enabled, it does the inverse! */
if (is_Mul(a)) {
ir_node *ma = get_Mul_left(a);
ir_node *op = get_Not_op(b);
if (op == a) {
- /* ~x + x = -1 */
+ /* x + ~x = -1 */
ir_node *blk = get_irn_n(n, -1);
n = new_r_Const(current_ir_graph, blk, mode, get_mode_minus_one(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
}
}
/* do NOT execute this code if reassociation is enabled, it does the inverse! */
- if (get_opt_reassociation() && is_Mul(a)) {
+ if (!is_reassoc_running() && is_Mul(a)) {
ir_node *ma = get_Mul_left(a);
ir_node *mb = get_Mul_right(a);
return n;
}
}
- if (is_Sub(a)) {
- ir_node *x = get_Sub_left(a);
- ir_node *y = get_Sub_right(a);
- ir_node *blk = get_irn_n(n, -1);
- ir_mode *m_b = get_irn_mode(b);
- ir_mode *m_y = get_irn_mode(y);
+ if (is_Sub(a)) { /* (x - y) - b -> x - (y + b) */
+ ir_node *x = get_Sub_left(a);
+ ir_node *y = get_Sub_right(a);
+ ir_node *blk = get_irn_n(n, -1);
+ ir_mode *m_b = get_irn_mode(b);
+ ir_mode *m_y = get_irn_mode(y);
+ ir_mode *add_mode;
ir_node *add;
/* Determine the right mode for the Add. */
if (m_b == m_y)
- mode = m_b;
+ add_mode = m_b;
else if (mode_is_reference(m_b))
- mode = m_b;
+ add_mode = m_b;
else if (mode_is_reference(m_y))
- mode = m_y;
+ add_mode = m_y;
else {
/*
* Both modes are different but none is reference,
return n;
}
- add = new_r_Add(current_ir_graph, blk, y, b, mode);
+ add = new_r_Add(current_ir_graph, blk, y, b, add_mode);
n = new_rd_Sub(get_irn_dbg_info(n), current_ir_graph, blk, x, add, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_SUB_X_Y_Z);
HANDLE_BINOP_PHI(tarval_eor, a,b,c);
/* we can evaluate 2 Projs of the same Cmp */
- if(get_irn_mode(n) == mode_b && is_Proj(a) && is_Proj(b)) {
+ if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
ir_node *pred_a = get_Proj_pred(a);
ir_node *pred_b = get_Proj_pred(b);
if(pred_a == pred_b) {
mode_b, get_negated_pnc(get_Proj_proj(a), mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
- } else if (mode == mode_b && is_Const(b) && is_Const_one(b)) {
- /* The Eor is a Not. Replace it by a Not. */
- /* ????!!!Extend to bitfield 1111111. */
- n = new_r_Not(current_ir_graph, get_irn_n(n, -1), a, mode_b);
-
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ } else if (is_Const(b)) {
+ if (is_Not(a)) { /* ~x ^ const -> x ^ ~const */
+ ir_node *cnst = new_Const(mode, tarval_not(get_Const_tarval(b)));
+ ir_node *not_op = get_Not_op(a);
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(n);
+ n = new_rd_Eor(dbg, irg, block, not_op, cnst, mode);
+ return n;
+ } else if (is_Const_all_one(b)) { /* x ^ 1...1 -> ~x */
+ n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ }
} else {
n = transform_bitwise_distributive(n, transform_node_Eor);
}
n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
- return n;
+ return n;
+ }
+ if (is_Eor(a)) {
+ ir_node *eor_b = get_Eor_right(a);
+ if (is_Const(eor_b)) { /* ~(x ^ const) -> x ^ ~const */
+ ir_node *cnst = new_Const(mode, tarval_not(get_Const_tarval(eor_b)));
+ ir_node *eor_a = get_Eor_left(a);
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(n);
+ n = new_rd_Eor(dbg, irg, block, eor_a, cnst, mode);
+ return n;
+ }
}
if (get_mode_arithmetic(mode) == irma_twos_complement) {
if (is_Minus(a)) { /* ~-x -> x + -1 */
return n;
}
+ if (is_Mul(a)) { /* -(a * const) -> a * -const */
+ ir_node *mul_l = get_Mul_left(a);
+ ir_node *mul_r = get_Mul_right(a);
+ if (is_Const(mul_r)) {
+ tarval *tv = tarval_neg(get_Const_tarval(mul_r));
+ ir_node *cnst = new_Const(mode, tv);
+ dbg_info *dbg = get_irn_dbg_info(a);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(a);
+ n = new_rd_Mul(dbg, irg, block, mul_l, cnst, mode);
+ return n;
+ }
+ }
+
return n;
} /* transform_node_Minus */
if (value_not_zero(b, &confirm)) {
/* div(x, y) && y != 0 */
+ if (confirm == NULL) {
+ /* we are sure we have a Const != 0 */
+ new_mem = get_Div_mem(div);
+ if (is_Pin(new_mem))
+ new_mem = get_Pin_op(new_mem);
+ set_Div_mem(div, new_mem);
+ set_irn_pinned(div, op_pin_state_floats);
+ }
+
proj_nr = get_Proj_proj(proj);
switch (proj_nr) {
case pn_Div_X_regular:
/* mod(x, y) && y != 0 */
proj_nr = get_Proj_proj(proj);
+ if (confirm == NULL) {
+ /* we are sure we have a Const != 0 */
+ new_mem = get_Mod_mem(mod);
+ if (is_Pin(new_mem))
+ new_mem = get_Pin_op(new_mem);
+ set_Mod_mem(mod, new_mem);
+ set_irn_pinned(mod, op_pin_state_floats);
+ }
+
switch (proj_nr) {
case pn_Mod_X_regular:
/* This node can only float up to the Confirm block */
new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
}
- set_irn_pinned(mod, op_pin_state_floats);
/* this is a Mod without exception, we can remove the memory edge */
- set_Mod_mem(mod, get_irg_no_mem(current_ir_graph));
+ set_Mod_mem(mod, new_mem);
return res;
case pn_Mod_res:
if (get_Mod_left(mod) == b) {
/* DivMod(x, y) && y != 0 */
proj_nr = get_Proj_proj(proj);
+ if (confirm == NULL) {
+ /* we are sure we have a Const != 0 */
+ new_mem = get_DivMod_mem(divmod);
+ if (is_Pin(new_mem))
+ new_mem = get_Pin_op(new_mem);
+ set_DivMod_mem(divmod, new_mem);
+ set_irn_pinned(divmod, op_pin_state_floats);
+ }
+
switch (proj_nr) {
case pn_DivMod_X_regular:
/* This node can only float up to the Confirm block */
new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
}
- set_irn_pinned(divmod, op_pin_state_floats);
/* this is a DivMod without exception, we can remove the memory edge */
- set_DivMod_mem(divmod, get_irg_no_mem(current_ir_graph));
+ set_DivMod_mem(divmod, new_mem);
return res;
case pn_DivMod_res_mod:
/* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
if (tarval_is_null(tv) && is_Sub(left)) {
- right =get_Sub_right(left);
+ right = get_Sub_right(left);
left = get_Sub_left(left);
tv = value_of(right);
} /* tarval != bad */
}
+ if (changed & 2) /* need a new Const */
+ right = new_Const(mode, tv);
+
+ if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_Const(right) && is_Const_null(right) && is_Proj(left)) {
+ ir_node *op = get_Proj_pred(left);
+
+ if ((is_Mod(op) && get_Proj_proj(left) == pn_Mod_res) ||
+ (is_DivMod(op) && get_Proj_proj(left) == pn_DivMod_res_mod)) {
+ ir_node *c = get_binop_right(op);
+
+ if (is_Const(c)) {
+ tarval *tv = get_Const_tarval(c);
+
+ if (tarval_is_single_bit(tv)) {
+ /* special case: (x % 2^n) CMP 0 ==> x & (2^n-1) CMP 0 */
+ ir_node *v = get_binop_left(op);
+ ir_node *blk = get_irn_n(op, -1);
+ ir_mode *mode = get_irn_mode(v);
+
+ tv = tarval_sub(tv, get_mode_one(mode));
+ left = new_rd_And(get_irn_dbg_info(op), current_ir_graph, blk, v, new_Const(mode, tv), mode);
+ changed |= 1;
+ }
+ }
+ }
+ }
+
if (changed) {
ir_node *block = get_irn_n(n, -1); /* Beware of get_nodes_Block() */
- if (changed & 2) /* need a new Const */
- right = new_Const(mode, tv);
-
/* create a new compare */
n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block, left, right);
}
}
- if (mode_is_int(mode) && mode_is_signed(mode) &&
- get_mode_arithmetic(mode) == irma_twos_complement) {
- ir_node *x = get_Cmp_left(cmp);
-
- /* the following optimization works only with signed integer two-complement mode */
-
- if (mode == get_irn_mode(x)) {
- /*
- * FIXME: this restriction is two rigid, as it would still
- * work if mode(x) = Hs and mode == Is, but at least it removes
- * all wrong cases.
- */
- if ((pn == pn_Cmp_Lt || pn == pn_Cmp_Le) &&
- is_Const(t) && is_Const_all_one(t) &&
- is_Const(f) && is_Const_null(f)) {
- /*
- * Mux(x:T </<= 0, 0, -1) -> Shrs(x, sizeof_bits(T) - 1)
- * Conditions:
- * T must be signed.
- */
- n = new_rd_Shrs(get_irn_dbg_info(n),
- current_ir_graph, block, x,
- new_r_Const_long(current_ir_graph, block, mode_Iu,
- get_mode_size_bits(mode) - 1),
- mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
- return n;
- } else if ((pn == pn_Cmp_Gt || pn == pn_Cmp_Ge) &&
- is_Const(t) && is_Const_one(t) &&
- is_Const(f) && is_Const_null(f)) {
- /*
- * Mux(x:T >/>= 0, 0, 1) -> Shr(-x, sizeof_bits(T) - 1)
- * Conditions:
- * T must be signed.
- */
- n = new_rd_Shr(get_irn_dbg_info(n),
- current_ir_graph, block,
- new_r_Minus(current_ir_graph, block, x, mode),
- new_r_Const_long(current_ir_graph, block, mode_Iu,
- get_mode_size_bits(mode) - 1),
- mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
- return n;
- }
- }
- }
}
}
}
return n;
} /* transform_node_Psi */
+/**
+ * Optimize Sync nodes that have other Sync nodes as inputs:
+ * flatten them by splicing the child Sync's inputs into our
+ * own input list, so the resulting Sync has no Sync inputs.
+ */
+static ir_node *transform_node_Sync(ir_node *n) {
+ int i, arity;
+
+ arity = get_irn_arity(n);
+ /* re-read the arity each iteration: add_irn_n() below grows it, and the
+    newly appended inputs must themselves be checked for nested Syncs */
+ for(i = 0; i < get_irn_arity(n); /*empty*/) {
+ int i2, arity2;
+ ir_node *in = get_irn_n(n, i);
+ if(!is_Sync(in)) {
+ ++i;
+ continue;
+ }
+
+ /* set sync input 0 instead of the sync */
+ set_irn_n(n, i, get_irn_n(in, 0));
+ /* i is NOT advanced, so we check this replacement input again for syncs */
+
+ /* append all other inputs of the sync to our sync */
+ arity2 = get_irn_arity(in);
+ for(i2 = 1; i2 < arity2; ++i2) {
+ ir_node *in_in = get_irn_n(in, i2);
+ add_irn_n(n, in_in);
+ /* increase arity so we also check the new inputs for syncs */
+ arity++;
+ /* NOTE(review): 'arity' is updated but the loop condition re-reads
+    get_irn_arity(n) directly, so this local is effectively unused */
+ }
+ }
+
+ /* the in array changed, so rehash the sync node in the value table */
+ add_identities(current_ir_graph->value_table, n);
+
+ return n;
+}
+
/**
* Tries several [inplace] [optimizing] transformations and returns an
* equivalent node. The difference to equivalent_node() is that these
CASE(End);
CASE(Mux);
CASE(Psi);
+ CASE(Sync);
default:
/* leave NULL */;
}