#include "irtools.h"
#include "irhooks.h"
#include "array_t.h"
+#include "vrp.h"
+#include "firm_types.h"
/* Make types visible to allow most efficient access */
#include "entity_t.h"
/**
* Returns the tarval of a Const node or tarval_bad for all other nodes.
*/
-static tarval *default_value_of(const ir_node *n) {
+static tarval *default_value_of(const ir_node *n)
+{
if (is_Const(n))
return get_Const_tarval(n); /* might return tarval_bad */
else
value_of_func value_of_ptr = default_value_of;
/**
 * Set a new value_of function.
 */
-void set_value_of_func(value_of_func func) {
+void set_value_of_func(value_of_func func)
+{
if (func != NULL)
value_of_ptr = func;
else
/**
* Return the value of a Constant.
*/
-static tarval *computed_value_Const(const ir_node *n) {
+static tarval *computed_value_Const(const ir_node *n)
+{
return get_Const_tarval(n);
} /* computed_value_Const */
/**
* Return the value of a 'sizeof', 'alignof' or 'offsetof' SymConst.
*/
-static tarval *computed_value_SymConst(const ir_node *n) {
+static tarval *computed_value_SymConst(const ir_node *n)
+{
ir_type *type;
ir_entity *ent;
/**
* Return the value of an Add.
*/
-static tarval *computed_value_Add(const ir_node *n) {
+static tarval *computed_value_Add(const ir_node *n)
+{
ir_node *a = get_Add_left(n);
ir_node *b = get_Add_right(n);
* Return the value of a Sub.
* Special case: a - a
*/
-static tarval *computed_value_Sub(const ir_node *n) {
+static tarval *computed_value_Sub(const ir_node *n)
+{
ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Sub_left(n);
ir_node *b = get_Sub_right(n);
* Return the value of a Carry.
* Special case: a op 0, 0 op b
*/
-static tarval *computed_value_Carry(const ir_node *n) {
+static tarval *computed_value_Carry(const ir_node *n)
+{
ir_node *a = get_binop_left(n);
ir_node *b = get_binop_right(n);
ir_mode *m = get_irn_mode(n);
* Return the value of a Borrow.
* Special case: a op 0
*/
-static tarval *computed_value_Borrow(const ir_node *n) {
+static tarval *computed_value_Borrow(const ir_node *n)
+{
ir_node *a = get_binop_left(n);
ir_node *b = get_binop_right(n);
ir_mode *m = get_irn_mode(n);
/**
* Return the value of an unary Minus.
*/
-static tarval *computed_value_Minus(const ir_node *n) {
+static tarval *computed_value_Minus(const ir_node *n)
+{
ir_node *a = get_Minus_op(n);
tarval *ta = value_of(a);
/**
* Return the value of a Mul.
*/
-static tarval *computed_value_Mul(const ir_node *n) {
+static tarval *computed_value_Mul(const ir_node *n)
+{
ir_node *a = get_Mul_left(n);
ir_node *b = get_Mul_right(n);
ir_mode *mode;
/**
* Return the value of an Abs.
*/
-static tarval *computed_value_Abs(const ir_node *n) {
+static tarval *computed_value_Abs(const ir_node *n)
+{
ir_node *a = get_Abs_op(n);
tarval *ta = value_of(a);
* Return the value of an And.
* Special case: a & 0, 0 & b
*/
-static tarval *computed_value_And(const ir_node *n) {
+static tarval *computed_value_And(const ir_node *n)
+{
ir_node *a = get_And_left(n);
ir_node *b = get_And_right(n);
* Return the value of an Or.
* Special case: a | 1...1, 1...1 | b
*/
-static tarval *computed_value_Or(const ir_node *n) {
+static tarval *computed_value_Or(const ir_node *n)
+{
ir_node *a = get_Or_left(n);
ir_node *b = get_Or_right(n);
/**
* Return the value of an Eor.
*/
-static tarval *computed_value_Eor(const ir_node *n) {
+static tarval *computed_value_Eor(const ir_node *n)
+{
ir_node *a = get_Eor_left(n);
ir_node *b = get_Eor_right(n);
tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)) {
- return tarval_eor (ta, tb);
+ return tarval_eor(ta, tb);
}
return tarval_bad;
} /* computed_value_Eor */
/**
* Return the value of a Not.
*/
-static tarval *computed_value_Not(const ir_node *n) {
+static tarval *computed_value_Not(const ir_node *n)
+{
ir_node *a = get_Not_op(n);
tarval *ta = value_of(a);
/**
* Return the value of a Shl.
*/
-static tarval *computed_value_Shl(const ir_node *n) {
+static tarval *computed_value_Shl(const ir_node *n)
+{
ir_node *a = get_Shl_left(n);
ir_node *b = get_Shl_right(n);
tarval *tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)) {
- return tarval_shl (ta, tb);
+ return tarval_shl(ta, tb);
}
return tarval_bad;
} /* computed_value_Shl */
/**
* Return the value of a Shr.
*/
-static tarval *computed_value_Shr(const ir_node *n) {
+static tarval *computed_value_Shr(const ir_node *n)
+{
ir_node *a = get_Shr_left(n);
ir_node *b = get_Shr_right(n);
tarval *tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)) {
- return tarval_shr (ta, tb);
+ return tarval_shr(ta, tb);
}
return tarval_bad;
} /* computed_value_Shr */
/**
* Return the value of a Shrs.
*/
-static tarval *computed_value_Shrs(const ir_node *n) {
+static tarval *computed_value_Shrs(const ir_node *n)
+{
ir_node *a = get_Shrs_left(n);
ir_node *b = get_Shrs_right(n);
tarval *tb = value_of(b);
if ((ta != tarval_bad) && (tb != tarval_bad)) {
- return tarval_shrs (ta, tb);
+ return tarval_shrs(ta, tb);
}
return tarval_bad;
} /* computed_value_Shrs */
/**
* Return the value of a Rotl.
*/
-static tarval *computed_value_Rotl(const ir_node *n) {
+static tarval *computed_value_Rotl(const ir_node *n)
+{
ir_node *a = get_Rotl_left(n);
ir_node *b = get_Rotl_right(n);
/**
* Return the value of a Conv.
*/
-static tarval *computed_value_Conv(const ir_node *n) {
+static tarval *computed_value_Conv(const ir_node *n)
+{
ir_node *a = get_Conv_op(n);
tarval *ta = value_of(a);
* Calculate the value of a Mux: can be evaluated, if the
* sel and the right input are known.
*/
-static tarval *computed_value_Mux(const ir_node *n) {
+static tarval *computed_value_Mux(const ir_node *n)
+{
ir_node *sel = get_Mux_sel(n);
tarval *ts = value_of(sel);
* Calculate the value of a Confirm: can be evaluated,
* if it has the form Confirm(x, '=', Const).
*/
-static tarval *computed_value_Confirm(const ir_node *n) {
+static tarval *computed_value_Confirm(const ir_node *n)
+{
/*
* Beware: we might produce Phi(Confirm(x == true), Confirm(x == false)).
- * Do NOT optimize them away (CondEval wants them), so wait until
+ * Do NOT optimize them away (jump threading wants them), so wait until
* remove_confirm is activated.
*/
if (get_opt_remove_confirm()) {
* only 1 is used.
* There are several cases where we can evaluate a Cmp node, see later.
*/
-static tarval *computed_value_Proj_Cmp(const ir_node *n) {
- ir_node *a = get_Proj_pred(n);
- ir_node *aa = get_Cmp_left(a);
- ir_node *ab = get_Cmp_right(a);
- long proj_nr = get_Proj_proj(n);
+static tarval *computed_value_Proj_Cmp(const ir_node *n)
+{
+ ir_node *cmp = get_Proj_pred(n);
+ ir_node *left = get_Cmp_left(cmp);
+ ir_node *right = get_Cmp_right(cmp);
+ long pn_cmp = get_Proj_proj(n);
+ ir_mode *mode = get_irn_mode(left);
+ tarval *tv_l, *tv_r;
/*
* BEWARE: a == a is NOT always true for floating point values, as
* NaN != NaN is defined, so we must check this here.
*/
- if (aa == ab && (
- !mode_is_float(get_irn_mode(aa)) || proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Gt)
- ) { /* 1.: */
-
+ if (left == right && (!mode_is_float(mode) || pn_cmp == pn_Cmp_Lt || pn_cmp == pn_Cmp_Gt)) {
/* This is a trick with the bits used for encoding the Cmp
Proj numbers, the following statement is not the same:
- return new_tarval_from_long (proj_nr == pn_Cmp_Eq, mode_b) */
- return new_tarval_from_long (proj_nr & pn_Cmp_Eq, mode_b);
+ return new_tarval_from_long(pn_cmp == pn_Cmp_Eq, mode_b) */
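+		/* pn_Cmp_Eq is a single bit of the relation encoding; Le and Ge
+		 * contain it and both hold when left == right, so masking yields
+		 * the right answer for every projection */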
+ return new_tarval_from_long(pn_cmp & pn_Cmp_Eq, mode_b);
}
- else {
- tarval *taa = value_of(aa);
- tarval *tab = value_of(ab);
- ir_mode *mode = get_irn_mode(aa);
+ tv_l = value_of(left);
+ tv_r = value_of(right);
+ if ((tv_l != tarval_bad) && (tv_r != tarval_bad)) {
/*
* The predecessors of Cmp are target values. We can evaluate
* the Cmp.
*/
- if ((taa != tarval_bad) && (tab != tarval_bad)) {
- /* strange checks... */
- pn_Cmp flags = tarval_cmp(taa, tab);
- if (flags != pn_Cmp_False) {
- return new_tarval_from_long (proj_nr & flags, mode_b);
- }
+ pn_Cmp flags = tarval_cmp(tv_l, tv_r);
+ if (flags != pn_Cmp_False) {
+ return new_tarval_from_long(pn_cmp & flags, mode_b);
}
+ } else if (mode_is_int(mode)) {
/* for integer values, we can check against MIN/MAX */
- else if (mode_is_int(mode)) {
+ pn_Cmp cmp_result;
+
+ if (tv_l == get_mode_min(mode)) {
/* MIN <=/> x. This results in true/false. */
- if (taa == get_mode_min(mode)) {
- /* a compare with the MIN value */
- if (proj_nr == pn_Cmp_Le)
- return get_tarval_b_true();
- else if (proj_nr == pn_Cmp_Gt)
- return get_tarval_b_false();
- }
+ if (pn_cmp == pn_Cmp_Le)
+ return tarval_b_true;
+ else if (pn_cmp == pn_Cmp_Gt)
+ return tarval_b_false;
+ } else if (tv_r == get_mode_min(mode)) {
/* x >=/< MIN. This results in true/false. */
- else
- if (tab == get_mode_min(mode)) {
- /* a compare with the MIN value */
- if (proj_nr == pn_Cmp_Ge)
- return get_tarval_b_true();
- else if (proj_nr == pn_Cmp_Lt)
- return get_tarval_b_false();
- }
- /* MAX >=/< x. This results in true/false. */
- else if (taa == get_mode_max(mode)) {
- if (proj_nr == pn_Cmp_Ge)
- return get_tarval_b_true();
- else if (proj_nr == pn_Cmp_Lt)
- return get_tarval_b_false();
- }
- /* x <=/> MAX. This results in true/false. */
- else if (tab == get_mode_max(mode)) {
- if (proj_nr == pn_Cmp_Le)
- return get_tarval_b_true();
- else if (proj_nr == pn_Cmp_Gt)
- return get_tarval_b_false();
+ if (pn_cmp == pn_Cmp_Ge)
+ return tarval_b_true;
+ else if (pn_cmp == pn_Cmp_Lt)
+ return tarval_b_false;
+ } else if (tv_l == get_mode_max(mode)) {
+ /* MAX >=/< x. This results in true/false. */
+ if (pn_cmp == pn_Cmp_Ge)
+ return tarval_b_true;
+ else if (pn_cmp == pn_Cmp_Lt)
+ return tarval_b_false;
+ } else if (tv_r == get_mode_max(mode)) {
+ /* x <=/> MAX. This results in true/false. */
+ if (pn_cmp == pn_Cmp_Le)
+ return tarval_b_true;
+ else if (pn_cmp == pn_Cmp_Gt)
+ return tarval_b_false;
+ }
+
+ cmp_result = vrp_cmp(left, right);
+ if (cmp_result != pn_Cmp_False) {
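+ /* vrp may only prove inequality (pn_Cmp_Lg == pn_Cmp_Lt|pn_Cmp_Gt);
+ * masking pn_cmp with that would wrongly affirm Lt or Gt alone, so
+ * the inequality case is handled separately below */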
+ if (cmp_result == pn_Cmp_Lg) {
+ if (pn_cmp == pn_Cmp_Eq) {
+ return tarval_b_false;
+ } else if (pn_cmp == pn_Cmp_Lg) {
+ return tarval_b_true;
}
+ } else {
+ return new_tarval_from_long(cmp_result & pn_cmp, mode_b);
+ }
}
- /*
- * The predecessors are Allocs or (void*)(0) constants. Allocs never
- * return NULL, they raise an exception. Therefore we can predict
- * the Cmp result.
- */
- else {
- ir_node *aaa = skip_Proj(aa);
- ir_node *aba = skip_Proj(ab);
-
- if ( ( (/* aa is ProjP and aaa is Alloc */
- is_Proj(aa)
- && mode_is_reference(get_irn_mode(aa))
- && is_Alloc(aaa))
- && ( (/* ab is NULL */
- mode_is_reference(get_irn_mode(ab))
- && tarval_is_null(tab))
- || (/* ab is other Alloc */
- is_Proj(ab)
- && mode_is_reference(get_irn_mode(ab))
- && is_Alloc(aba)
- && (aaa != aba))))
- || (/* aa is NULL and aba is Alloc */
- mode_is_reference(get_irn_mode(aa))
- && tarval_is_null(taa)
- && is_Proj(ab)
- && mode_is_reference(get_irn_mode(ab))
- && is_Alloc(aba)))
- /* 3.: */
- return new_tarval_from_long(proj_nr & pn_Cmp_Lg, mode_b);
- }
- }
- return computed_value_Cmp_Confirm(a, aa, ab, proj_nr);
+ } else if (mode_is_reference(mode)) {
+ /* pointer compare */
+ ir_node *s_l = skip_Proj(left);
+ ir_node *s_r = skip_Proj(right);
+
+ if ((is_Alloc(s_l) && tarval_is_null(tv_r)) ||
+ (tarval_is_null(tv_l) && is_Alloc(s_r))) {
+ /*
+ * The predecessors are Allocs and (void*)(0) constants. In Firm, Allocs never
+ * return NULL, they raise an exception. Therefore we can predict
+ * the Cmp result.
+ */
+ return new_tarval_from_long(pn_cmp & pn_Cmp_Lg, mode_b);
+ }
+ }
+ return computed_value_Cmp_Confirm(cmp, left, right, pn_cmp);
} /* computed_value_Proj_Cmp */
/**
* Return the value of a floating point Quot.
*/
-static tarval *do_computed_value_Quot(const ir_node *a, const ir_node *b) {
+static tarval *do_computed_value_Quot(const ir_node *a, const ir_node *b)
+{
tarval *ta = value_of(a);
tarval *tb = value_of(b);
* Calculate the value of an integer Div of two nodes.
* Special case: 0 / b
*/
-static tarval *do_computed_value_Div(const ir_node *a, const ir_node *b) {
+static tarval *do_computed_value_Div(const ir_node *a, const ir_node *b)
+{
tarval *ta = value_of(a);
tarval *tb;
const ir_node *dummy;
* Calculate the value of an integer Mod of two nodes.
* Special case: a % 1
*/
-static tarval *do_computed_value_Mod(const ir_node *a, const ir_node *b) {
+static tarval *do_computed_value_Mod(const ir_node *a, const ir_node *b)
+{
tarval *ta = value_of(a);
tarval *tb = value_of(b);
/**
* Return the value of a Proj(DivMod).
*/
-static tarval *computed_value_Proj_DivMod(const ir_node *n) {
+static tarval *computed_value_Proj_DivMod(const ir_node *n)
+{
long proj_nr = get_Proj_proj(n);
/* compute either the Div or the Mod part */
/**
* Return the value of a Proj(Div).
*/
-static tarval *computed_value_Proj_Div(const ir_node *n) {
+static tarval *computed_value_Proj_Div(const ir_node *n)
+{
long proj_nr = get_Proj_proj(n);
if (proj_nr == pn_Div_res) {
/**
* Return the value of a Proj(Mod).
*/
-static tarval *computed_value_Proj_Mod(const ir_node *n) {
+static tarval *computed_value_Proj_Mod(const ir_node *n)
+{
long proj_nr = get_Proj_proj(n);
if (proj_nr == pn_Mod_res) {
/**
* Return the value of a Proj(Quot).
*/
-static tarval *computed_value_Proj_Quot(const ir_node *n) {
+static tarval *computed_value_Proj_Quot(const ir_node *n)
+{
long proj_nr = get_Proj_proj(n);
if (proj_nr == pn_Quot_res) {
/**
* Return the value of a Proj.
*/
-static tarval *computed_value_Proj(const ir_node *proj) {
+static tarval *computed_value_Proj(const ir_node *proj)
+{
ir_node *n = get_Proj_pred(proj);
if (n->op->ops.computed_value_Proj != NULL)
*
* @param n The node to be evaluated
*/
-tarval *computed_value(const ir_node *n) {
+tarval *computed_value(const ir_node *n)
+{
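+ /* value-range propagation: if the mask of bits known to be set equals
+ * the mask of bits that may be set, every bit of n is known and n
+ * evaluates to exactly that constant */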
+ vrp_attr *vrp = vrp_get_info(n);
+ if (vrp && vrp->valid && tarval_cmp(vrp->bits_set, vrp->bits_not_set) == pn_Cmp_Eq) {
+ return vrp->bits_set;
+ }
if (n->op->ops.computed_value)
return n->op->ops.computed_value(n);
return tarval_bad;
CASE_PROJ(Quot);
CASE(Proj);
default:
- /* leave NULL */;
+ /* leave NULL */
+ break;
}
return ops;
int n_preds;
/* don't optimize dead or labeled blocks */
- if (is_Block_dead(n) || has_Block_label(n))
+ if (is_Block_dead(n) || has_Block_entity(n))
return n;
n_preds = get_Block_n_cfgpreds(n);
* Returns an equivalent node for a Jmp, a Bad :-)
* Of course this only happens if the Block of the Jmp is dead.
*/
-static ir_node *equivalent_node_Jmp(ir_node *n) {
+static ir_node *equivalent_node_Jmp(ir_node *n)
+{
ir_node *oldn = n;
/* unreachable code elimination */
* Optimize operations that are commutative and have neutral 0,
* so a op 0 = 0 op a = a.
*/
-static ir_node *equivalent_node_neutral_zero(ir_node *n) {
+static ir_node *equivalent_node_neutral_zero(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_binop_left(n);
/**
* Eor is commutative and has neutral 0.
*/
-static ir_node *equivalent_node_Eor(ir_node *n) {
+static ir_node *equivalent_node_Eor(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a;
ir_node *b;
* Beware: The mode of an Add may be different from the mode of its
* predecessors, so we cannot return a predecessor in all cases.
*/
-static ir_node *equivalent_node_Add(ir_node *n) {
+static ir_node *equivalent_node_Add(ir_node *n)
+{
ir_node *oldn = n;
ir_node *left, *right;
ir_mode *mode = get_irn_mode(n);
* optimize operations that are not commutative but have neutral 0 on left,
* so a op 0 = a.
*/
-static ir_node *equivalent_node_left_zero(ir_node *n) {
+static ir_node *equivalent_node_left_zero(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_binop_left(n);
* Beware: The mode of a Sub may be different from the mode of its
* predecessors, so we cannot return a predecessor in all cases.
*/
-static ir_node *equivalent_node_Sub(ir_node *n) {
+static ir_node *equivalent_node_Sub(ir_node *n)
+{
ir_node *oldn = n;
ir_node *b;
ir_mode *mode = get_irn_mode(n);
* We handle it here anyway, but the better way would be a
* flag. This would be needed for Pascal, for instance.
*/
-static ir_node *equivalent_node_idempotent_unop(ir_node *n) {
+static ir_node *equivalent_node_idempotent_unop(ir_node *n)
+{
ir_node *oldn = n;
ir_node *pred = get_unop_op(n);
/**
* Optimize a * 1 = 1 * a = a.
*/
-static ir_node *equivalent_node_Mul(ir_node *n) {
+static ir_node *equivalent_node_Mul(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_Mul_left(n);
/**
* Use algebraic simplification a | a = a | 0 = 0 | a = a.
*/
-static ir_node *equivalent_node_Or(ir_node *n) {
+static ir_node *equivalent_node_Or(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_Or_left(n);
/**
* Optimize a & 0b1...1 = 0b1...1 & a = a & a = (a|X) & a = a.
*/
-static ir_node *equivalent_node_And(ir_node *n) {
+static ir_node *equivalent_node_And(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_And_left(n);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_AND);
return n;
}
- /* constants are cormalized to right, check this site first */
+ /* constants are normalized to right, check this side first */
tv = value_of(b);
if (tarval_is_all_one(tv)) {
n = a;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
return n;
}
+ if (tv != get_tarval_bad()) {
+ ir_mode *mode = get_irn_mode(n);
+ if (!mode_is_signed(mode) && is_Conv(a)) {
+ ir_node *convop = get_Conv_op(a);
+ ir_mode *convopmode = get_irn_mode(convop);
+ if (!mode_is_signed(convopmode)) {
+ if (tarval_is_all_one(tarval_convert_to(tv, convopmode))) {
+ /* Conv(X) & all_one(mode(X)) = Conv(X) */
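+ /* an unsigned Conv can only zero-extend, and the mask is all-one on
+ * every bit the operand may carry, so the And is a no-op */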
+ n = a;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
+ return n;
+ }
+ }
+ }
+ }
tv = value_of(a);
if (tarval_is_all_one(tv)) {
n = b;
/**
* Try to remove useless Conv's:
*/
-static ir_node *equivalent_node_Conv(ir_node *n) {
+static ir_node *equivalent_node_Conv(ir_node *n)
+{
ir_node *oldn = n;
ir_node *a = get_Conv_op(n);
* A Cast may be removed if the type of the previous node
* is already the type of the Cast.
*/
-static ir_node *equivalent_node_Cast(ir_node *n) {
+static ir_node *equivalent_node_Cast(ir_node *n)
+{
ir_node *oldn = n;
ir_node *pred = get_Cast_op(n);
* - fold Phi-nodes, iff they have only one predecessor except
* themselves.
*/
-static ir_node *equivalent_node_Phi(ir_node *n) {
+static ir_node *equivalent_node_Phi(ir_node *n)
+{
int i, n_preds;
ir_node *oldn = n;
* - fold Sync-nodes, iff they have only one predecessor except
* themselves.
*/
-static ir_node *equivalent_node_Sync(ir_node *n) {
+static ir_node *equivalent_node_Sync(ir_node *n)
+{
int arity = get_Sync_n_preds(n);
int i;
/**
* Optimize Proj(Tuple).
*/
-static ir_node *equivalent_node_Proj_Tuple(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Tuple(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *tuple = get_Proj_pred(proj);
/**
* Optimize a / 1 = a.
*/
-static ir_node *equivalent_node_Proj_Div(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Div(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *div = get_Proj_pred(proj);
ir_node *b = get_Div_right(div);
/**
* Optimize a / 1.0 = a.
*/
-static ir_node *equivalent_node_Proj_Quot(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Quot(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *quot = get_Proj_pred(proj);
ir_node *b = get_Quot_right(quot);
/**
* Optimize a / 1 = a.
*/
-static ir_node *equivalent_node_Proj_DivMod(ir_node *proj) {
+static ir_node *equivalent_node_Proj_DivMod(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *divmod = get_Proj_pred(proj);
ir_node *b = get_DivMod_right(divmod);
/**
* Optimize CopyB(mem, x, x) into a Nop.
*/
-static ir_node *equivalent_node_Proj_CopyB(ir_node *proj) {
+static ir_node *equivalent_node_Proj_CopyB(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *copyb = get_Proj_pred(proj);
ir_node *a = get_CopyB_dst(copyb);
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
break;
- case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
proj = get_irg_bad(current_ir_graph);
/**
* Optimize Bound(idx, idx, upper) into idx.
*/
-static ir_node *equivalent_node_Proj_Bound(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Bound(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *bound = get_Proj_pred(proj);
ir_node *idx = get_Bound_index(bound);
break;
default:
/* cannot optimize pn_Bound_X_regular, handled in transform ... */
- ;
+ break;
}
}
return proj;
/**
* Optimize an Exception Proj(Load) with a non-null address.
*/
-static ir_node *equivalent_node_Proj_Load(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Load(ir_node *proj)
+{
if (get_opt_ldst_only_null_ptr_exceptions()) {
if (get_irn_mode(proj) == mode_X) {
ir_node *load = get_Proj_pred(proj);
/**
* Optimize an Exception Proj(Store) with a non-null address.
*/
-static ir_node *equivalent_node_Proj_Store(ir_node *proj) {
+static ir_node *equivalent_node_Proj_Store(ir_node *proj)
+{
if (get_opt_ldst_only_null_ptr_exceptions()) {
if (get_irn_mode(proj) == mode_X) {
ir_node *store = get_Proj_pred(proj);
* Does all optimizations on nodes that must be done on their Projs
* because new nodes may be created.
*/
-static ir_node *equivalent_node_Proj(ir_node *proj) {
+static ir_node *equivalent_node_Proj(ir_node *proj)
+{
ir_node *n = get_Proj_pred(proj);
if (get_irn_mode(proj) == mode_X) {
/**
* Remove Id's.
*/
-static ir_node *equivalent_node_Id(ir_node *n) {
+static ir_node *equivalent_node_Id(ir_node *n)
+{
ir_node *oldn = n;
do {
static ir_node *equivalent_node_Mux(ir_node *n)
{
ir_node *oldn = n, *sel = get_Mux_sel(n);
+ ir_node *n_t, *n_f;
tarval *ts = value_of(sel);
/* Mux(true, f, t) == t */
if (ts == tarval_b_true) {
n = get_Mux_true(n);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ return n;
}
/* Mux(false, f, t) == f */
- else if (ts == tarval_b_false) {
+ if (ts == tarval_b_false) {
n = get_Mux_false(n);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ return n;
+ }
+ n_t = get_Mux_true(n);
+ n_f = get_Mux_false(n);
+
+ /* Mux(v, x, T) == x */
+ if (is_Unknown(n_f)) {
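+ /* an Unknown operand may take any value, in particular the other input */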
+ n = n_t;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ return n;
}
+ /* Mux(v, T, x) == x */
+ if (is_Unknown(n_t)) {
+ n = n_f;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ return n;
+ }
+
/* Mux(v, x, x) == x */
- else if (get_Mux_false(n) == get_Mux_true(n)) {
- n = get_Mux_true(n);
+ if (n_t == n_f) {
+ n = n_t;
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ return n;
}
- else if (is_Proj(sel) && !mode_honor_signed_zeros(get_irn_mode(n))) {
+ if (is_Proj(sel) && !mode_honor_signed_zeros(get_irn_mode(n))) {
ir_node *cmp = get_Proj_pred(sel);
long proj_nr = get_Proj_proj(sel);
ir_node *f = get_Mux_false(n);
* Remove Confirm nodes if setting is on.
* Replace Confirms(x, '=', Constlike) by Constlike.
*/
-static ir_node *equivalent_node_Confirm(ir_node *n) {
+static ir_node *equivalent_node_Confirm(ir_node *n)
+{
ir_node *pred = get_Confirm_value(n);
pn_Cmp pnc = get_Confirm_cmp(n);
* If a node returns a Tuple we cannot just skip it. If the size of the
* in array fits, we transform n into a tuple (e.g., Div).
*/
-ir_node *equivalent_node(ir_node *n) {
+ir_node *equivalent_node(ir_node *n)
+{
if (n->op->ops.equivalent_node)
return n->op->ops.equivalent_node(n);
return n;
CASE(Mux);
CASE(Confirm);
default:
- /* leave NULL */;
+ /* leave NULL */
+ break;
}
return ops;
* Returns non-zero if a node is a Phi node
* with all predecessors constant.
*/
-static int is_const_Phi(ir_node *n) {
+static int is_const_Phi(ir_node *n)
+{
int i;
if (! is_Phi(n) || get_irn_arity(n) == 0)
typedef tarval *(*tarval_sub_type)(tarval *a, tarval *b, ir_mode *mode);
typedef tarval *(*tarval_binop_type)(tarval *a, tarval *b);
+/**
+ * In reality eval_func should be tarval *(*eval_func)(), but incomplete
+ * declarations are bad style and generate noisy warnings.
+ */
+typedef void (*eval_func)(void);
+
/**
* Wrapper for the tarval binop evaluation, tarval_sub has one more parameter.
*/
-static tarval *do_eval(tarval *(*eval)(), tarval *a, tarval *b, ir_mode *mode) {
- if (eval == tarval_sub) {
+static tarval *do_eval(eval_func eval, tarval *a, tarval *b, ir_mode *mode)
+{
+ if (eval == (eval_func) tarval_sub) {
tarval_sub_type func = (tarval_sub_type)eval;
return func(a, b, mode);
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)(), ir_mode *mode, int left) {
+static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, eval_func eval, ir_mode *mode, int left)
+{
tarval *tv;
void **res;
ir_node *pred;
pred = get_irn_n(phi, i);
res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
}
- return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
+ return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_binop_on_phi */
/**
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, tarval *(*eval)(), ir_mode *mode) {
+static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode)
+{
tarval *tv_l, *tv_r, *tv;
void **res;
ir_node *pred;
pred = get_irn_n(a, i);
res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
}
- return new_r_Phi(irg, get_nodes_block(a), n, (ir_node **)res, mode);
+ return new_r_Phi(get_nodes_block(a), n, (ir_node **)res, mode);
} /* apply_binop_on_2_phis */
/**
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_unop_on_phi(ir_node *phi, tarval *(*eval)(tarval *)) {
+static ir_node *apply_unop_on_phi(ir_node *phi, tarval *(*eval)(tarval *))
+{
tarval *tv;
void **res;
ir_node *pred;
pred = get_irn_n(phi, i);
res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
}
- return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
+ return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_unop_on_phi */
/**
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode) {
+static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode)
+{
tarval *tv;
void **res;
ir_node *pred;
pred = get_irn_n(phi, i);
res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
}
- return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
+ return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_conv_on_phi */
/**
* SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
* If possible, remove the Conv's.
*/
-static ir_node *transform_node_AddSub(ir_node *n) {
+static ir_node *transform_node_AddSub(ir_node *n)
+{
ir_mode *mode = get_irn_mode(n);
if (mode_is_reference(mode)) {
/* convert an AddP(P, *s) into AddP(P, *u) */
ir_mode *nm = get_reference_mode_unsigned_eq(mode);
- ir_node *pre = new_r_Conv(current_ir_graph, get_nodes_block(n), right, nm);
+ ir_node *pre = new_r_Conv(get_nodes_block(n), right, nm);
set_binop_right(n, pre);
}
}
} /* transform_node_AddSub */
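+/**
+ * The do { } while (0) wrappers make the following macros expand to a
+ * single statement each, so they compose safely with if/else at the
+ * call site.
+ */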
#define HANDLE_BINOP_PHI(eval, a, b, c, mode) \
+ do { \
c = NULL; \
if (is_Const(b) && is_const_Phi(a)) { \
/* check for Op(Phi, Const) */ \
if (c) { \
DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
return c; \
- }
+ } \
+ } while (0)
#define HANDLE_UNOP_PHI(eval, a, c) \
+ do { \
c = NULL; \
if (is_const_Phi(a)) { \
/* check for Op(Phi) */ \
DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
return c; \
} \
- }
+ } \
+ } while (0)
/**
* Do the AddSub optimization, then transform
* Add(a,-b) into Sub(a,b).
* Reassociation might fold this further.
*/
-static ir_node *transform_node_Add(ir_node *n) {
+static ir_node *transform_node_Add(ir_node *n)
+{
ir_mode *mode;
ir_node *a, *b, *c, *oldn = n;
+ vrp_attr *a_vrp, *b_vrp;
n = transform_node_AddSub(n);
if (mode_is_reference(mode)) {
ir_mode *lmode = get_irn_mode(a);
- if (is_Const(b) && is_Const_null(b) && mode_is_int(lmode)) {
+ if (is_Const(b) && is_Const_null(b) && mode_is_int(lmode)) {
/* an Add(a, NULL) is a hidden Conv */
dbg_info *dbg = get_irn_dbg_info(n);
- return new_rd_Conv(dbg, current_ir_graph, get_nodes_block(n), a, mode);
+ return new_rd_Conv(dbg, get_nodes_block(n), a, mode);
}
}
- HANDLE_BINOP_PHI(tarval_add, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_add, a, b, c, mode);
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
if (mode_is_num(mode)) {
/* the following code leads to endless recursion when Muls are replaced by a simple instruction chain */
- if (!is_arch_dep_running() && a == b && mode_is_int(mode)) {
+ if (!is_irg_state(current_ir_graph, IR_GRAPH_STATE_ARCH_DEP)
+ && a == b && mode_is_int(mode)) {
ir_node *block = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
- current_ir_graph,
block,
a,
new_Const_long(mode, 2),
if (is_Minus(a)) {
n = new_rd_Sub(
get_irn_dbg_info(n),
- current_ir_graph,
get_nodes_block(n),
b,
get_Minus_op(a),
if (is_Minus(b)) {
n = new_rd_Sub(
get_irn_dbg_info(n),
- current_ir_graph,
get_nodes_block(n),
a,
get_Minus_op(b),
if (is_Const(b) && is_Const_one(b)) {
/* ~x + 1 = -x */
ir_node *blk = get_nodes_block(n);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), blk, op, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
return n;
}
}
}
}
+
+ a_vrp = vrp_get_info(a);
+ b_vrp = vrp_get_info(b);
+
+ if (a_vrp && b_vrp) {
+ tarval *c = tarval_and(a_vrp->bits_not_set, b_vrp->bits_not_set);
+
+ if (tarval_is_null(c)) {
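+ /* no bit position may be set in both operands, so the addition can
+ * never produce a carry and behaves exactly like an Or */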
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ return new_rd_Or(dbgi, get_nodes_block(n),
+ a, b, mode);
+ }
+ }
return n;
} /* transform_node_Add */
/**
* returns -cnst or NULL if impossible
*/
-static ir_node *const_negate(ir_node *cnst) {
+static ir_node *const_negate(ir_node *cnst)
+{
tarval *tv = tarval_neg(get_Const_tarval(cnst));
dbg_info *dbgi = get_irn_dbg_info(cnst);
ir_graph *irg = get_irn_irg(cnst);
* Sub(x, Add(x, a)) -> -a
* Sub(x, Const) -> Add(x, -Const)
*/
-static ir_node *transform_node_Sub(ir_node *n) {
+static ir_node *transform_node_Sub(ir_node *n)
+{
ir_mode *mode;
ir_node *oldn = n;
ir_node *a, *b, *c;
if (is_Const(b) && is_Const_null(b) && mode_is_reference(lmode)) {
/* a Sub(a, NULL) is a hidden Conv */
dbg_info *dbg = get_irn_dbg_info(n);
- n = new_rd_Conv(dbg, current_ir_graph, get_nodes_block(n), a, mode);
+ n = new_rd_Conv(dbg, get_nodes_block(n), a, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_CONV);
return n;
}
get_Const_tarval(a) == get_mode_minus_one(mode)) {
/* -1 - x -> ~x */
dbg_info *dbg = get_irn_dbg_info(n);
- n = new_rd_Not(dbg, current_ir_graph, get_nodes_block(n), b, mode);
+ n = new_rd_Not(dbg, get_nodes_block(n), b, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_NOT);
return n;
}
}
restart:
- HANDLE_BINOP_PHI(tarval_sub, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_sub, a, b, c, mode);
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
if (cnst != NULL) {
ir_node *block = get_nodes_block(n);
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = get_irn_irg(n);
- n = new_rd_Add(dbgi, irg, block, a, cnst, mode);
+ n = new_rd_Add(dbgi, block, a, cnst, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
}
}
if (is_Minus(a)) { /* (-a) - b -> -(a + b) */
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
ir_node *left = get_Minus_op(a);
- ir_node *add = new_rd_Add(dbg, irg, block, left, b, mode);
+ ir_node *add = new_rd_Add(dbg, block, left, b, mode);
- n = new_rd_Minus(dbg, irg, block, add, mode);
+ n = new_rd_Minus(dbg, block, add, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
} else if (is_Minus(b)) { /* a - (-b) -> a + b */
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
ir_node *right = get_Minus_op(b);
- n = new_rd_Add(dbg, irg, block, a, right, mode);
+ n = new_rd_Add(dbg, block, a, right, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MINUS);
return n;
} else if (is_Sub(b)) {
/* a - (b - c) -> a + (c - b)
* -> (a - b) + c iff (b - c) is a pointer */
- ir_graph *irg = current_ir_graph;
dbg_info *s_dbg = get_irn_dbg_info(b);
ir_node *s_block = get_nodes_block(b);
ir_node *s_left = get_Sub_left(b);
ir_node *s_right = get_Sub_right(b);
ir_mode *s_mode = get_irn_mode(b);
if (mode_is_reference(s_mode)) {
- ir_node *sub = new_rd_Sub(s_dbg, irg, s_block, a, s_left, mode);
+ ir_node *sub = new_rd_Sub(s_dbg, s_block, a, s_left, mode);
dbg_info *a_dbg = get_irn_dbg_info(n);
ir_node *a_block = get_nodes_block(n);
if (s_mode != mode)
- s_right = new_r_Conv(irg, a_block, s_right, mode);
- n = new_rd_Add(a_dbg, irg, a_block, sub, s_right, mode);
+ s_right = new_r_Conv(a_block, s_right, mode);
+ n = new_rd_Add(a_dbg, a_block, sub, s_right, mode);
} else {
- ir_node *sub = new_rd_Sub(s_dbg, irg, s_block, s_right, s_left, s_mode);
+ ir_node *sub = new_rd_Sub(s_dbg, s_block, s_right, s_left, s_mode);
dbg_info *a_dbg = get_irn_dbg_info(n);
ir_node *a_block = get_nodes_block(n);
- n = new_rd_Add(a_dbg, irg, a_block, a, sub, mode);
+ n = new_rd_Add(a_dbg, a_block, a, sub, mode);
}
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
if (is_Const(m_right)) {
ir_node *cnst2 = const_negate(m_right);
if (cnst2 != NULL) {
- ir_graph *irg = current_ir_graph;
dbg_info *m_dbg = get_irn_dbg_info(b);
ir_node *m_block = get_nodes_block(b);
ir_node *m_left = get_Mul_left(b);
ir_mode *m_mode = get_irn_mode(b);
- ir_node *mul = new_rd_Mul(m_dbg, irg, m_block, m_left, cnst2, m_mode);
+ ir_node *mul = new_rd_Mul(m_dbg, m_block, m_left, cnst2, m_mode);
dbg_info *a_dbg = get_irn_dbg_info(n);
ir_node *a_block = get_nodes_block(n);
- n = new_rd_Add(a_dbg, irg, a_block, a, mul, mode);
+ n = new_rd_Add(a_dbg, a_block, a, mul, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
}
if (mode_is_num(mode) && mode == get_irn_mode(a) && is_Const(a) && is_Const_null(a)) {
n = new_rd_Minus(
get_irn_dbg_info(n),
- current_ir_graph,
get_nodes_block(n),
b,
mode);
if (left == b) {
if (mode != get_irn_mode(right)) {
/* This Sub is an effective Cast */
- right = new_r_Conv(get_irn_irg(n), get_nodes_block(n), right, mode);
+ right = new_r_Conv(get_nodes_block(n), right, mode);
}
n = right;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
} else if (right == b) {
if (mode != get_irn_mode(left)) {
/* This Sub is an effective Cast */
- left = new_r_Conv(get_irn_irg(n), get_nodes_block(n), left, mode);
+ left = new_r_Conv(get_nodes_block(n), left, mode);
}
n = left;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
if (left == a) {
ir_mode *r_mode = get_irn_mode(right);
- n = new_r_Minus(get_irn_irg(n), get_nodes_block(n), right, r_mode);
+ n = new_r_Minus(get_nodes_block(n), right, r_mode);
if (mode != r_mode) {
/* This Sub is an effective Cast */
- n = new_r_Conv(get_irn_irg(n), get_nodes_block(n), n, mode);
+ n = new_r_Conv(get_nodes_block(n), n, mode);
}
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
return n;
} else if (right == a) {
ir_mode *l_mode = get_irn_mode(left);
- n = new_r_Minus(get_irn_irg(n), get_nodes_block(n), left, l_mode);
+ n = new_r_Minus(get_nodes_block(n), left, l_mode);
if (mode != l_mode) {
/* This Sub is an effective Cast */
- n = new_r_Conv(get_irn_irg(n), get_nodes_block(n), n, mode);
+ n = new_r_Conv(get_nodes_block(n), n, mode);
}
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
return n;
ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
- current_ir_graph, blk,
+ blk,
ma,
new_rd_Sub(
get_irn_dbg_info(n),
- current_ir_graph, blk,
+ blk,
mb,
- new_Const_long(mode, 1),
+ new_Const(get_mode_one(mode)),
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
- current_ir_graph, blk,
+ blk,
mb,
new_rd_Sub(
get_irn_dbg_info(n),
- current_ir_graph, blk,
+ blk,
ma,
- new_Const_long(mode, 1),
+ new_Const(get_mode_one(mode)),
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
}
}
if (is_Sub(a)) { /* (x - y) - b -> x - (y + b) */
- ir_node *x = get_Sub_left(a);
+ ir_node *x = get_Sub_left(a);
ir_node *y = get_Sub_right(a);
ir_node *blk = get_nodes_block(n);
ir_mode *m_b = get_irn_mode(b);
return n;
}
- add = new_r_Add(current_ir_graph, blk, y, b, add_mode);
+ add = new_r_Add(blk, y, b, add_mode);
- n = new_rd_Sub(get_irn_dbg_info(n), current_ir_graph, blk, x, add, mode);
+ n = new_rd_Sub(get_irn_dbg_info(n), blk, x, add, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_SUB_X_Y_Z);
return n;
}
if (tv != tarval_bad) {
ir_node *blk = get_nodes_block(n);
ir_node *c = new_Const(tv);
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, get_Not_op(b), c, mode);
+ n = new_rd_Add(get_irn_dbg_info(n), blk, get_Not_op(b), c, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_C_NOT_X);
return n;
}
* Several transformations done on n*n=2n bits Mul.
* These transformations must be done here because new nodes may be produced.
*/
-static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode) {
+static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode)
+{
ir_node *oldn = n;
ir_node *a = get_Mul_left(n);
ir_node *b = get_Mul_right(n);
if (ta == get_mode_one(smode)) {
/* (L)1 * (L)b = (L)b */
ir_node *blk = get_nodes_block(n);
- n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, b, mode);
+ n = new_rd_Conv(get_irn_dbg_info(n), blk, b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
return n;
}
else if (ta == get_mode_minus_one(smode)) {
/* (L)-1 * (L)b = (L)-b */
ir_node *blk = get_nodes_block(n);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, b, smode);
- n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), blk, b, smode);
+ n = new_rd_Conv(get_irn_dbg_info(n), blk, n, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
if (tb == get_mode_one(smode)) {
/* (L)a * (L)1 = (L)a */
ir_node *blk = get_irn_n(a, -1);
- n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, a, mode);
+ n = new_rd_Conv(get_irn_dbg_info(n), blk, a, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
return n;
}
else if (tb == get_mode_minus_one(smode)) {
/* (L)a * (L)-1 = (L)-a */
ir_node *blk = get_nodes_block(n);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, a, smode);
- n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), blk, a, smode);
+ n = new_rd_Conv(get_irn_dbg_info(n), blk, n, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
* Do constant evaluation of Phi nodes.
* Do architecture dependent optimizations on Mul nodes
*/
-static ir_node *transform_node_Mul(ir_node *n) {
+static ir_node *transform_node_Mul(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Mul_left(n);
if (mode != get_irn_mode(a))
return transform_node_Mul2n(n, mode);
- HANDLE_BINOP_PHI(tarval_mul, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_mul, a, b, c, mode);
if (mode_is_signed(mode)) {
ir_node *r = NULL;
else if (value_of(b) == get_mode_minus_one(mode))
r = a;
if (r) {
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), r, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), r, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
if (cnst != NULL) {
dbg_info *dbgi = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- n = new_rd_Mul(dbgi, current_ir_graph, block, get_Minus_op(a), cnst, mode);
+ n = new_rd_Mul(dbgi, block, get_Minus_op(a), cnst, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
} else if (is_Minus(b)) { /* (-a) * (-b) -> a * b */
dbg_info *dbgi = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- n = new_rd_Mul(dbgi, current_ir_graph, block, get_Minus_op(a), get_Minus_op(b), mode);
+ n = new_rd_Mul(dbgi, block, get_Minus_op(a), get_Minus_op(b), mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_MINUS);
return n;
} else if (is_Sub(b)) { /* (-a) * (b - c) -> a * (c - b) */
ir_node *sub_l = get_Sub_left(b);
ir_node *sub_r = get_Sub_right(b);
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
- ir_node *new_b = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
- n = new_rd_Mul(dbgi, irg, block, get_Minus_op(a), new_b, mode);
+ ir_node *new_b = new_rd_Sub(dbgi, block, sub_r, sub_l, mode);
+ n = new_rd_Mul(dbgi, block, get_Minus_op(a), new_b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
return n;
}
ir_node *sub_l = get_Sub_left(a);
ir_node *sub_r = get_Sub_right(a);
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
- ir_node *new_a = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
- n = new_rd_Mul(dbgi, irg, block, new_a, get_Minus_op(b), mode);
+ ir_node *new_a = new_rd_Sub(dbgi, block, sub_r, sub_l, mode);
+ n = new_rd_Mul(dbgi, block, new_a, get_Minus_op(b), mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
return n;
}
+ } else if (is_Shl(a)) {
+ ir_node *const shl_l = get_Shl_left(a);
+ if (is_Const(shl_l) && is_Const_one(shl_l)) {
+ /* (1 << x) * b -> b << x */
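+ /* the multiplier is a power of two computed at run time, so the
+ * Mul can be replaced by a shift by the same amount */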
+ dbg_info *const dbgi = get_irn_dbg_info(n);
+ ir_node *const block = get_nodes_block(n);
+ ir_node *const shl_r = get_Shl_right(a);
+ n = new_rd_Shl(dbgi, block, b, shl_r, mode);
+ /* TODO: add FS_OPT_MUL_SHIFT and use DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_SHIFT); */
+ return n;
+ }
+ } else if (is_Shl(b)) {
+ ir_node *const shl_l = get_Shl_left(b);
+ if (is_Const(shl_l) && is_Const_one(shl_l)) {
+ /* a * (1 << x) -> a << x */
+ dbg_info *const dbgi = get_irn_dbg_info(n);
+ ir_node *const block = get_nodes_block(n);
+ ir_node *const shl_r = get_Shl_right(b);
+ n = new_rd_Shl(dbgi, block, a, shl_r, mode);
+ /* TODO: add FS_OPT_MUL_SHIFT and use DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_SHIFT); */
+ return n;
+ }
}
if (get_mode_arithmetic(mode) == irma_ieee754) {
if (is_Const(a)) {
if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)
&& !tarval_is_negative(tv)) {
/* 2.0 * b = b + b */
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), b, b, mode);
+ n = new_rd_Add(get_irn_dbg_info(n), get_nodes_block(n), b, b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
return n;
}
if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)
&& !tarval_is_negative(tv)) {
/* a * 2.0 = a + a */
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, a, mode);
+ n = new_rd_Add(get_irn_dbg_info(n), get_nodes_block(n), a, a, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
return n;
}
/**
* Transform a Div Node.
*/
-static ir_node *transform_node_Div(ir_node *n) {
+static ir_node *transform_node_Div(ir_node *n)
+{
ir_mode *mode = get_Div_resmode(n);
ir_node *a = get_Div_left(n);
ir_node *b = get_Div_right(n);
ir_node *value;
- tarval *tv;
+ const ir_node *dummy;
if (is_Const(b) && is_const_Phi(a)) {
/* check for Div(Phi, Const) */
- value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0);
+ value = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
else if (is_Const(a) && is_const_Phi(b)) {
/* check for Div(Const, Phi) */
- value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1);
+ value = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
else if (is_const_Phi(a) && is_const_Phi(b)) {
/* check for Div(Phi, Phi) */
- value = apply_binop_on_2_phis(a, b, tarval_div, mode);
+ value = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
value = n;
- tv = value_of(n);
- if (tv != tarval_bad) {
- value = new_Const(tv);
+ if (a == b && value_not_zero(a, &dummy)) {
+ /* BEWARE: we can optimize a/a to 1 only if this cannot cause a exception */
+ value = new_Const(get_mode_one(mode));
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
} else {
- ir_node *a = get_Div_left(n);
- ir_node *b = get_Div_right(n);
- const ir_node *dummy;
-
- if (a == b && value_not_zero(a, &dummy)) {
- /* BEWARE: we can optimize a/a to 1 only if this cannot cause a exception */
- value = new_Const(get_mode_one(mode));
- DBG_OPT_CSTEVAL(n, value);
- goto make_tuple;
- } else {
- if (mode_is_signed(mode) && is_Const(b)) {
- tarval *tv = get_Const_tarval(b);
+ if (mode_is_signed(mode) && is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
- if (tv == get_mode_minus_one(mode)) {
- /* a / -1 */
- value = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, mode);
- DBG_OPT_CSTEVAL(n, value);
- goto make_tuple;
- }
+ if (tv == get_mode_minus_one(mode)) {
+ /* a / -1 */
+ value = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
}
- /* Try architecture dependent optimization */
- value = arch_dep_replace_div_by_const(n);
}
+ /* Try architecture dependent optimization */
+ value = arch_dep_replace_div_by_const(n);
}
if (value != n) {
mem = skip_Pin(mem);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
- set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
set_Tuple_pred(n, pn_Div_X_except, new_Bad());
set_Tuple_pred(n, pn_Div_res, value);
}
/**
* Transform a Mod node.
*/
-static ir_node *transform_node_Mod(ir_node *n) {
+static ir_node *transform_node_Mod(ir_node *n)
+{
ir_mode *mode = get_Mod_resmode(n);
ir_node *a = get_Mod_left(n);
ir_node *b = get_Mod_right(n);
if (is_Const(b) && is_const_Phi(a)) {
/* check for Mod(Phi, Const) */
- value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+ value = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
else if (is_Const(a) && is_const_Phi(b)) {
/* check for Mod(Const, Phi) */
- value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+ value = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
else if (is_const_Phi(a) && is_const_Phi(b)) {
/* check for Mod(Phi, Phi) */
- value = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+ value = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
mem = skip_Pin(mem);
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
- set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
set_Tuple_pred(n, pn_Mod_res, value);
}
/**
* Transform a DivMod node.
*/
-static ir_node *transform_node_DivMod(ir_node *n) {
+static ir_node *transform_node_DivMod(ir_node *n)
+{
const ir_node *dummy;
ir_node *a = get_DivMod_left(n);
ir_node *b = get_DivMod_right(n);
if (is_Const(b) && is_const_Phi(a)) {
/* check for DivMod(Phi, Const) */
- va = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0);
- vb = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+ va = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0);
+ vb = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0);
if (va && vb) {
DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
}
else if (is_Const(a) && is_const_Phi(b)) {
/* check for DivMod(Const, Phi) */
- va = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1);
- vb = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+ va = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1);
+ vb = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1);
if (va && vb) {
DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
}
else if (is_const_Phi(a) && is_const_Phi(b)) {
/* check for DivMod(Phi, Phi) */
- va = apply_binop_on_2_phis(a, b, tarval_div, mode);
- vb = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+ va = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode);
+ vb = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode);
if (va && vb) {
DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
} else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
- va = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, mode);
+ va = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
vb = new_Const(get_mode_null(mode));
DBG_OPT_CSTEVAL(n, va);
DBG_OPT_CSTEVAL(n, vb);
blk = get_nodes_block(n);
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
- set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(blk));
set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_DivMod_res_div, va);
set_Tuple_pred(n, pn_DivMod_res_mod, vb);
/**
* Optimize x / c to x * (1/c)
*/
-static ir_node *transform_node_Quot(ir_node *n) {
+static ir_node *transform_node_Quot(ir_node *n)
+{
ir_mode *mode = get_Quot_resmode(n);
ir_node *oldn = n;
ir_node *blk = get_nodes_block(n);
ir_node *c = new_Const(tv);
ir_node *a = get_Quot_left(n);
- ir_node *m = new_rd_Mul(get_irn_dbg_info(n), current_ir_graph, blk, a, c, mode);
+ ir_node *m = new_rd_Mul(get_irn_dbg_info(n), blk, a, c, mode);
ir_node *mem = get_Quot_mem(n);
/* skip a potential Pin */
mem = skip_Pin(mem);
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
- set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
- set_Tuple_pred(n, pn_Quot_X_except, new_r_Bad(current_ir_graph));
+ set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(blk));
+ set_Tuple_pred(n, pn_Quot_X_except, new_Bad());
set_Tuple_pred(n, pn_Quot_res, m);
DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
}
* Optimize Abs(x) into -x if x is Confirmed <= 0
* Optimize Abs(-x) into Abs(x)
*/
-static ir_node *transform_node_Abs(ir_node *n) {
+static ir_node *transform_node_Abs(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Abs_op(n);
ir_mode *mode;
* Note that -x would create a new node, so we could
* not run it in the equivalent_node() context.
*/
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
- get_nodes_block(n), a, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
DBG_OPT_CONFIRM(oldn, n);
return n;
if (is_Minus(a)) {
/* Abs(-x) = Abs(x) */
mode = get_irn_mode(n);
- n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph,
- get_nodes_block(n), get_Minus_op(a), mode);
+ n = new_rd_Abs(get_irn_dbg_info(n), get_nodes_block(n), get_Minus_op(a), mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ABS_MINUS_X);
return n;
}
*
* For == and != can be handled in Proj(Cmp)
*/
-static ir_node *transform_node_Cmp(ir_node *n) {
+static ir_node *transform_node_Cmp(ir_node *n)
+{
ir_node *oldn = n;
ir_node *left = get_Cmp_left(n);
ir_node *right = get_Cmp_right(n);
!mode_overflow_on_unary_Minus(get_irn_mode(left))) {
ir_node *const new_left = get_Minus_op(right);
ir_node *const new_right = get_Minus_op(left);
- n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph,
- get_nodes_block(n), new_left, new_right);
+ n = new_rd_Cmp(get_irn_dbg_info(n), get_nodes_block(n), new_left, new_right);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CMP_OP_OP);
}
return n;
* Replace the Cond by a Jmp if it branches on a constant
* condition.
*/
-static ir_node *transform_node_Cond(ir_node *n) {
+static ir_node *transform_node_Cond(ir_node *n)
+{
ir_node *jmp;
ir_node *a = get_Cond_selector(n);
/* It's a boolean Cond, branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
ir_node *blk = get_nodes_block(n);
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
turn_into_tuple(n, pn_Cond_max);
if (ta == tarval_b_true) {
set_Tuple_pred(n, pn_Cond_false, new_Bad());
ir_op *op = get_irn_op(a);
ir_op *op_root = get_irn_op(n);
- if(op != get_irn_op(b))
+ if (op != get_irn_op(b))
return n;
+ /* and(conv(a), conv(b)) -> conv(and(a,b)) */
if (op == op_Conv) {
ir_node *a_op = get_Conv_op(a);
ir_node *b_op = get_Conv_op(b);
ir_mode *a_mode = get_irn_mode(a_op);
ir_mode *b_mode = get_irn_mode(b_op);
- if(a_mode == b_mode && (mode_is_int(a_mode) || a_mode == mode_b)) {
+ if (a_mode == b_mode && (mode_is_int(a_mode) || a_mode == mode_b)) {
ir_node *blk = get_nodes_block(n);
n = exact_copy(n);
set_binop_right(n, b_op);
set_irn_mode(n, a_mode);
n = trans_func(n);
- n = new_r_Conv(current_ir_graph, blk, n, get_irn_mode(oldn));
+ n = new_r_Conv(blk, n, get_irn_mode(oldn));
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_SHIFT_AND);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
return n;
}
}
c = a_left;
op1 = a_right;
op2 = b_right;
- } else if(a_left == b_right) {
+ } else if (a_left == b_right) {
c = a_left;
op1 = a_right;
op2 = b_left;
- } else if(a_right == b_left) {
+ } else if (a_right == b_left) {
c = a_right;
op1 = a_left;
op2 = b_right;
}
}
- if(a_right == b_right) {
+ if (a_right == b_right) {
c = a_right;
op1 = a_left;
op2 = b_left;
set_binop_right(new_n, op2);
new_n = trans_func(new_n);
- if(op_root == op_Eor && op == op_Or) {
+ if (op_root == op_Eor && op == op_Or) {
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_mode *mode = get_irn_mode(c);
- c = new_rd_Not(dbgi, irg, blk, c, mode);
- n = new_rd_And(dbgi, irg, blk, new_n, c, mode);
+ c = new_rd_Not(dbgi, blk, c, mode);
+ n = new_rd_And(dbgi, blk, new_n, c, mode);
} else {
n = exact_copy(a);
set_nodes_block(n, blk);
/**
* Transform an And.
*/
-static ir_node *transform_node_And(ir_node *n) {
+static ir_node *transform_node_And(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_And_left(n);
ir_node *b = get_And_right(n);
ir_mode *mode;
+ vrp_attr *a_vrp, *b_vrp;
mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_and, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_and, a, b, c, mode);
/* we can evaluate 2 Projs of the same Cmp */
if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
ir_node *pred_b = get_Proj_pred(b);
if (pred_a == pred_b) {
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_node *block = get_nodes_block(pred_a);
pn_Cmp pn_a = get_Proj_proj(a);
pn_Cmp pn_b = get_Proj_proj(b);
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a & pn_b;
- return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b, new_pnc);
+ return new_rd_Proj(dbgi, pred_a, mode_b, new_pnc);
}
}
if (is_Or(a)) {
/* (a|b) & ~(a&b) = a^b */
ir_node *block = get_nodes_block(n);
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, block, ba, bb, mode);
+ n = new_rd_Eor(get_irn_dbg_info(n), block, ba, bb, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_TO_EOR);
return n;
}
/* (a|b) & ~(a&b) = a^b */
ir_node *block = get_nodes_block(n);
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, block, aa, ab, mode);
+ n = new_rd_Eor(get_irn_dbg_info(n), block, aa, ab, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_TO_EOR);
return n;
}
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ar = new_rd_Not(dbg, current_ir_graph, block, ar, mode);
- n = new_rd_And(dbg, current_ir_graph, block, ar, b, mode);
+ ar = new_rd_Not(dbg, block, ar, mode);
+ n = new_rd_And(dbg, block, ar, b, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
return n;
}
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- al = new_rd_Not(dbg, current_ir_graph, block, al, mode);
- n = new_rd_And(dbg, current_ir_graph, block, al, b, mode);
+ al = new_rd_Not(dbg, block, al, mode);
+ n = new_rd_And(dbg, block, al, b, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
return n;
}
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- br = new_rd_Not(dbg, current_ir_graph, block, br, mode);
- n = new_rd_And(dbg, current_ir_graph, block, br, a, mode);
+ br = new_rd_Not(dbg, block, br, mode);
+ n = new_rd_And(dbg, block, br, a, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
return n;
}
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- bl = new_rd_Not(dbg, current_ir_graph, block, bl, mode);
- n = new_rd_And(dbg, current_ir_graph, block, bl, a, mode);
+ bl = new_rd_Not(dbg, block, bl, mode);
+ n = new_rd_And(dbg, block, bl, a, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
return n;
}
a = get_Not_op(a);
b = get_Not_op(b);
- n = new_rd_Or(get_irn_dbg_info(n), current_ir_graph, block, a, b, mode);
- n = new_rd_Not(get_irn_dbg_info(n), current_ir_graph, block, n, mode);
+ n = new_rd_Or(get_irn_dbg_info(n), block, a, b, mode);
+ n = new_rd_Not(get_irn_dbg_info(n), block, n, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_DEMORGAN);
return n;
}
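+ /* a & b == b when the constant a covers every bit that b may have set
+ * (assuming vrp's bits_not_set carries a 1 for every bit that may
+ * still be set); the symmetric case for a follows below */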
+ b_vrp = vrp_get_info(b);
+ if (is_Const(a) && b_vrp && (tarval_cmp(tarval_or(get_Const_tarval(a),
+ b_vrp->bits_not_set), get_Const_tarval(a)) == pn_Cmp_Eq)) {
+ return b;
+ }
+
+ a_vrp = vrp_get_info(a);
+ if (is_Const(b) && a_vrp && (tarval_cmp(tarval_or(get_Const_tarval(b),
+ a_vrp->bits_not_set), get_Const_tarval(b)) == pn_Cmp_Eq)) {
+ return a;
+ }
+
n = transform_bitwise_distributive(n, transform_node_And);
return n;
/**
* Transform an Eor.
*/
-static ir_node *transform_node_Eor(ir_node *n) {
+static ir_node *transform_node_Eor(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Eor_left(n);
ir_node *b = get_Eor_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_eor, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_eor, a, b, c, mode);
/* we can evaluate 2 Projs of the same Cmp */
if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
ir_node *pred_a = get_Proj_pred(a);
ir_node *pred_b = get_Proj_pred(b);
- if(pred_a == pred_b) {
+ if (pred_a == pred_b) {
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_node *block = get_nodes_block(pred_a);
pn_Cmp pn_a = get_Proj_proj(a);
pn_Cmp pn_b = get_Proj_proj(b);
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a ^ pn_b;
- return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b,
- new_pnc);
+ return new_rd_Proj(dbgi, pred_a, mode_b, new_pnc);
}
}
n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph,
get_mode_null(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
- } else if (mode == mode_b &&
- is_Proj(a) &&
- is_Const(b) && is_Const_one(b) &&
- is_Cmp(get_Proj_pred(a))) {
- /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
- mode_b, get_negated_pnc(get_Proj_proj(a), mode));
-
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
} else if (is_Const(b)) {
if (is_Not(a)) { /* ~x ^ const -> x ^ ~const */
ir_node *cnst = new_Const(tarval_not(get_Const_tarval(b)));
ir_node *not_op = get_Not_op(a);
dbg_info *dbg = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
ir_mode *mode = get_irn_mode(n);
- n = new_rd_Eor(dbg, irg, block, not_op, cnst, mode);
+ n = new_rd_Eor(dbg, block, not_op, cnst, mode);
return n;
} else if (is_Const_all_one(b)) { /* x ^ 1...1 -> ~x */
- n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode);
+ n = new_r_Not(get_nodes_block(n), a, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
}
} else {
/**
* Transform a Not.
*/
-static ir_node *transform_node_Not(ir_node *n) {
+static ir_node *transform_node_Not(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Not_op(n);
ir_mode *mode = get_irn_mode(n);
HANDLE_UNOP_PHI(tarval_not, a, c);
/* check for a boolean Not */
- if (mode == mode_b &&
- is_Proj(a) &&
- is_Cmp(get_Proj_pred(a))) {
- /* We negate a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
- mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
- return n;
+ if (mode == mode_b && is_Proj(a)) {
+ ir_node *a_pred = get_Proj_pred(a);
+ if (is_Cmp(a_pred)) {
+ /* We negate a Cmp. The Cmp has the negated result anyways! */
+ n = new_r_Proj(a_pred, mode_b,
+ get_negated_pnc(get_Proj_proj(a), mode_b));
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
+ return n;
+ }
}
- if (is_Eor(a)) {
+ if (is_Eor(a)) {
ir_node *eor_b = get_Eor_right(a);
if (is_Const(eor_b)) { /* ~(x ^ const) -> x ^ ~const */
ir_node *cnst = new_Const(tarval_not(get_Const_tarval(eor_b)));
ir_node *eor_a = get_Eor_left(a);
dbg_info *dbg = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
ir_mode *mode = get_irn_mode(n);
- n = new_rd_Eor(dbg, irg, block, eor_a, cnst, mode);
+ n = new_rd_Eor(dbg, block, eor_a, cnst, mode);
return n;
}
}
ir_node *block = get_nodes_block(n);
ir_node *add_l = get_Minus_op(a);
ir_node *add_r = new_rd_Const(dbg, irg, get_mode_minus_one(mode));
- n = new_rd_Add(dbg, irg, block, add_l, add_r, mode);
+ n = new_rd_Add(dbg, block, add_l, add_r, mode);
} else if (is_Add(a)) {
ir_node *add_r = get_Add_right(a);
if (is_Const(add_r) && is_Const_all_one(add_r)) {
/* ~(x + -1) = -x */
ir_node *op = get_Add_left(a);
ir_node *blk = get_nodes_block(n);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, get_irn_mode(n));
+ n = new_rd_Minus(get_irn_dbg_info(n), blk, op, get_irn_mode(n));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_MINUS_1);
}
}
* -(a >>s (size-1)) = a >>u (size-1)
* -(a * const) -> a * -const
*/
-static ir_node *transform_node_Minus(ir_node *n) {
+static ir_node *transform_node_Minus(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Minus_op(n);
ir_mode *mode;
tarval *tv = get_mode_one(mode);
ir_node *blk = get_nodes_block(n);
ir_node *c = new_Const(tv);
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, op, c, mode);
+ n = new_rd_Add(get_irn_dbg_info(n), blk, op, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT);
return n;
}
/* -(a >>u (size-1)) = a >>s (size-1) */
ir_node *v = get_Shr_left(a);
- n = new_rd_Shrs(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), v, c, mode);
+ n = new_rd_Shrs(get_irn_dbg_info(n), get_nodes_block(n), v, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_PREDICATE);
return n;
}
/* -(a >>s (size-1)) = a >>u (size-1) */
ir_node *v = get_Shrs_left(a);
- n = new_rd_Shr(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), v, c, mode);
+ n = new_rd_Shr(get_irn_dbg_info(n), get_nodes_block(n), v, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_PREDICATE);
return n;
}
ir_node *ra = get_Sub_right(a);
ir_node *blk = get_nodes_block(n);
- n = new_rd_Sub(get_irn_dbg_info(n), current_ir_graph, blk, ra, la, mode);
+ n = new_rd_Sub(get_irn_dbg_info(n), blk, ra, la, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_SUB);
return n;
}
if (tv != tarval_bad) {
ir_node *cnst = new_Const(tv);
dbg_info *dbg = get_irn_dbg_info(a);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(a);
- n = new_rd_Mul(dbg, irg, block, mul_l, cnst, mode);
+ n = new_rd_Mul(dbg, block, mul_l, cnst, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_MUL_C);
return n;
}
/**
* Transform a Cast_type(Const) into a new Const_type
*/
-static ir_node *transform_node_Cast(ir_node *n) {
+static ir_node *transform_node_Cast(ir_node *n)
+{
ir_node *oldn = n;
ir_node *pred = get_Cast_op(n);
ir_type *tp = get_irn_type(n);
n = new_rd_Const_type(NULL, current_ir_graph, get_Const_tarval(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
} else if (is_SymConst(pred) && get_SymConst_value_type(pred) != tp) {
- n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred),
+ n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_mode(pred),
get_SymConst_symbol(pred), get_SymConst_kind(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
}
/**
* Transform a Proj(Load) with a non-null address.
*/
-static ir_node *transform_node_Proj_Load(ir_node *proj) {
+static ir_node *transform_node_Proj_Load(ir_node *proj)
+{
if (get_opt_ldst_only_null_ptr_exceptions()) {
if (get_irn_mode(proj) == mode_X) {
ir_node *load = get_Proj_pred(proj);
return get_irg_bad(current_ir_graph);
} else {
ir_node *blk = get_nodes_block(load);
- return new_r_Jmp(current_ir_graph, blk);
+ return new_r_Jmp(blk);
}
}
}
/**
* Transform a Proj(Store) with a non-null address.
*/
-static ir_node *transform_node_Proj_Store(ir_node *proj) {
+static ir_node *transform_node_Proj_Store(ir_node *proj)
+{
if (get_opt_ldst_only_null_ptr_exceptions()) {
if (get_irn_mode(proj) == mode_X) {
ir_node *store = get_Proj_pred(proj);
return get_irg_bad(current_ir_graph);
} else {
ir_node *blk = get_nodes_block(store);
- return new_r_Jmp(current_ir_graph, blk);
+ return new_r_Jmp(blk);
}
}
}
* Transform a Proj(Div) with a non-zero value.
* Removes the exceptions and routes the memory to the NoMem node.
*/
-static ir_node *transform_node_Proj_Div(ir_node *proj) {
+static ir_node *transform_node_Proj_Div(ir_node *proj)
+{
ir_node *div = get_Proj_pred(proj);
ir_node *b = get_Div_right(div);
ir_node *res, *new_mem;
proj_nr = get_Proj_proj(proj);
switch (proj_nr) {
case pn_Div_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(div, -1));
+ return new_r_Jmp(get_nodes_block(div));
case pn_Div_X_except:
/* we found an exception handler, remove it */
if (confirm) {
/* This node can only float up to the Confirm block */
- new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
+ new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
}
set_irn_pinned(div, op_pin_state_floats);
/* this is a Div without exception, we can remove the memory edge */
* Transform a Proj(Mod) with a non-zero value.
* Removes the exceptions and routes the memory to the NoMem node.
*/
-static ir_node *transform_node_Proj_Mod(ir_node *proj) {
+static ir_node *transform_node_Proj_Mod(ir_node *proj)
+{
ir_node *mod = get_Proj_pred(proj);
ir_node *b = get_Mod_right(mod);
ir_node *res, *new_mem;
switch (proj_nr) {
case pn_Mod_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(mod, -1));
+ return new_r_Jmp(get_nodes_block(mod));
case pn_Mod_X_except:
/* we found an exception handler, remove it */
if (confirm) {
/* This node can only float up to the Confirm block */
- new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
+ new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
}
/* this is a Mod without exception, we can remove the memory edge */
set_Mod_mem(mod, new_mem);
* Transform a Proj(DivMod) with a non-zero value.
* Removes the exceptions and routes the memory to the NoMem node.
*/
-static ir_node *transform_node_Proj_DivMod(ir_node *proj) {
+static ir_node *transform_node_Proj_DivMod(ir_node *proj)
+{
ir_node *divmod = get_Proj_pred(proj);
ir_node *b = get_DivMod_right(divmod);
ir_node *res, *new_mem;
switch (proj_nr) {
case pn_DivMod_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(divmod, -1));
+ return new_r_Jmp(get_nodes_block(divmod));
case pn_DivMod_X_except:
/* we found an exception handler, remove it */
if (confirm) {
/* This node can only float up to the Confirm block */
- new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
+ new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
}
/* this is a DivMod without exception, we can remove the memory edge */
set_DivMod_mem(divmod, new_mem);
/**
* Optimizes jump tables (CondIs or CondIu) by removing all impossible cases.
*/
-static ir_node *transform_node_Proj_Cond(ir_node *proj) {
+static ir_node *transform_node_Proj_Cond(ir_node *proj)
+{
if (get_opt_unreachable_code()) {
ir_node *n = get_Proj_pred(proj);
ir_node *b = get_Cond_selector(n);
/* we have a constant switch */
long num = get_Proj_proj(proj);
- if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Proj's yet */
+ if (num != get_Cond_default_proj(n)) { /* we cannot optimize default Proj's yet */
if (get_tarval_long(tb) == num) {
/* Do NOT create a jump here, or we will have 2 control flow ops
* in a block. This case is optimized away in optimize_cf(). */
/* this case will NEVER be taken, kill it */
return get_irg_bad(current_ir_graph);
}
+ }
+ } else {
+ long num = get_Proj_proj(proj);
+ vrp_attr *b_vrp = vrp_get_info(b);
+ if (num != get_Cond_default_proj(n) && b_vrp) {
+ /* Try handling with vrp data. We only remove dead parts. */
+ tarval *tp = new_tarval_from_long(num, get_irn_mode(b));
+
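+ /* VRP_RANGE means b lies within [range_bottom, range_top], so the
+ * case is dead if its value lies below or above that range;
+ * VRP_ANTIRANGE means b lies outside the range, so a case value
+ * inside it is dead */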
+ if (b_vrp->range_type == VRP_RANGE) {
+ pn_Cmp cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
+ pn_Cmp cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
+
+ if ((cmp_result & pn_Cmp_Gt) == cmp_result
+ || (cmp_result2 & pn_Cmp_Lt) == cmp_result2) {
+ return get_irg_bad(current_ir_graph);
+ }
+ } else if (b_vrp->range_type == VRP_ANTIRANGE) {
+ pn_Cmp cmp_result = tarval_cmp(b_vrp->range_bottom, tp);
+ pn_Cmp cmp_result2 = tarval_cmp(b_vrp->range_top, tp);
+
+ if ((cmp_result & pn_Cmp_Le) == cmp_result
+ && (cmp_result2 & pn_Cmp_Ge) == cmp_result2) {
+ return get_irg_bad(current_ir_graph);
+ }
+ }
+
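+ /* the case is also dead if its value lacks a bit that is known to be
+ * set, or has a bit that is known to be clear */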
+ if (tarval_cmp(tarval_and(b_vrp->bits_set, tp), b_vrp->bits_set)
+ != pn_Cmp_Eq) {
+ return get_irg_bad(current_ir_graph);
+ }
+
+ if (tarval_cmp(tarval_and(tarval_not(tp), tarval_not(b_vrp->bits_not_set)),
+ tarval_not(b_vrp->bits_not_set)) != pn_Cmp_Eq) {
+ return get_irg_bad(current_ir_graph);
+ }
}
}
}
/**
* Create a 0 constant of given mode.
*/
-static ir_node *create_zero_const(ir_mode *mode) {
+static ir_node *create_zero_const(ir_mode *mode)
+{
tarval *tv = get_mode_null(mode);
ir_node *cnst = new_Const(tv);
/**
* Normalizes and optimizes Cmp nodes.
*/
-static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
+static ir_node *transform_node_Proj_Cmp(ir_node *proj)
+{
ir_node *n = get_Proj_pred(proj);
ir_node *left = get_Cmp_left(n);
ir_node *right = get_Cmp_right(n);
if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)
&& mode_left != mode_b && mode_right != mode_b) {
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
if (mode_left == mode_right) {
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV);
} else if (smaller_mode(mode_left, mode_right)) {
- left = new_r_Conv(irg, block, op_left, mode_right);
+ left = new_r_Conv(block, op_left, mode_right);
right = op_right;
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
} else if (smaller_mode(mode_right, mode_left)) {
left = op_left;
- right = new_r_Conv(irg, block, op_right, mode_left);
+ right = new_r_Conv(block, op_right, mode_left);
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
}
ir_node *llr = get_Shr_right(ll);
if (is_Const(llr)) {
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(left);
tarval *c1 = get_Const_tarval(llr);
tarval *mask = tarval_shl(c2, c1);
tarval *value = tarval_shl(c3, c1);
- left = new_rd_And(dbg, irg, block, get_Shr_left(ll), new_Const(mask), mode);
+ left = new_rd_And(dbg, block, get_Shr_left(ll), new_Const(mask), mode);
right = new_Const(value);
changed |= 1;
}
/* replace mode_b compares with ands/ors */
if (get_irn_mode(left) == mode_b) {
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
ir_node *bres;
switch (proj_nr) {
- case pn_Cmp_Le: bres = new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); break;
- case pn_Cmp_Lt: bres = new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); break;
- case pn_Cmp_Ge: bres = new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); break;
- case pn_Cmp_Gt: bres = new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); break;
- case pn_Cmp_Lg: bres = new_r_Eor(irg, block, left, right, mode_b); break;
- case pn_Cmp_Eq: bres = new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b); break;
+ case pn_Cmp_Le: bres = new_r_Or( block, new_r_Not(block, left, mode_b), right, mode_b); break;
+ case pn_Cmp_Lt: bres = new_r_And(block, new_r_Not(block, left, mode_b), right, mode_b); break;
+ case pn_Cmp_Ge: bres = new_r_Or( block, left, new_r_Not(block, right, mode_b), mode_b); break;
+ case pn_Cmp_Gt: bres = new_r_And(block, left, new_r_Not(block, right, mode_b), mode_b); break;
+ case pn_Cmp_Lg: bres = new_r_Eor(block, left, right, mode_b); break;
+ case pn_Cmp_Eq: bres = new_r_Not(block, new_r_Eor(block, left, right, mode_b), mode_b); break;
default: bres = NULL;
}
if (bres) {
/*
* UpConv(x) REL 0 ==> x REL 0
+ * Don't do this for float values, as it's unclear whether it is a
+ * win. (It also makes detection/creation of fabs harder.)
*/
if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) ||
- mode_is_signed(mode) || !mode_is_signed(op_mode))) {
+ mode_is_signed(mode) || !mode_is_signed(op_mode)) &&
+ !mode_is_float(mode)) {
tv = get_mode_null(op_mode);
left = op;
mode = op_mode;
}
sl = get_Shl_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
tv = tarval_shr(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
}
sl = get_Shr_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
tv = tarval_shl(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
}
sl = get_Shrs_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
tv = tarval_shl(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
ir_mode *mode = get_irn_mode(v);
tv = tarval_sub(tv, get_mode_one(mode), NULL);
- left = new_rd_And(get_irn_dbg_info(op), current_ir_graph, blk, v, new_Const(tv), mode);
+ left = new_rd_And(get_irn_dbg_info(op), blk, v, new_Const(tv), mode);
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_MOD_TO_AND);
}
ir_node *block = get_nodes_block(n);
/* create a new compare */
- n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block, left, right);
- proj = new_rd_Proj(get_irn_dbg_info(proj), current_ir_graph, block, n, get_irn_mode(proj), proj_nr);
+ n = new_rd_Cmp(get_irn_dbg_info(n), block, left, right);
+ proj = new_rd_Proj(get_irn_dbg_info(proj), n, get_irn_mode(proj), proj_nr);
}
return proj;
/**
* Optimize CopyB(mem, x, x) into a Nop.
*/
-static ir_node *transform_node_Proj_CopyB(ir_node *proj) {
+static ir_node *transform_node_Proj_CopyB(ir_node *proj)
+{
ir_node *copyb = get_Proj_pred(proj);
ir_node *a = get_CopyB_dst(copyb);
ir_node *b = get_CopyB_src(copyb);
case pn_CopyB_X_regular:
/* Turn CopyB into a tuple (mem, jmp, bad, bad) */
DBG_OPT_EXC_REM(proj);
- proj = new_r_Jmp(current_ir_graph, get_nodes_block(copyb));
+ proj = new_r_Jmp(get_nodes_block(copyb));
break;
- case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(current_ir_graph);
+ proj = get_irg_bad(get_irn_irg(proj));
break;
default:
break;
/**
* Optimize Bounds(idx, idx, upper) into idx.
*/
-static ir_node *transform_node_Proj_Bound(ir_node *proj) {
+static ir_node *transform_node_Proj_Bound(ir_node *proj)
+{
ir_node *oldn = proj;
ir_node *bound = get_Proj_pred(proj);
ir_node *idx = get_Bound_index(bound);
break;
case pn_Bound_X_except:
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(current_ir_graph);
+ proj = get_irg_bad(get_irn_irg(proj));
break;
case pn_Bound_res:
proj = idx;
break;
case pn_Bound_X_regular:
DBG_OPT_EXC_REM(proj);
- proj = new_r_Jmp(current_ir_graph, get_nodes_block(bound));
+ proj = new_r_Jmp(get_nodes_block(bound));
break;
default:
break;
* Does all optimizations on nodes that must be done on their Projs
* because they create new nodes.
*/
-static ir_node *transform_node_Proj(ir_node *proj) {
+static ir_node *transform_node_Proj(ir_node *proj)
+{
ir_node *n = get_Proj_pred(proj);
if (n->op->ops.transform_node_Proj)
/**
* Move Confirms down through Phi nodes.
*/
-static ir_node *transform_node_Phi(ir_node *phi) {
+static ir_node *transform_node_Phi(ir_node *phi)
+{
int i, n;
ir_mode *mode = get_irn_mode(phi);
}
/* move the Confirm nodes "behind" the Phi */
block = get_irn_n(phi, -1);
- new_Phi = new_r_Phi(current_ir_graph, block, n, in, get_irn_mode(phi));
- return new_r_Confirm(current_ir_graph, block, new_Phi, bound, pnc);
+ new_Phi = new_r_Phi(block, n, in, get_irn_mode(phi));
+ return new_r_Confirm(block, new_Phi, bound, pnc);
}
}
return phi;
* Returns the operands of a commutative bin-op; if one operand is
* a const, it is returned as the second one.
*/
-static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c) {
+static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
+{
ir_node *op_a = get_binop_left(binop);
ir_node *op_b = get_binop_right(binop);
* AND c1 ===> OR if (c1 | c2) == 0x111..11
* OR
*/
-static ir_node *transform_node_Or_bf_store(ir_node *or) {
+static ir_node *transform_node_Or_bf_store(ir_node *or)
+{
ir_node *and, *c1;
ir_node *or_l, *c2;
ir_node *and_l, *c3;
ir_node *new_and, *new_const, *block;
ir_mode *mode = get_irn_mode(or);
- tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;
+ tarval *tv1, *tv2, *tv3, *tv4, *tv;
- while (1) {
+ for (;;) {
get_comm_Binop_Ops(or, &and, &c1);
if (!is_Const(c1) || !is_And(and))
return or;
return or;
}
- n_tv4 = tarval_not(tv4);
- if (tv3 != tarval_and(tv3, n_tv4)) {
+ if (tv3 != tarval_andnot(tv3, tv4)) {
/* bit in the or_mask is outside the and_mask */
return or;
}
- n_tv2 = tarval_not(tv2);
- if (tv1 != tarval_and(tv1, n_tv2)) {
+ if (tv1 != tarval_andnot(tv1, tv2)) {
/* bit in the or_mask is outside the and_mask */
return or;
}
/* ok, all conditions met */
block = get_irn_n(or, -1);
- new_and = new_r_And(current_ir_graph, block,
- value, new_Const(tarval_and(tv4, tv2)), mode);
+ new_and = new_r_And(block, value, new_Const(tarval_and(tv4, tv2)), mode);
new_const = new_Const(tarval_or(tv3, tv1));
/**
* Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rotl
*/
-static ir_node *transform_node_Or_Rotl(ir_node *or) {
+static ir_node *transform_node_Or_Rotl(ir_node *or)
+{
ir_mode *mode = get_irn_mode(or);
ir_node *shl, *shr, *block;
ir_node *irn, *x, *c1, *c2, *v, *sub, *n, *rotval;
/* yes, condition met */
block = get_nodes_block(or);
- n = new_r_Rotl(current_ir_graph, block, x, c1, mode);
+ n = new_r_Rotl(block, x, c1, mode);
DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
/* yes, condition met */
block = get_nodes_block(or);
- n = new_r_Rotl(current_ir_graph, block, x, rotval, mode);
+ n = new_r_Rotl(block, x, rotval, mode);
DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
/**
* Transform an Or.
*/
-static ir_node *transform_node_Or(ir_node *n) {
+static ir_node *transform_node_Or(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Or_left(n);
ir_node *b = get_Or_right(n);
mode = get_irn_mode(n);
a = get_Not_op(a);
b = get_Not_op(b);
- n = new_rd_And(get_irn_dbg_info(n), current_ir_graph, block, a, b, mode);
- n = new_rd_Not(get_irn_dbg_info(n), current_ir_graph, block, n, mode);
+ n = new_rd_And(get_irn_dbg_info(n), block, a, b, mode);
+ n = new_rd_Not(get_irn_dbg_info(n), block, n, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_DEMORGAN);
return n;
}
ir_node *pred_b = get_Proj_pred(b);
if (pred_a == pred_b) {
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_node *block = get_nodes_block(pred_a);
pn_Cmp pn_a = get_Proj_proj(a);
pn_Cmp pn_b = get_Proj_proj(b);
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a | pn_b;
- return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b,
- new_pnc);
+ return new_rd_Proj(dbgi, pred_a, mode_b, new_pnc);
}
}
mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_or, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_or, a, b, c, mode);
n = transform_node_Or_bf_store(n);
n = transform_node_Or_Rotl(n);
*
* Should be moved to reassociation?
*/
-static ir_node *transform_node_shift(ir_node *n) {
+static ir_node *transform_node_shift(ir_node *n)
+{
ir_node *left, *right;
ir_mode *mode;
tarval *tv1, *tv2, *res;
/* shifting too much */
if (!(tarval_cmp(res, modulo) & pn_Cmp_Lt)) {
if (is_Shrs(n)) {
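+ /* a Shrs never shifts the sign away, so the result equals a
+ * shift by size - 1 */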
- ir_graph *irg = get_irn_irg(n);
ir_node *block = get_nodes_block(n);
dbg_info *dbgi = get_irn_dbg_info(n);
ir_mode *smode = get_irn_mode(right);
ir_node *cnst = new_Const_long(smode, get_mode_size_bits(mode) - 1);
- return new_rd_Shrs(dbgi, irg, block, get_binop_left(left),
- cnst, mode);
+ return new_rd_Shrs(dbgi, block, get_binop_left(left), cnst, mode);
}
return new_Const(get_mode_null(mode));
in[0] = get_binop_left(left);
in[1] = new_Const(res);
- irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);
+ irn = new_ir_node(NULL, get_Block_irg(block), block, get_irn_op(n), mode, 2, in);
DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);
* - Shl, Shr, Shrs, rotl instead of >>
* (with a special case for Or/Xor + Shrs)
*/
-static ir_node *transform_node_bitop_shift(ir_node *n) {
+static ir_node *transform_node_bitop_shift(ir_node *n)
+{
ir_node *left;
ir_node *right = get_binop_right(n);
ir_mode *mode = get_irn_mode(n);
ir_node *bitop_left;
ir_node *bitop_right;
ir_op *op_left;
- ir_graph *irg;
ir_node *block;
dbg_info *dbgi;
ir_node *new_shift;
bitop_left = get_binop_left(left);
- irg = get_irn_irg(n);
block = get_nodes_block(n);
dbgi = get_irn_dbg_info(n);
tv1 = get_Const_tarval(bitop_right);
assert(get_tarval_mode(tv1) == mode);
if (is_Shl(n)) {
- new_shift = new_rd_Shl(dbgi, irg, block, bitop_left, right, mode);
+ new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode);
tv_shift = tarval_shl(tv1, tv2);
- } else if(is_Shr(n)) {
- new_shift = new_rd_Shr(dbgi, irg, block, bitop_left, right, mode);
+ } else if (is_Shr(n)) {
+ new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode);
tv_shift = tarval_shr(tv1, tv2);
- } else if(is_Shrs(n)) {
- new_shift = new_rd_Shrs(dbgi, irg, block, bitop_left, right, mode);
+ } else if (is_Shrs(n)) {
+ new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode);
tv_shift = tarval_shrs(tv1, tv2);
} else {
assert(is_Rotl(n));
- new_shift = new_rd_Rotl(dbgi, irg, block, bitop_left, right, mode);
+ new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode);
tv_shift = tarval_rotl(tv1, tv2);
}
new_const = new_Const(tv_shift);
if (op_left == op_And) {
- new_bitop = new_rd_And(dbgi, irg, block, new_shift, new_const, mode);
- } else if(op_left == op_Or) {
- new_bitop = new_rd_Or(dbgi, irg, block, new_shift, new_const, mode);
+ new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
+ } else if (op_left == op_Or) {
+ new_bitop = new_rd_Or(dbgi, block, new_shift, new_const, mode);
} else {
assert(op_left == op_Eor);
- new_bitop = new_rd_Eor(dbgi, irg, block, new_shift, new_const, mode);
+ new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode);
}
return new_bitop;
* (x >> c1) << c2 <=> x OP (c2-c1) & ((-1 >> c1) << c2)
* (also with x >>s c1 when c1>=c2)
*/
-static ir_node *transform_node_shl_shr(ir_node *n) {
+static ir_node *transform_node_shl_shr(ir_node *n)
+{
ir_node *left;
ir_node *right = get_binop_right(n);
ir_node *x;
- ir_graph *irg;
ir_node *block;
ir_mode *mode;
dbg_info *dbgi;
tv_mask = tarval_shr(get_mode_all_one(mode), tv_shr);
}
tv_mask = tarval_shl(tv_mask, tv_shl);
- } else if(is_Shr(n) && is_Shl(left)) {
+ } else if (is_Shr(n) && is_Shl(left)) {
ir_node *shl_right = get_Shl_right(left);
if (!is_Const(shl_right))
assert(tv_mask != tarval_bad);
assert(get_tarval_mode(tv_mask) == mode);
- irg = get_irn_irg(n);
block = get_nodes_block(n);
dbgi = get_irn_dbg_info(n);
tv_shift = tarval_sub(tv_shr, tv_shl, NULL);
new_const = new_Const(tv_shift);
if (need_shrs) {
- new_shift = new_rd_Shrs(dbgi, irg, block, x, new_const, mode);
+ new_shift = new_rd_Shrs(dbgi, block, x, new_const, mode);
} else {
- new_shift = new_rd_Shr(dbgi, irg, block, x, new_const, mode);
+ new_shift = new_rd_Shr(dbgi, block, x, new_const, mode);
}
} else {
assert(pnc == pn_Cmp_Gt);
tv_shift = tarval_sub(tv_shl, tv_shr, NULL);
new_const = new_Const(tv_shift);
- new_shift = new_rd_Shl(dbgi, irg, block, x, new_const, mode);
+ new_shift = new_rd_Shl(dbgi, block, x, new_const, mode);
}
new_const = new_Const(tv_mask);
- new_and = new_rd_And(dbgi, irg, block, new_shift, new_const, mode);
+ new_and = new_rd_And(dbgi, block, new_shift, new_const, mode);
return new_and;
}
/**
* Transform a Shr.
*/
-static ir_node *transform_node_Shr(ir_node *n) {
+static ir_node *transform_node_Shr(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *left = get_Shr_left(n);
ir_node *right = get_Shr_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shr, left, right, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_shr, left, right, c, mode);
n = transform_node_shift(n);
if (is_Shr(n))
/**
* Transform a Shrs.
*/
-static ir_node *transform_node_Shrs(ir_node *n) {
+static ir_node *transform_node_Shrs(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Shrs_left(n);
ir_node *b = get_Shrs_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shrs, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_shrs, a, b, c, mode);
n = transform_node_shift(n);
if (is_Shrs(n))
/**
* Transform a Shl.
*/
-static ir_node *transform_node_Shl(ir_node *n) {
+static ir_node *transform_node_Shl(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Shl_left(n);
ir_node *b = get_Shl_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shl, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_shl, a, b, c, mode);
n = transform_node_shift(n);
if (is_Shl(n))
/**
* Transform a Rotl.
*/
-static ir_node *transform_node_Rotl(ir_node *n) {
+static ir_node *transform_node_Rotl(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_node *a = get_Rotl_left(n);
ir_node *b = get_Rotl_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_rotl, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_rotl, a, b, c, mode);
n = transform_node_shift(n);
if (is_Rotl(n))
/**
* Transform a Conv.
*/
-static ir_node *transform_node_Conv(ir_node *n) {
+static ir_node *transform_node_Conv(ir_node *n)
+{
ir_node *c, *oldn = n;
ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Conv_op(n);
ir_node *r = get_Add_right(a);
dbg_info *dbgi = get_irn_dbg_info(a);
ir_node *block = get_nodes_block(n);
- if(is_Conv(l)) {
+ if (is_Conv(l)) {
ir_node *lop = get_Conv_op(l);
- if(get_irn_mode(lop) == mode) {
+ if (get_irn_mode(lop) == mode) {
/* ConvP(AddI(ConvI(P), x)) -> AddP(P, x) */
- n = new_rd_Add(dbgi, current_ir_graph, block, lop, r, mode);
+ n = new_rd_Add(dbgi, block, lop, r, mode);
return n;
}
}
- if(is_Conv(r)) {
+ if (is_Conv(r)) {
ir_node *rop = get_Conv_op(r);
- if(get_irn_mode(rop) == mode) {
+ if (get_irn_mode(rop) == mode) {
/* ConvP(AddI(x, ConvI(P))) -> AddP(x, P) */
- n = new_rd_Add(dbgi, current_ir_graph, block, l, rop, mode);
+ n = new_rd_Add(dbgi, block, l, rop, mode);
return n;
}
}
* Remove dead blocks and nodes in dead blocks
* from the keep-alive list. We do not generate a new End node.
*/
-static ir_node *transform_node_End(ir_node *n) {
+static ir_node *transform_node_End(ir_node *n)
+{
int i, j, n_keepalives = get_End_n_keepalives(n);
ir_node **in;
return n;
} /* transform_node_End */
-/** returns 1 if a == -b */
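+/** Returns true if a == -b. */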
-static int is_negated_value(ir_node *a, ir_node *b) {
+bool is_negated_value(ir_node *a, ir_node *b)
+{
if (is_Minus(a) && get_Minus_op(a) == b)
- return 1;
+ return true;
if (is_Minus(b) && get_Minus_op(b) == a)
- return 1;
+ return true;
if (is_Sub(a) && is_Sub(b)) {
ir_node *a_left = get_Sub_left(a);
ir_node *a_right = get_Sub_right(a);
ir_node *b_right = get_Sub_right(b);
if (a_left == b_right && a_right == b_left)
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/**
* Optimize a Mux into some simpler cases.
*/
-static ir_node *transform_node_Mux(ir_node *n) {
+static ir_node *transform_node_Mux(ir_node *n)
+{
ir_node *oldn = n, *sel = get_Mux_sel(n);
ir_mode *mode = get_irn_mode(n);
ir_node *t = get_Mux_true(n);
ir_node *f = get_Mux_false(n);
ir_graph *irg = current_ir_graph;
+ if (is_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX))
+ return n;
+
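+ /* merge a nested Mux that shares an operand with the outer Mux into a
+ * single Mux over an And/Or of both conditions */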
+ if (is_Mux(t)) {
+ ir_node* block = get_nodes_block(n);
+ ir_node* c0 = sel;
+ ir_node* c1 = get_Mux_sel(t);
+ ir_node* t1 = get_Mux_true(t);
+ ir_node* f1 = get_Mux_false(t);
+ if (f == f1) {
+ /* Mux(cond0, Mux(cond1, x, y), y) -> typical if (cond0 && cond1) x else y */
+ ir_node* and_ = new_r_And(block, c0, c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, and_, f1, t1, mode);
+ n = new_mux;
+ sel = and_;
+ f = f1;
+ t = t1;
+ DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
+ } else if (f == t1) {
+ /* Mux(cond0, Mux(cond1, x, y), x) -> typical if (cond0 && !cond1) y else x */
+ ir_node* not_c1 = new_r_Not(block, c1, mode_b);
+ ir_node* and_ = new_r_And(block, c0, not_c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, and_, t1, f1, mode);
+ n = new_mux;
+ sel = and_;
+ f = t1;
+ t = f1;
+ DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
+ }
+ } else if (is_Mux(f)) {
+ ir_node* block = get_nodes_block(n);
+ ir_node* c0 = sel;
+ ir_node* c1 = get_Mux_sel(f);
+ ir_node* t1 = get_Mux_true(f);
+ ir_node* f1 = get_Mux_false(f);
+ if (t == t1) {
+ /* Mux(cond0, x, Mux(cond1, x, y)) -> typical if (cond0 || cond1) x else y */
+ ir_node* or_ = new_r_Or(block, c0, c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, or_, f1, t1, mode);
+ n = new_mux;
+ sel = or_;
+ f = f1;
+ t = t1;
+ DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
+ } else if (t == f1) {
+ /* Mux(cond0, x, Mux(cond1, y, x)) -> typical if (cond0 || !cond1) x else y */
+ ir_node* not_c1 = new_r_Not(block, c1, mode_b);
+ ir_node* or_ = new_r_Or(block, c0, not_c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, or_, t1, f1, mode);
+ n = new_mux;
+ sel = or_;
+ f = t1;
+ t = f1;
+ DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
+ }
+ }
+
/* first normalization step: move a possible zero to the false case */
if (is_Proj(sel)) {
ir_node *cmp = get_Proj_pred(sel);
/* Mux(x, 0, y) => Mux(x, y, 0) */
pn_Cmp pnc = get_Proj_proj(sel);
- sel = new_r_Proj(irg, get_nodes_block(cmp), cmp, mode_b,
+ sel = new_r_Proj(cmp, mode_b,
get_negated_pnc(pnc, get_irn_mode(get_Cmp_left(cmp))));
- n = new_rd_Mux(get_irn_dbg_info(n), irg, get_nodes_block(n), sel, t, f, mode);
+ n = new_rd_Mux(get_irn_dbg_info(n), get_nodes_block(n), sel, t, f, mode);
tmp = t;
t = f;
f = tmp;
if (mode == mode_b) {
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ir_graph *irg = current_ir_graph;
if (is_Const(t)) {
tarval *tv_t = get_Const_tarval(t);
return sel;
} else {
/* Muxb(sel, true, x) = Or(sel, x) */
- n = new_rd_Or(dbg, irg, block, sel, f, mode_b);
+ n = new_rd_Or(dbg, block, sel, f, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
return n;
}
tarval *tv_f = get_Const_tarval(f);
if (tv_f == tarval_b_true) {
/* Muxb(sel, x, true) = Or(Not(sel), x) */
- ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+ ir_node* not_sel = new_rd_Not(dbg, block, sel, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
- n = new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+ n = new_rd_Or(dbg, block, not_sel, t, mode_b);
return n;
} else {
/* Muxb(sel, x, false) = And(sel, x) */
assert(tv_f == tarval_b_false);
- n = new_rd_And(dbg, irg, block, sel, t, mode_b);
+ n = new_rd_And(dbg, block, sel, t, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
return n;
}
if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
tarval *a = get_Const_tarval(t);
tarval *b = get_Const_tarval(f);
- tarval *null = get_tarval_null(mode);
tarval *diff, *min;
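+ /* Mux(sel, 1, 0) -> Conv(sel) and Mux(sel, 0, 1) -> Conv(Not(sel)) */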
+ if (tarval_is_one(a) && tarval_is_null(b)) {
+ ir_node *block = get_nodes_block(n);
+ ir_node *conv = new_r_Conv(block, sel, mode);
+ n = conv;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
+ return n;
+ } else if (tarval_is_null(a) && tarval_is_one(b)) {
+ ir_node *block = get_nodes_block(n);
+ ir_node *not_ = new_r_Not(block, sel, mode_b);
+ ir_node *conv = new_r_Conv(block, not_, mode);
+ n = conv;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
+ return n;
+ }
+ /* TODO: it's not really clear whether this helps in general or should
+ * be moved to the backend, especially with the Mux->Conv transformation above */
if (tarval_cmp(a, b) & pn_Cmp_Gt) {
diff = tarval_sub(a, b, NULL);
min = b;
min = a;
}
- if (diff == get_tarval_one(mode) && min != null) {
+ if (diff == get_tarval_one(mode)) {
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ir_graph *irg = current_ir_graph;
ir_node *t = new_Const(tarval_sub(a, min, NULL));
ir_node *f = new_Const(tarval_sub(b, min, NULL));
- n = new_rd_Mux(dbg, irg, block, sel, f, t, mode);
- n = new_rd_Add(dbg, irg, block, n, new_Const(min), mode);
+ n = new_rd_Mux(dbg, block, sel, f, t, mode);
+ n = new_rd_Add(dbg, block, n, new_Const(min), mode);
return n;
}
}
if (!mode_honor_signed_zeros(mode) && is_negated_value(f, t)) {
/* f = -t */
- if ( (cmp_l == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))
- || (cmp_l == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)))
+ /* NaNs work fine with Abs, so it is ok to remove Uo */
+ long pnc = pn & ~pn_Cmp_Uo;
+
+ if ( (cmp_l == t && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt))
+ || (cmp_l == f && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt)))
{
/* Mux(a >/>= 0, a, -a) = Mux(a </<= 0, -a, a) ==> Abs(a) */
- n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block,
- cmp_l, mode);
+ n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
- } else if ((cmp_l == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))
- || (cmp_l == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)))
+ } else if ((cmp_l == t && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt))
+ || (cmp_l == f && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt)))
{
/* Mux(a </<= 0, a, -a) = Mux(a >/>= 0, -a, a) ==> -Abs(a) */
- n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block,
- cmp_l, mode);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
- block, n, mode);
+ n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), block, n, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
}
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* Mux((a & 2^C) == 0, 2^C, 0) */
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
- block, cmp_l, t, mode);
+ n = new_rd_Eor(get_irn_dbg_info(n), block, cmp_l, t, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* (a & (1 << n)) == 0, (1 << n), 0) */
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
- block, cmp_l, t, mode);
+ n = new_rd_Eor(get_irn_dbg_info(n), block, cmp_l, t, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* ((1 << n) & a) == 0, (1 << n), 0) */
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
- block, cmp_l, t, mode);
+ n = new_rd_Eor(get_irn_dbg_info(n), block, cmp_l, t, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
* Optimize Sync nodes that have other Syncs as input: we simply add the
* inputs of the other Sync to our own inputs.
*/
-static ir_node *transform_node_Sync(ir_node *n) {
+static ir_node *transform_node_Sync(ir_node *n)
+{
int arity = get_Sync_n_preds(n);
int i;
add_identities(current_ir_graph->value_table, n);
return n;
-}
+} /* transform_node_Sync */
+
+/**
+ * Optimize a trampoline Call into a direct Call.
+ */
+static ir_node *transform_node_Call(ir_node *call)
+{
+ ir_node *callee = get_Call_ptr(call);
+ ir_node *adr, *mem, *res, *bl, **in;
+ ir_type *ctp, *mtp, *tp;
+ type_dbg_info *tdb;
+ dbg_info *db;
+ int i, n_res, n_param;
+ ir_variadicity var;
+
+ if (! is_Proj(callee))
+ return call;
+ callee = get_Proj_pred(callee);
+ if (! is_Builtin(callee))
+ return call;
+ if (get_Builtin_kind(callee) != ir_bk_inner_trampoline)
+ return call;
+
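+ /* parameter 1 of the trampoline Builtin is the entry address of the
+ * real callee, parameter 2 the environment pointer, which becomes the
+ * hidden first argument of the direct Call below (parameter 0 is
+ * presumably the trampoline memory itself) */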
+ mem = get_Call_mem(call);
+
+ if (skip_Proj(mem) == callee) {
+ /* memory is routed to the trampoline, skip */
+ mem = get_Builtin_mem(callee);
+ }
+
+ /* build a new call type */
+ mtp = get_Call_type(call);
+ tdb = get_type_dbg_info(mtp);
+
+ n_res = get_method_n_ress(mtp);
+ n_param = get_method_n_params(mtp);
+ ctp = new_d_type_method(n_param + 1, n_res, tdb);
+
+ for (i = 0; i < n_res; ++i)
+ set_method_res_type(ctp, i, get_method_res_type(mtp, i));
+
+ NEW_ARR_A(ir_node *, in, n_param + 1);
+
+ /* FIXME: we don't need a new pointer type in every step */
+ tp = get_irg_frame_type(current_ir_graph);
+ tp = new_type_pointer(tp);
+ set_method_param_type(ctp, 0, tp);
+
+ in[0] = get_Builtin_param(callee, 2);
+ for (i = 0; i < n_param; ++i) {
+ set_method_param_type(ctp, i + 1, get_method_param_type(mtp, i));
+ in[i + 1] = get_Call_param(call, i);
+ }
+ var = get_method_variadicity(mtp);
+ set_method_variadicity(ctp, var);
+ if (var == variadicity_variadic) {
+ set_method_first_variadic_param_index(ctp, get_method_first_variadic_param_index(mtp) + 1);
+ }
+ /* When we resolve a trampoline, the function must be called by a this-call */
+ set_method_calling_convention(ctp, get_method_calling_convention(mtp) | cc_this_call);
+ set_method_additional_properties(ctp, get_method_additional_properties(mtp));
+
+ adr = get_Builtin_param(callee, 1);
+
+ db = get_irn_dbg_info(call);
+ bl = get_nodes_block(call);
+
+ res = new_rd_Call(db, bl, mem, adr, n_param + 1, in, ctp);
+ if (get_irn_pinned(call) == op_pin_state_floats)
+ set_irn_pinned(res, op_pin_state_floats);
+ return res;
+} /* transform_node_Call */
/**
* Tries several [inplace] [optimizing] transformations and returns an
* transformations _do_ generate new nodes, and thus the old node must
* not be freed even if the equivalent node isn't the old one.
*/
-static ir_node *transform_node(ir_node *n) {
+static ir_node *transform_node(ir_node *n)
+{
ir_node *oldn;
/*
*/
do {
oldn = n;
- if (n->op->ops.transform_node)
+ if (n->op->ops.transform_node != NULL)
n = n->op->ops.transform_node(n);
} while (oldn != n);
CASE(End);
CASE(Mux);
CASE(Sync);
+ CASE(Call);
default:
/* leave NULL */;
}
#define N_IR_NODES 512
/** Compares the attributes of two Const nodes. */
-static int node_cmp_attr_Const(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Const(ir_node *a, ir_node *b)
+{
return (get_Const_tarval(a) != get_Const_tarval(b))
|| (get_Const_type(a) != get_Const_type(b));
} /* node_cmp_attr_Const */
/** Compares the attributes of two Proj nodes. */
-static int node_cmp_attr_Proj(ir_node *a, ir_node *b) {
- return get_irn_proj_attr(a) != get_irn_proj_attr(b);
+static int node_cmp_attr_Proj(ir_node *a, ir_node *b)
+{
+ return a->attr.proj != b->attr.proj;
} /* node_cmp_attr_Proj */
/** Compares the attributes of two Filter nodes. */
-static int node_cmp_attr_Filter(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Filter(ir_node *a, ir_node *b)
+{
return get_Filter_proj(a) != get_Filter_proj(b);
} /* node_cmp_attr_Filter */
/** Compares the attributes of two Alloc nodes. */
-static int node_cmp_attr_Alloc(ir_node *a, ir_node *b) {
- const alloc_attr *pa = get_irn_alloc_attr(a);
- const alloc_attr *pb = get_irn_alloc_attr(b);
+static int node_cmp_attr_Alloc(ir_node *a, ir_node *b)
+{
+ const alloc_attr *pa = &a->attr.alloc;
+ const alloc_attr *pb = &b->attr.alloc;
return (pa->where != pb->where) || (pa->type != pb->type);
} /* node_cmp_attr_Alloc */
/** Compares the attributes of two Free nodes. */
-static int node_cmp_attr_Free(ir_node *a, ir_node *b) {
- const free_attr *pa = get_irn_free_attr(a);
- const free_attr *pb = get_irn_free_attr(b);
+static int node_cmp_attr_Free(ir_node *a, ir_node *b)
+{
+ const free_attr *pa = &a->attr.free;
+ const free_attr *pb = &b->attr.free;
return (pa->where != pb->where) || (pa->type != pb->type);
} /* node_cmp_attr_Free */
/** Compares the attributes of two SymConst nodes. */
-static int node_cmp_attr_SymConst(ir_node *a, ir_node *b) {
- const symconst_attr *pa = get_irn_symconst_attr(a);
- const symconst_attr *pb = get_irn_symconst_attr(b);
+static int node_cmp_attr_SymConst(ir_node *a, ir_node *b)
+{
+ const symconst_attr *pa = &a->attr.symc;
+ const symconst_attr *pb = &b->attr.symc;
return (pa->kind != pb->kind)
|| (pa->sym.type_p != pb->sym.type_p)
|| (pa->tp != pb->tp);
} /* node_cmp_attr_SymConst */
/** Compares the attributes of two Call nodes. */
-static int node_cmp_attr_Call(ir_node *a, ir_node *b) {
- return get_irn_call_attr(a) != get_irn_call_attr(b);
+static int node_cmp_attr_Call(ir_node *a, ir_node *b)
+{
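+ /* both the call type and the tail-call property must match for CSE */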
+ const call_attr *pa = &a->attr.call;
+ const call_attr *pb = &b->attr.call;
+ return (pa->type != pb->type)
+ || (pa->tail_call != pb->tail_call);
} /* node_cmp_attr_Call */
/** Compares the attributes of two Sel nodes. */
-static int node_cmp_attr_Sel(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Sel(ir_node *a, ir_node *b)
+{
const ir_entity *a_ent = get_Sel_entity(a);
const ir_entity *b_ent = get_Sel_entity(b);
-#if 0
- return
- (a_ent->kind != b_ent->kind) ||
- (a_ent->name != b_ent->name) ||
- (a_ent->owner != b_ent->owner) ||
- (a_ent->ld_name != b_ent->ld_name) ||
- (a_ent->type != b_ent->type);
-#endif
- /* Matze: inlining of functions can produce 2 entities with same type,
- * name, etc. */
return a_ent != b_ent;
} /* node_cmp_attr_Sel */
/** Compares the attributes of two Phi nodes. */
-static int node_cmp_attr_Phi(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Phi(ir_node *a, ir_node *b)
+{
/* we can only enter this function if both nodes have the same number of inputs,
hence it is enough to check if one of them is a Phi0 */
if (is_Phi0(a)) {
/* check the Phi0 pos attribute */
- return get_irn_phi_attr(a)->u.pos != get_irn_phi_attr(b)->u.pos;
+ return a->attr.phi.u.pos != b->attr.phi.u.pos;
}
return 0;
} /* node_cmp_attr_Phi */
/** Compares the attributes of two Conv nodes. */
-static int node_cmp_attr_Conv(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Conv(ir_node *a, ir_node *b)
+{
return get_Conv_strict(a) != get_Conv_strict(b);
} /* node_cmp_attr_Conv */
/** Compares the attributes of two Cast nodes. */
-static int node_cmp_attr_Cast(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Cast(ir_node *a, ir_node *b)
+{
return get_Cast_type(a) != get_Cast_type(b);
} /* node_cmp_attr_Cast */
/** Compares the attributes of two Load nodes. */
-static int node_cmp_attr_Load(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Load(ir_node *a, ir_node *b)
+{
if (get_Load_volatility(a) == volatility_is_volatile ||
get_Load_volatility(b) == volatility_is_volatile)
/* NEVER do CSE on volatile Loads */
} /* node_cmp_attr_Load */
/** Compares the attributes of two Store nodes. */
-static int node_cmp_attr_Store(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Store(ir_node *a, ir_node *b)
+{
/* do not CSE Stores with different alignment. Be conservative. */
if (get_Store_align(a) != get_Store_align(b))
return 1;
} /* node_cmp_attr_Store */
/** Compares two exception attributes */
-static int node_cmp_exception(ir_node *a, ir_node *b) {
- const except_attr *ea = get_irn_except_attr(a);
- const except_attr *eb = get_irn_except_attr(b);
+static int node_cmp_exception(ir_node *a, ir_node *b)
+{
+ const except_attr *ea = &a->attr.except;
+ const except_attr *eb = &b->attr.except;
return ea->pin_state != eb->pin_state;
}
#define node_cmp_attr_Bound node_cmp_exception
/** Compares the attributes of two Div nodes. */
-static int node_cmp_attr_Div(ir_node *a, ir_node *b) {
- const divmod_attr *ma = get_irn_divmod_attr(a);
- const divmod_attr *mb = get_irn_divmod_attr(b);
+static int node_cmp_attr_Div(ir_node *a, ir_node *b)
+{
+ const divmod_attr *ma = &a->attr.divmod;
+ const divmod_attr *mb = &b->attr.divmod;
return ma->exc.pin_state != mb->exc.pin_state ||
- ma->res_mode != mb->res_mode ||
+ ma->resmode != mb->resmode ||
ma->no_remainder != mb->no_remainder;
} /* node_cmp_attr_Div */
/** Compares the attributes of two DivMod nodes. */
-static int node_cmp_attr_DivMod(ir_node *a, ir_node *b) {
- const divmod_attr *ma = get_irn_divmod_attr(a);
- const divmod_attr *mb = get_irn_divmod_attr(b);
+static int node_cmp_attr_DivMod(ir_node *a, ir_node *b)
+{
+ const divmod_attr *ma = &a->attr.divmod;
+ const divmod_attr *mb = &b->attr.divmod;
return ma->exc.pin_state != mb->exc.pin_state ||
- ma->res_mode != mb->res_mode;
+ ma->resmode != mb->resmode;
} /* node_cmp_attr_DivMod */
/** Compares the attributes of two Mod nodes. */
-static int node_cmp_attr_Mod(ir_node *a, ir_node *b) {
- const divmod_attr *ma = get_irn_divmod_attr(a);
- const divmod_attr *mb = get_irn_divmod_attr(b);
- return ma->exc.pin_state != mb->exc.pin_state ||
- ma->res_mode != mb->res_mode;
+static int node_cmp_attr_Mod(ir_node *a, ir_node *b)
+{
+ return node_cmp_attr_DivMod(a, b);
} /* node_cmp_attr_Mod */
/** Compares the attributes of two Quot nodes. */
-static int node_cmp_attr_Quot(ir_node *a, ir_node *b) {
- const divmod_attr *ma = get_irn_divmod_attr(a);
- const divmod_attr *mb = get_irn_divmod_attr(b);
- return ma->exc.pin_state != mb->exc.pin_state ||
- ma->res_mode != mb->res_mode;
+static int node_cmp_attr_Quot(ir_node *a, ir_node *b)
+{
+ return node_cmp_attr_DivMod(a, b);
} /* node_cmp_attr_Quot */
/** Compares the attributes of two Confirm nodes. */
-static int node_cmp_attr_Confirm(ir_node *a, ir_node *b) {
+static int node_cmp_attr_Confirm(ir_node *a, ir_node *b)
+{
+ /* no need to compare the bound, as this is an input */
return (get_Confirm_cmp(a) != get_Confirm_cmp(b));
} /* node_cmp_attr_Confirm */
+/** Compares the attributes of two Builtin nodes. */
+static int node_cmp_attr_Builtin(ir_node *a, ir_node *b)
+{
+ /* no need to compare the type, equal kind means equal type */
+ return get_Builtin_kind(a) != get_Builtin_kind(b);
+} /* node_cmp_attr_Builtin */
+
/** Compares the attributes of two ASM nodes. */
-static int node_cmp_attr_ASM(ir_node *a, ir_node *b) {
+static int node_cmp_attr_ASM(ir_node *a, ir_node *b)
+{
int i, n;
const ir_asm_constraint *ca;
const ir_asm_constraint *cb;
return 0;
} /* node_cmp_attr_ASM */
+/** Compares the nonexistent attributes of two Dummy nodes. */
+static int node_cmp_attr_Dummy(ir_node *a, ir_node *b)
+{
+ (void) a;
+ (void) b;
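+ /* Dummy nodes never compare equal, so they are never CSEd */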
+ return 1;
+}
+
/**
* Set the default node attribute compare operation for an ir_op_ops.
*
CASE(Mod);
CASE(Quot);
CASE(Bound);
+ CASE(Builtin);
+ CASE(Dummy);
/* FIXME CopyB */
default:
- /* leave NULL */;
+ /* leave NULL */
+ break;
}
return ops;
* Compare function for two nodes in the value table. Gets two
* nodes as parameters. Returns 0 if the nodes are a Common Sub Expression.
*/
-int identities_cmp(const void *elt, const void *key) {
+int identities_cmp(const void *elt, const void *key)
+{
ir_node *a = (ir_node *)elt;
ir_node *b = (ir_node *)key;
int i, irn_arity_a;
}
/* compare a->in[0..ins] with b->in[0..ins] */
- for (i = 0; i < irn_arity_a; i++)
- if (get_irn_intra_n(a, i) != get_irn_intra_n(b, i))
- return 1;
+ for (i = 0; i < irn_arity_a; ++i) {
+ ir_node *pred_a = get_irn_intra_n(a, i);
+ ir_node *pred_b = get_irn_intra_n(b, i);
+ if (pred_a != pred_b) {
+ /* predecessors are allowed to differ only if both are CSE neutral */
+ if (!is_irn_cse_neutral(pred_a) || !is_irn_cse_neutral(pred_b))
+ return 1;
+ }
+ }
/*
* here, we already know that the nodes are identical except their
*
* @param node The IR-node
*/
-unsigned ir_node_hash(const ir_node *node) {
+unsigned ir_node_hash(const ir_node *node)
+{
return node->op->ops.hash(node);
} /* ir_node_hash */
-pset *new_identities(void) {
+pset *new_identities(void)
+{
return new_pset(identities_cmp, N_IR_NODES);
} /* new_identities */
-void del_identities(pset *value_table) {
+void del_identities(pset *value_table)
+{
del_pset(value_table);
} /* del_identities */
/* Normalize a node by putting constants (and operands with larger
* node index) on the right (operator side). */
-void ir_normalize_node(ir_node *n) {
+void ir_normalize_node(ir_node *n)
+{
if (is_op_commutative(get_irn_op(n))) {
ir_node *l = get_binop_left(n);
ir_node *r = get_binop_right(n);
* dominance info here: we know that one block must dominate the other and
* following the only block input will allow us to find it.
*/
-static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node) {
+static void update_known_irn(ir_node *known_irn, const ir_node *new_ir_node)
+{
ir_node *known_blk, *new_block, *block, *mbh;
if (get_opt_global_cse()) {
* @return a node that computes the same value as n or n if no such
* node could be found
*/
-ir_node *identify_remember(pset *value_table, ir_node *n) {
- ir_node *o = NULL;
+ir_node *identify_remember(pset *value_table, ir_node *n)
+{
+ ir_node *nn = NULL;
if (!value_table) return n;
ir_normalize_node(n);
/* lookup or insert in hash table with given hash key. */
- o = pset_insert(value_table, n, ir_node_hash(n));
+ nn = pset_insert(value_table, n, ir_node_hash(n));
- if (o != n) {
- update_known_irn(o, n);
+ if (nn != n) {
+ update_known_irn(nn, n);
+
+ /* n is reachable again */
+ edges_node_revival(nn, get_irn_irg(nn));
}
- return o;
+ return nn;
} /* identify_remember */
/**
* @param value_table The value table
* @param n The node to lookup
*/
-static inline ir_node *identify_cons(pset *value_table, ir_node *n) {
+static inline ir_node *identify_cons(pset *value_table, ir_node *n)
+{
ir_node *old = n;
n = identify_remember(value_table, n);
} /* identify_cons */
/* Add a node to the identities value table. */
-void add_identities(pset *value_table, ir_node *node) {
+void add_identities(pset *value_table, ir_node *node)
+{
if (get_opt_cse() && is_no_Block(node))
identify_remember(value_table, node);
} /* add_identities */
/* Visit each node in the value table of a graph. */
-void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env) {
+void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env)
+{
ir_node *node;
ir_graph *rem = current_ir_graph;
* Garbage in, garbage out. If a node has a dead input, i.e., the
* Bad node is input to the node, return the Bad node.
*/
-static ir_node *gigo(ir_node *node) {
+static ir_node *gigo(ir_node *node)
+{
int i, irn_arity;
ir_op *op = get_irn_op(node);
*
* current_ir_graph must be set to the graph of the node!
*/
-ir_node *optimize_node(ir_node *n) {
+ir_node *optimize_node(ir_node *n)
+{
tarval *tv;
ir_node *oldn = n;
ir_opcode iro = get_irn_opcode(n);
* nodes lying on the obstack. Remove these by a dead node elimination,
* i.e., a copying garbage collection.
*/
-ir_node *optimize_in_place_2(ir_node *n) {
+ir_node *optimize_in_place_2(ir_node *n)
+{
tarval *tv;
ir_node *oldn = n;
ir_opcode iro = get_irn_opcode(n);
/**
* Wrapper for external use, set proper status bits after optimization.
*/
-ir_node *optimize_in_place(ir_node *n) {
+ir_node *optimize_in_place(ir_node *n)
+{
/* Handle graph state */
assert(get_irg_phase_state(current_ir_graph) != phase_building);
/**
* Calculate a hash value of a Const node.
*/
-static unsigned hash_Const(const ir_node *node) {
+static unsigned hash_Const(const ir_node *node)
+{
unsigned h;
/* special value for const, as they only differ in their tarval. */
- h = HASH_PTR(node->attr.con.tv);
- h = 9*h + HASH_PTR(get_irn_mode(node));
+ h = HASH_PTR(node->attr.con.tarval);
return h;
} /* hash_Const */
/**
* Calculate a hash value of a SymConst node.
*/
-static unsigned hash_SymConst(const ir_node *node) {
+static unsigned hash_SymConst(const ir_node *node)
+{
unsigned h;
- /* special value for const, as they only differ in their symbol. */
+ /* all members of the symbol union are pointers, so hashing any of them works */
h = HASH_PTR(node->attr.symc.sym.type_p);
- h = 9*h + HASH_PTR(get_irn_mode(node));
return h;
} /* hash_SymConst */
/*
* Sets the default operation for an ir_ops.
*/
-ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops) {
+ir_op_ops *firm_set_default_operations(ir_opcode code, ir_op_ops *ops)
+{
ops = firm_set_default_hash(code, ops);
ops = firm_set_default_computed_value(code, ops);
ops = firm_set_default_equivalent_node(code, ops);