# include "config.h"
#endif
-#ifdef HAVE_STRING_H
#include <string.h>
-#endif
#include "irnode_t.h"
#include "irgraph_t.h"
#include "irarch.h"
#include "hashptr.h"
#include "archop.h"
-#include "opt_polymorphy.h"
#include "opt_confirms.h"
+#include "opt_polymorphy.h"
#include "irtools.h"
#include "xmalloc.h"
tarval_add(ta, tb);
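/* the Add above is computed only for its side effect on the carry flag */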
return tarval_carry() ? get_mode_one(m) : get_mode_null(m);
} else {
- if ( (classify_tarval(ta) == TV_CLASSIFY_NULL)
- || (classify_tarval(tb) == TV_CLASSIFY_NULL))
+ if (tarval_is_null(ta) || tarval_is_null(tb))
return get_mode_null(m);
}
return tarval_bad;
if ((ta != tarval_bad) && (tb != tarval_bad)) {
return tarval_cmp(ta, tb) == pn_Cmp_Lt ? get_mode_one(m) : get_mode_null(m);
- } else if (classify_tarval(ta) == TV_CLASSIFY_NULL) {
+ } else if (tarval_is_null(ta)) {
return get_mode_null(m);
}
return tarval_bad;
static tarval *computed_value_Mul(ir_node *n) {
ir_node *a = get_Mul_left(n);
ir_node *b = get_Mul_right(n);
+ ir_mode *mode;
tarval *ta = value_of(a);
tarval *tb = value_of(b);
- if ((ta != tarval_bad) && (tb != tarval_bad) && (get_irn_mode(a) == get_irn_mode(b))) {
+ mode = get_irn_mode(n);
+ if (mode != get_irn_mode(a)) {
+ /* n * n = 2n bit multiplication */
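+ /* e.g. a 16 x 16 -> 32 bit Mul: widen both tarvals to the 32 bit
+ result mode, then the folded product cannot wrap */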
+ ta = tarval_convert_to(ta, mode);
+ tb = tarval_convert_to(tb, mode);
+ }
+
+ if (ta != tarval_bad && tb != tarval_bad) {
return tarval_mul(ta, tb);
} else {
- /* a*0 = 0 or 0*b = 0:
- calls computed_value recursive and returns the 0 with proper
- mode. */
- if ((ta != tarval_bad) && (ta == get_mode_null(get_tarval_mode(ta))))
+ /* a*0 = 0 or 0*b = 0 */
+ if (ta == get_mode_null(mode))
return ta;
- if ((tb != tarval_bad) && (tb == get_mode_null(get_tarval_mode(tb))))
+ if (tb == get_mode_null(mode))
return tb;
}
return tarval_bad;
if ((ta != tarval_bad) && (tb != tarval_bad)) {
return tarval_and (ta, tb);
} else {
- tarval *v;
-
- if ( (classify_tarval ((v = ta)) == TV_CLASSIFY_NULL)
- || (classify_tarval ((v = tb)) == TV_CLASSIFY_NULL)) {
- return v;
- }
+ if (tarval_is_null(ta)) return ta;
+ if (tarval_is_null(tb)) return tb;
}
return tarval_bad;
} /* computed_value_And */
if ((ta != tarval_bad) && (tb != tarval_bad)) {
return tarval_or (ta, tb);
} else {
- tarval *v;
- if ( (classify_tarval ((v = ta)) == TV_CLASSIFY_ALL_ONE)
- || (classify_tarval ((v = tb)) == TV_CLASSIFY_ALL_ONE)) {
- return v;
- }
+ if (tarval_is_all_one(ta)) return ta;
+ if (tarval_is_all_one(tb)) return tb;
}
return tarval_bad;
} /* computed_value_Or */
ir_node *aba = skip_Id(skip_Proj(ab));
if ( ( (/* aa is ProjP and aaa is Alloc */
- (get_irn_op(aa) == op_Proj)
- && (mode_is_reference(get_irn_mode(aa)))
- && (get_irn_op(aaa) == op_Alloc))
+ is_Proj(aa)
+ && mode_is_reference(get_irn_mode(aa))
+ && is_Alloc(aaa))
&& ( (/* ab is NULL */
- (get_irn_op(ab) == op_Const)
- && (mode_is_reference(get_irn_mode(ab)))
- && (get_Const_tarval(ab) == get_mode_null(get_irn_mode(ab))))
+ is_Const(ab)
+ && mode_is_reference(get_irn_mode(ab))
+ && is_Const_null(ab))
|| (/* ab is other Alloc */
- (get_irn_op(ab) == op_Proj)
- && (mode_is_reference(get_irn_mode(ab)))
- && (get_irn_op(aba) == op_Alloc)
+ is_Proj(ab)
+ && mode_is_reference(get_irn_mode(ab))
+ && is_Alloc(aba)
&& (aaa != aba))))
|| (/* aa is NULL and aba is Alloc */
- (get_irn_op(aa) == op_Const)
- && (mode_is_reference(get_irn_mode(aa)))
- && (get_Const_tarval(aa) == get_mode_null(get_irn_mode(aa)))
- && (get_irn_op(ab) == op_Proj)
- && (mode_is_reference(get_irn_mode(ab)))
- && (get_irn_op(aba) == op_Alloc)))
+ is_Const(aa)
+ && mode_is_reference(get_irn_mode(aa))
+ && is_Const_null(aa)
+ && is_Proj(ab)
+ && mode_is_reference(get_irn_mode(ab))
+ && is_Alloc(aba)))
/* 3.: */
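/* a fresh Alloc result is never NULL and never aliases another Alloc,
 so of the requested relation only the Ne bit can ever hold */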
return new_tarval_from_long(proj_nr & pn_Cmp_Ne, mode_b);
}
This should be true, as the block is matured before optimize is called.
But what about Phi-cycles with the Phi0/Id that could not be resolved?
Remaining Phi nodes are just Ids. */
- if ((n_preds == 1) && (get_irn_op(get_Block_cfgpred(n, 0)) == op_Jmp)) {
+ if (n_preds == 1 && is_Jmp(get_Block_cfgpred(n, 0))) {
ir_node *predblock = get_nodes_block(get_Block_cfgpred(n, 0));
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal with the self cycle. */
n = predblock;
DBG_OPT_STG(oldn, n);
}
- } else if ((n_preds == 1) &&
- (get_irn_op(skip_Proj(get_Block_cfgpred(n, 0))) == op_Cond)) {
+ } else if (n_preds == 1 && is_Cond(skip_Proj(get_Block_cfgpred(n, 0)))) {
ir_node *predblock = get_Block_cfgpred_block(n, 0);
if (predblock == oldn) {
/* Jmp jumps into the block it is in -- deal with the self cycle. */
ir_node *a = get_Block_cfgpred(n, 0);
ir_node *b = get_Block_cfgpred(n, 1);
- if ((get_irn_op(a) == op_Proj) &&
- (get_irn_op(b) == op_Proj) &&
+ if (is_Proj(a) &&
+ is_Proj(b) &&
(get_Proj_pred(a) == get_Proj_pred(b)) &&
- (get_irn_op(get_Proj_pred(a)) == op_Cond) &&
+ is_Cond(get_Proj_pred(a)) &&
(get_irn_mode(get_Cond_selector(get_Proj_pred(a))) == mode_b)) {
/* Also a single entry Block following a single exit Block. The Phis have
 the same operand twice and will be optimized away. */
* which happens in this rare construction: NULL + 3.
* Then, a Conv would be needed which we cannot include here.
*/
- if (classify_tarval (tv) == TV_CLASSIFY_NULL) {
- if (get_irn_mode(on) == get_irn_mode(n)) {
- n = on;
+ if (tarval_is_null(tv) && get_irn_mode(on) == get_irn_mode(n)) {
+ n = on;
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
- }
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
}
return n;
left = get_Add_left(n);
right = get_Add_right(n);
- if (get_irn_op(left) == op_Sub) {
+ if (is_Sub(left)) {
if (get_Sub_right(left) == right) {
/* (a - x) + x */
}
}
}
- if (get_irn_op(right) == op_Sub) {
+ if (is_Sub(right)) {
if (get_Sub_right(right) == left) {
/* x + (a - x) */
ir_node *a = get_binop_left(n);
ir_node *b = get_binop_right(n);
- if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
+ if (is_Const(b) && is_Const_null(b)) {
n = a;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
*/
static ir_node *equivalent_node_Sub(ir_node *n) {
ir_node *oldn = n;
- ir_node *a, *b;
+ ir_node *b;
ir_mode *mode = get_irn_mode(n);
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
return n;
- a = get_Sub_left(n);
b = get_Sub_right(n);
/* Beware: modes might be different */
- if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
+ if (is_Const(b) && is_Const_null(b)) {
+ ir_node *a = get_Sub_left(n);
if (mode == get_irn_mode(a)) {
n = a;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
}
- } else if (get_irn_op(a) == op_Add) {
- if (mode_wrap_around(mode)) {
- ir_node *left = get_Add_left(a);
- ir_node *right = get_Add_right(a);
-
- if (left == b) {
- if (mode == get_irn_mode(right)) {
- n = right;
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
- }
- } else if (right == b) {
- if (mode == get_irn_mode(left)) {
- n = left;
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
- }
- }
- }
}
return n;
} /* equivalent_node_Sub */
/* optimize symmetric unop */
if (get_irn_op(pred) == get_irn_op(n)) {
n = get_unop_op(pred);
- DBG_OPT_ALGSIM2(oldn, pred, n);
+ DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_IDEM_UNARY);
}
return n;
} /* equivalent_node_idempotent_unop */
/** Optimize Not(Not(x)) == x. */
#define equivalent_node_Not equivalent_node_idempotent_unop
-/** --x == x ??? Is this possible or can --x raise an
+/** -(-x) == x ??? Is this possible or can -(-x) raise an
 out of bounds exception if min != max? */
#define equivalent_node_Minus equivalent_node_idempotent_unop
static ir_node *equivalent_node_Mul(ir_node *n) {
ir_node *oldn = n;
ir_node *a = get_Mul_left(n);
- ir_node *b = get_Mul_right(n);
- /* Mul is commutative and has again an other neutral element. */
- if (classify_tarval(value_of(a)) == TV_CLASSIFY_ONE) {
- n = b;
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
- } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) {
- n = a;
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ /* here we can only handle the n * n = n bit cases */
+ if (get_irn_mode(n) == get_irn_mode(a)) {
+ ir_node *b = get_Mul_right(n);
+
+ /* Mul is commutative and again has its own neutral element. */
+ if (is_Const(a) && is_Const_one(a)) {
+ n = b;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ } else if (is_Const(b) && is_Const_one(b)) {
+ n = a;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ }
}
return n;
} /* equivalent_node_Mul */
ir_node *b = get_Div_right(n);
/* Div is not commutative. */
- if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
+ if (is_Const(b) && is_Const_one(b)) { /* div(x, 1) == x */
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
ir_node *b = get_Quot_right(n);
/* Div is not commutative. */
- if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* Quot(x, 1) == x */
- /* Turn Quot into a tuple (mem, bad, a) */
+ if (is_Const(b) && is_Const_one(b)) { /* Quot(x, 1) == x */
+ /* Turn Quot into a tuple (mem, jmp, bad, a) */
ir_node *mem = get_Quot_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
ir_node *b = get_DivMod_right(n);
/* Div is not commutative. */
- if (classify_tarval(value_of(b)) == TV_CLASSIFY_ONE) { /* div(x, 1) == x */
- /* Turn DivMod into a tuple (mem, bad, a, 0) */
+ if (is_Const(b) && is_Const_one(b)) { /* div(x, 1) == x */
+ /* Turn DivMod into a tuple (mem, jmp, bad, a, 0) */
ir_node *a = get_DivMod_left(n);
ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *blk = get_irn_n(n, -1);
ir_mode *mode = get_DivMod_resmode(n);
turn_into_tuple(n, pn_DivMod_max);
if (a == b) {
n = a; /* Or is idempotent: a | a == a */
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_OR);
- } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_NULL) {
+ } else if (is_Const(a) && is_Const_null(a)) {
n = b;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_OR);
- } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
+ } else if (is_Const(b) && is_Const_null(b)) {
n = a;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_OR);
}
} /* equivalent_node_Or */
/**
- * Optimize a & 0b1...1 = 0b1...1 & a = a & a = a.
+ * Optimize a & 0b1...1 = 0b1...1 & a = a & a = (a|X) & a = a.
*/
static ir_node *equivalent_node_And(ir_node *n) {
ir_node *oldn = n;
if (a == b) {
n = a; /* And is idempotent: a & a == a */
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_AND);
- } else if (classify_tarval(value_of(a)) == TV_CLASSIFY_ALL_ONE) {
+ return n;
+ }
+ if (is_Const(a) && is_Const_all_one(a)) {
n = b;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
- } else if (classify_tarval(value_of(b)) == TV_CLASSIFY_ALL_ONE) {
+ return n;
+ }
+ if (is_Const(b) && is_Const_all_one(b)) {
n = a;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
+ return n;
+ }
+ if (is_Or(a)) {
+ if (b == get_Or_left(a) || b == get_Or_right(a)) {
+ /* (a|X) & a */
+ n = b;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
+ return n;
+ }
+ }
+ if (is_Or(b)) {
+ if (a == get_Or_left(b) || a == get_Or_right(b)) {
+ /* a & (a|X) */
+ n = a;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
+ return n;
+ }
}
+
return n;
} /* equivalent_node_And */
ir_mode *a_mode = get_irn_mode(a);
if (n_mode == a_mode) { /* No Conv necessary */
- /* leave strict floating point Conv's */
- if (get_Conv_strict(n))
- return n;
+ if (get_Conv_strict(n)) {
+ /* special case: the predecessor might also be a Conv */
+ if (is_Conv(a)) {
+ if (! get_Conv_strict(a)) {
+ /* first one is not strict, kick it */
+ set_Conv_op(n, get_Conv_op(a));
+ return n;
+ }
+ /* else both are strict Convs, the second one is superfluous */
+ } else {
+ /* leave strict floating point Conv's */
+ return n;
+ }
+ }
n = a;
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
- } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
+ } else if (is_Conv(a)) { /* Conv(Conv(b)) */
ir_mode *b_mode;
b = get_Conv_op(a);
if (n_mode == mode_b) {
n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
- } else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
+ } else if (mode_is_int(n_mode)) {
if (smaller_mode(b_mode, a_mode)){
n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
* Optimize Proj(Tuple) and gigo() for ProjX in Bad block,
* ProjX(Load) and ProjX(Store).
*/
-static ir_node *equivalent_node_Proj(ir_node *n) {
- ir_node *oldn = n;
- ir_node *a = get_Proj_pred(n);
+static ir_node *equivalent_node_Proj(ir_node *proj) {
+ ir_node *oldn = proj;
+ ir_node *a = get_Proj_pred(proj);
- if ( get_irn_op(a) == op_Tuple) {
+ if (is_Tuple(a)) {
/* Remove the Tuple/Proj combination. */
- if ( get_Proj_proj(n) <= get_Tuple_n_preds(a) ) {
- n = get_Tuple_pred(a, get_Proj_proj(n));
- DBG_OPT_TUPLE(oldn, a, n);
+ if (get_Proj_proj(proj) < get_Tuple_n_preds(a)) {
+ proj = get_Tuple_pred(a, get_Proj_proj(proj));
+ DBG_OPT_TUPLE(oldn, a, proj);
} else {
- assert(0); /* This should not happen! */
- n = new_Bad();
+ /* This should not happen! */
+ assert(! "found a Proj with higher number than Tuple predecessors");
+ proj = new_Bad();
}
- } else if (get_irn_mode(n) == mode_X) {
- if (is_Block_dead(get_nodes_block(skip_Proj(n)))) {
+ } else if (get_irn_mode(proj) == mode_X) {
+ if (is_Block_dead(get_nodes_block(skip_Proj(proj)))) {
/* Remove dead control flow -- early gigo(). */
- n = new_Bad();
+ proj = new_Bad();
} else if (get_opt_ldst_only_null_ptr_exceptions()) {
- ir_op *op = get_irn_op(a);
+ if (is_Load(a)) {
+ /* get the Load address */
+ ir_node *addr = get_Load_ptr(a);
+ ir_node *blk = get_irn_n(a, -1);
+ ir_node *confirm;
- if (op == op_Load || op == op_Store) {
+ if (value_not_null(addr, &confirm)) {
+ if (confirm == NULL) {
+ /* this node may float if it did not depend on a Confirm */
+ set_irn_pinned(a, op_pin_state_floats);
+ }
+ if (get_Proj_proj(proj) == pn_Load_X_except) {
+ DBG_OPT_EXC_REM(proj);
+ return new_Bad();
+ } else
+ return new_r_Jmp(current_ir_graph, blk);
+ }
+ } else if (is_Store(a)) {
/* get the load/store address */
- ir_node *addr = get_irn_n(a, 1);
+ ir_node *addr = get_Store_ptr(a);
+ ir_node *blk = get_irn_n(a, -1);
ir_node *confirm;
if (value_not_null(addr, &confirm)) {
/* this node may float if it did not depend on a Confirm */
set_irn_pinned(a, op_pin_state_floats);
}
- DBG_OPT_EXC_REM(n);
- return new_Bad();
+ if (get_Proj_proj(proj) == pn_Store_X_except) {
+ DBG_OPT_EXC_REM(proj);
+ return new_Bad();
+ } else
+ return new_r_Jmp(current_ir_graph, blk);
}
}
}
}
- return n;
+ return proj;
} /* equivalent_node_Proj */
/**
n = get_Mux_true(n);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
}
- else if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(get_irn_mode(n))) {
+ else if (is_Proj(sel) && !mode_honor_signed_zeros(get_irn_mode(n))) {
ir_node *cmp = get_Proj_pred(sel);
long proj_nr = get_Proj_proj(sel);
ir_node *b = get_Mux_false(n);
* with NaN's because -NaN == NaN.
* However, if +0 and -0 is handled differently, we cannot use the first one.
*/
- if (get_irn_op(cmp) == op_Cmp && get_Cmp_left(cmp) == a) {
- if (classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
+ if (is_Cmp(cmp) && get_Cmp_left(cmp) == a) {
+ ir_node *cmp_r = get_Cmp_right(cmp);
+ if (is_Const(cmp_r) && is_Const_null(cmp_r)) {
/* Mux(a CMP 0, X, a) */
- if (get_irn_op(b) == op_Minus && get_Minus_op(b) == a) {
+ if (is_Minus(b) && get_Minus_op(b) == a) {
/* Mux(a CMP 0, -a, a) */
if (proj_nr == pn_Cmp_Eq) {
/* Mux(a == 0, -a, a) ==> -a */
n = a;
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
}
- } else if (classify_Const(b) == CNST_NULL) {
+ } else if (is_Const(b) && is_Const_null(b)) {
/* Mux(a CMP 0, 0, a) */
if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) {
/* Mux(a != 0, 0, a) ==> a */
ir_node *left = get_Cmp_left(n);
ir_node *right = get_Cmp_right(n);
- if (get_irn_op(left) == op_Minus && get_irn_op(right) == op_Minus &&
+ if (is_Minus(left) && is_Minus(right) &&
!mode_overflow_on_unary_Minus(get_irn_mode(left))) {
left = get_Minus_op(left);
right = get_Minus_op(right);
ir_node *pred = get_Confirm_value(n);
pn_Cmp pnc = get_Confirm_cmp(n);
- if (get_irn_op(pred) == op_Confirm && pnc == get_Confirm_cmp(pred)) {
+ if (is_Confirm(pred) && pnc == get_Confirm_cmp(pred)) {
/*
* rare case: two identical Confirms one after another,
* replace the second one with the first.
ir_node *b = get_CopyB_src(n);
if (a == b) {
- /* Turn CopyB into a tuple (mem, bad, bad) */
+ /* Turn CopyB into a tuple (mem, jmp, bad, bad) */
ir_node *mem = get_CopyB_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_CopyB_max);
- set_Tuple_pred(n, pn_CopyB_M, mem);
+ set_Tuple_pred(n, pn_CopyB_M, mem);
set_Tuple_pred(n, pn_CopyB_X_regular, new_r_Jmp(current_ir_graph, blk));
- set_Tuple_pred(n, pn_CopyB_X_except, new_Bad()); /* no exception */
- set_Tuple_pred(n, pn_CopyB_M_except, new_Bad());
+ set_Tuple_pred(n, pn_CopyB_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_CopyB_M_except, new_Bad());
}
return n;
} /* equivalent_node_CopyB */
/* By definition lower < upper, so if idx == lower -->
lower <= idx && idx < upper */
if (idx == lower) {
- /* Turn Bound into a tuple (mem, bad, idx) */
+ /* Turn Bound into a tuple (mem, jmp, bad, idx) */
ret_tuple = 1;
} else {
ir_node *pred = skip_Proj(idx);
/*
* One could expect that we simply return the previous
* Bound here. However, this would be wrong, as we could
- * add an exception Proj to a new location than.
- * So, we must turn in into a tuple
+ * add an exception Proj to a new location then.
+ * So, we must turn it into a tuple.
*/
ret_tuple = 1;
}
}
}
if (ret_tuple) {
- /* Turn Bound into a tuple (mem, bad, idx) */
+ /* Turn Bound into a tuple (mem, jmp, bad, idx) */
ir_node *mem = get_Bound_mem(n);
ir_node *blk = get_nodes_block(n);
turn_into_tuple(n, pn_Bound_max);
#undef CASE
} /* firm_set_default_equivalent_node */
-/**
- * Do node specific optimizations of nodes predecessors.
- */
-static void optimize_preds(ir_node *n) {
- switch (get_irn_opcode(n)) {
-
- case iro_Cmp: { /* We don't want Cast as input to Cmp. */
- ir_node *a = get_Cmp_left(n), *b = get_Cmp_right(n);
-
- if (get_irn_op(a) == op_Cast) {
- a = get_Cast_op(a);
- set_Cmp_left(n, a);
- }
- if (get_irn_op(b) == op_Cast) {
- b = get_Cast_op(b);
- set_Cmp_right(n, b);
- }
- break;
- }
-
- default: break;
- } /* end switch */
-} /* optimize_preds */
-
/**
* Returns non-zero if a node is a Phi node
* with all predecessors constant.
ir_node *right = get_binop_right(n);
int ref_bits = get_mode_size_bits(mode);
- if (get_irn_op(left) == op_Conv) {
+ if (is_Conv(left)) {
ir_mode *mode = get_irn_mode(left);
int bits = get_mode_size_bits(mode);
}
}
- if (get_irn_op(right) == op_Conv) {
+ if (is_Conv(right)) {
ir_mode *mode = get_irn_mode(right);
int bits = get_mode_size_bits(mode);
return c; \
}
-#define HANDLE_UNOP_PHI(op,a,c) \
- c = NULL; \
- if (is_const_Phi(a)) { \
- /* check for Op(Phi) */ \
- c = apply_unop_on_phi(a, op); \
- } \
- if (c) { \
- DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
- return c; \
+#define HANDLE_UNOP_PHI(op,a,c) \
+ c = NULL; \
+ if (is_const_Phi(a)) { \
+ /* check for Op(Phi) */ \
+ c = apply_unop_on_phi(a, op); \
+ if (c) { \
+ DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI); \
+ return c; \
+ } \
}
-
/**
* Do the AddSub optimization, then Transform
* Constant folding on Phi
return n;
if (mode_is_num(mode)) {
- if (a == b) {
+ /* the following code leads to endless recursion when Muls are replaced by a simple instruction chain */
+ if (!is_arch_dep_running() && a == b && mode_is_int(mode)) {
ir_node *block = get_irn_n(n, -1);
n = new_rd_Mul(
new_r_Const_long(current_ir_graph, block, mode, 2),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_A);
- } else if (get_irn_op(a) == op_Minus) {
+ return n;
+ }
+ if (is_Minus(a)) {
n = new_rd_Sub(
get_irn_dbg_info(n),
current_ir_graph,
get_Minus_op(a),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
- } else if (get_irn_op(b) == op_Minus) {
+ return n;
+ }
+ if (is_Minus(b)) {
n = new_rd_Sub(
get_irn_dbg_info(n),
current_ir_graph,
get_Minus_op(b),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
+ return n;
}
- /* do NOT execute this code if reassociation is enabled, it does the inverse! */
- else if (!get_opt_reassociation() && get_irn_op(a) == op_Mul) {
- ir_node *ma = get_Mul_left(a);
- ir_node *mb = get_Mul_right(a);
-
- if (b == ma) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- ma,
- new_rd_Add(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- } else if (b == mb) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_rd_Add(
+ if (! is_reassoc_running()) {
+ /* do NOT execute this code if reassociation is enabled, it does the inverse! */
+ if (is_Mul(a)) {
+ ir_node *ma = get_Mul_left(a);
+ ir_node *mb = get_Mul_right(a);
+
+ if (b == ma) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
ma,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- }
- }
- /* do NOT execute this code if reassociation is enabled, it does the inverse! */
- else if (!get_opt_reassociation() && get_irn_op(b) == op_Mul) {
- ir_node *ma = get_Mul_left(b);
- ir_node *mb = get_Mul_right(b);
-
- if (a == ma) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- ma,
- new_rd_Add(
+ new_rd_Add(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ mb,
+ new_r_Const_long(current_ir_graph, blk, mode, 1),
+ mode),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ return n;
+ } else if (b == mb) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
mb,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
- } else if (a == mb) {
- ir_node *blk = get_irn_n(n, -1);
- n = new_rd_Mul(
- get_irn_dbg_info(n), current_ir_graph, blk,
- mb,
- new_rd_Add(
+ new_rd_Add(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ ma,
+ new_r_Const_long(current_ir_graph, blk, mode, 1),
+ mode),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ return n;
+ }
+ }
+ if (is_Mul(b)) {
+ ir_node *ma = get_Mul_left(b);
+ ir_node *mb = get_Mul_right(b);
+
+ if (a == ma) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Mul(
get_irn_dbg_info(n), current_ir_graph, blk,
ma,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
- mode),
- mode);
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ new_rd_Add(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ mb,
+ new_r_Const_long(current_ir_graph, blk, mode, 1),
+ mode),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ return n;
+ }
+ if (a == mb) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Mul(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ mb,
+ new_rd_Add(
+ get_irn_dbg_info(n), current_ir_graph, blk,
+ ma,
+ new_r_Const_long(current_ir_graph, blk, mode, 1),
+ mode),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
+ return n;
+ }
+ }
+ }
+ if (get_mode_arithmetic(mode) == irma_twos_complement) {
+ /* Here we rely on constants being on the RIGHT side */
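+ /* the rules below follow from the two's complement identity -x == ~x + 1 */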
+ if (is_Not(a)) {
+ ir_node *op = get_Not_op(a);
+
+ if (is_Const(b) && is_Const_one(b)) {
+ /* ~x + 1 = -x */
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
+ return n;
+ }
+ if (op == b) {
+ /* ~x + x = -1 */
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_r_Const(current_ir_graph, blk, mode, get_mode_minus_one(mode));
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
+ return n;
+ }
+ }
+ if (is_Not(b)) {
+ ir_node *op = get_Not_op(b);
+
+ if (op == a) {
+ /* x + ~x = -1 */
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_r_Const(current_ir_graph, blk, mode, get_mode_minus_one(mode));
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
+ return n;
+ }
}
}
}
return n;
} /* transform_node_Add */
+/* returns -cnst, or NULL in case the constant cannot be negated */
+static ir_node *const_negate(ir_node *cnst) {
+ tarval *tv = tarval_neg(get_Const_tarval(cnst));
+ dbg_info *dbgi = get_irn_dbg_info(cnst);
+ ir_graph *irg = get_irn_irg(cnst);
+ ir_node *block = get_nodes_block(cnst);
+ ir_mode *mode = get_irn_mode(cnst);
+ if (tv == tarval_bad) return NULL;
+ return new_rd_Const(dbgi, irg, block, mode, tv);
+}
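+/* (used by the Sub, Mul and Minus rules below to fold a negation into the constant) */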
+
/**
* Do the AddSub optimization, then Transform
* Constant folding on Phi
* Sub(0,a) -> Minus(a)
* Sub(Mul(a, x), a) -> Mul(a, x-1)
* Sub(Sub(x, y), b) -> Sub(x, Add(y,b))
+ * Sub(Add(a, x), x) -> a
+ * Sub(x, Add(x, a)) -> -a
+ * Sub(x, Const) -> Add(x, -Const)
*/
static ir_node *transform_node_Sub(ir_node *n) {
ir_mode *mode;
a = get_Sub_left(n);
b = get_Sub_right(n);
- HANDLE_BINOP_PHI(tarval_sub, a,b,c);
-
mode = get_irn_mode(n);
+restart:
+ HANDLE_BINOP_PHI(tarval_sub, a,b,c);
+
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
return n;
+ if (is_Const(b) && get_irn_mode(b) != mode_P) {
+ /* a - C -> a + (-C) */
+ ir_node *cnst = const_negate(b);
+ if (cnst != NULL) {
+ ir_node *block = get_nodes_block(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_graph *irg = get_irn_irg(n);
+
+ n = new_rd_Add(dbgi, irg, block, a, cnst, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
+ return n;
+ }
+ }
+
+ if (is_Minus(a)) { /* (-a) - b -> -(a + b) */
+ ir_graph *irg = current_ir_graph;
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_node *left = get_Minus_op(a);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *add = new_rd_Add(dbg, irg, block, left, b, mode);
+
+ n = new_rd_Minus(dbg, irg, block, add, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
+ return n;
+ } else if (is_Minus(b)) { /* a - (-b) -> a + b */
+ ir_graph *irg = current_ir_graph;
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_node *right = get_Minus_op(b);
+ ir_mode *mode = get_irn_mode(n);
+
+ n = new_rd_Add(dbg, irg, block, a, right, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MINUS);
+ return n;
+ } else if (is_Sub(b)) { /* a - (b - c) -> a + (c - b) */
+ ir_graph *irg = current_ir_graph;
+ dbg_info *s_dbg = get_irn_dbg_info(b);
+ ir_node *s_block = get_nodes_block(b);
+ ir_node *s_left = get_Sub_right(b);
+ ir_node *s_right = get_Sub_left(b);
+ ir_mode *s_mode = get_irn_mode(b);
+ ir_node *sub = new_rd_Sub(s_dbg, irg, s_block, s_left, s_right, s_mode);
+ dbg_info *a_dbg = get_irn_dbg_info(n);
+ ir_node *a_block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(n);
+
+ n = new_rd_Add(a_dbg, irg, a_block, a, sub, a_mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
+ return n;
+ } else if (is_Mul(b)) { /* a - (b * C) -> a + (b * -C) */
+ ir_node *m_right = get_Mul_right(b);
+ if (is_Const(m_right)) {
+ ir_node *cnst2 = const_negate(m_right);
+ if (cnst2 != NULL) {
+ ir_graph *irg = current_ir_graph;
+ dbg_info *m_dbg = get_irn_dbg_info(b);
+ ir_node *m_block = get_nodes_block(b);
+ ir_node *m_left = get_Mul_left(b);
+ ir_mode *m_mode = get_irn_mode(b);
+ ir_node *mul = new_rd_Mul(m_dbg, irg, m_block, m_left, cnst2, m_mode);
+ dbg_info *a_dbg = get_irn_dbg_info(n);
+ ir_node *a_block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(n);
+
+ n = new_rd_Add(a_dbg, irg, a_block, a, mul, a_mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
+ return n;
+ }
+ }
+ }
+
/* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
- if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
+ if (mode_is_num(mode) && mode == get_irn_mode(a) && is_Const(a) && is_Const_null(a)) {
n = new_rd_Minus(
get_irn_dbg_info(n),
current_ir_graph,
b,
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A);
+ return n;
+ }
+ if (is_Add(a)) {
+ if (mode_wrap_around(mode)) {
+ ir_node *left = get_Add_left(a);
+ ir_node *right = get_Add_right(a);
+
+ /* FIXME: Do the Convs work only for two's complement or generally? */
+ if (left == b) {
+ if (mode != get_irn_mode(right)) {
+ /* This Sub is an effective Cast */
+ right = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), right, mode);
+ }
+ n = right;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ return n;
+ } else if (right == b) {
+ if (mode != get_irn_mode(left)) {
+ /* This Sub is an effective Cast */
+ left = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), left, mode);
+ }
+ n = left;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ return n;
+ }
+ }
+ }
+ if (is_Add(b)) {
+ if (mode_wrap_around(mode)) {
+ ir_node *left = get_Add_left(b);
+ ir_node *right = get_Add_right(b);
+
+ /* FIXME: Do the Convs work only for two's complement or generally? */
+ if (left == a) {
+ ir_mode *r_mode = get_irn_mode(right);
+
+ n = new_r_Minus(get_irn_irg(n), get_irn_n(n, -1), right, r_mode);
+ if (mode != r_mode) {
+ /* This Sub is an effective Cast */
+ n = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), n, mode);
+ }
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ return n;
+ } else if (right == a) {
+ ir_mode *l_mode = get_irn_mode(left);
+
+ n = new_r_Minus(get_irn_irg(n), get_irn_n(n, -1), left, l_mode);
+ if (mode != l_mode) {
+ /* This Sub is an effective Cast */
+ n = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), n, mode);
+ }
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+ return n;
+ }
+ }
+ }
+ if (mode_is_int(mode) && is_Conv(a) && is_Conv(b)) {
+ ir_mode *mode = get_irn_mode(a);
+
+ if (mode == get_irn_mode(b)) {
+ ir_mode *ma, *mb;
+
+ a = get_Conv_op(a);
+ b = get_Conv_op(b);
+
+ /* check if it's allowed to skip the conv */
+ ma = get_irn_mode(a);
+ mb = get_irn_mode(b);
+
+ if (mode_is_reference(ma) && mode_is_reference(mb)) {
+ /* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
+ set_Sub_left(n, a);
+ set_Sub_right(n, b);
+
+ goto restart;
+ }
+ }
}
/* do NOT execute this code if reassociation is enabled, it does the inverse! */
- else if (get_opt_reassociation() && get_irn_op(a) == op_Mul) {
+ if (!is_reassoc_running() && is_Mul(a)) {
ir_node *ma = get_Mul_left(a);
ir_node *mb = get_Mul_right(a);
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
+ return n;
} else if (mb == b) {
ir_node *blk = get_irn_n(n, -1);
n = new_rd_Mul(
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
+ return n;
}
- } else if (get_irn_op(a) == op_Sub) {
- ir_node *x = get_Sub_left(a);
- ir_node *y = get_Sub_right(a);
- ir_node *blk = get_irn_n(n, -1);
- ir_mode *m_b = get_irn_mode(b);
- ir_mode *m_y = get_irn_mode(y);
+ }
+ if (is_Sub(a)) { /* (x - y) - b -> x - (y + b) */
+ ir_node *x = get_Sub_left(a);
+ ir_node *y = get_Sub_right(a);
+ ir_node *blk = get_irn_n(n, -1);
+ ir_mode *m_b = get_irn_mode(b);
+ ir_mode *m_y = get_irn_mode(y);
+ ir_mode *add_mode;
ir_node *add;
/* Determine the right mode for the Add. */
if (m_b == m_y)
- mode = m_b;
+ add_mode = m_b;
else if (mode_is_reference(m_b))
- mode = m_b;
+ add_mode = m_b;
else if (mode_is_reference(m_y))
- mode = m_y;
+ add_mode = m_y;
else {
/*
* Both modes are different but none is reference,
return n;
}
- add = new_r_Add(current_ir_graph, blk, y, b, mode);
+ add = new_r_Add(current_ir_graph, blk, y, b, add_mode);
- set_Sub_left(n, x);
- set_Sub_right(n, add);
- DBG_OPT_ALGSIM0(n, n, FS_OPT_SUB_SUB_X_Y_Z);
+ n = new_rd_Sub(get_irn_dbg_info(n), current_ir_graph, blk, x, add, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_SUB_X_Y_Z);
+ return n;
}
+ if (get_mode_arithmetic(mode) == irma_twos_complement) {
+ if (is_Const(a) && is_Not(b)) {
+ /* c - ~X = X + (c+1) */
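+ /* follows from ~X == -X - 1: c - ~X == c + X + 1 */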
+ tarval *tv = get_Const_tarval(a);
+
+ tv = tarval_add(tv, get_mode_one(mode));
+ if (tv != tarval_bad) {
+ ir_node *blk = get_irn_n(n, -1);
+ ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
+ n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, get_Not_op(b), c, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_C_NOT_X);
+ return n;
+ }
+ }
+ }
return n;
} /* transform_node_Sub */
+/**
+ * Several transformations done on an n*n = 2n bit multiplication.
+ * These transformations must be done here because new nodes may be produced.
+ */
+static ir_node *transform_node_Mul2n(ir_node *n, ir_mode *mode) {
+ ir_node *oldn = n;
+ ir_node *a = get_Mul_left(n);
+ ir_node *b = get_Mul_right(n);
+ tarval *ta = value_of(a);
+ tarval *tb = value_of(b);
+ ir_mode *smode = get_irn_mode(a);
+
+ if (ta == get_mode_one(smode)) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, b, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ return n;
+ }
+ else if (ta == get_mode_minus_one(smode)) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, b, smode);
+ n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
+ return n;
+ }
+ if (tb == get_mode_one(smode)) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, a, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
+ return n;
+ }
+ else if (tb == get_mode_minus_one(smode)) {
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, a, smode);
+ n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
+ return n;
+ }
+ return n;
+}
+
/**
* Transform Mul(a,-1) into -a.
* Do constant evaluation of Phi nodes.
*/
static ir_node *transform_node_Mul(ir_node *n) {
ir_node *c, *oldn = n;
+ ir_mode *mode = get_irn_mode(n);
ir_node *a = get_Mul_left(n);
ir_node *b = get_Mul_right(n);
- ir_mode *mode;
+
+ if (is_Bad(a) || is_Bad(b))
+ return n;
+
+ if (mode != get_irn_mode(a))
+ return transform_node_Mul2n(n, mode);
HANDLE_BINOP_PHI(tarval_mul, a,b,c);
- mode = get_irn_mode(n);
if (mode_is_signed(mode)) {
ir_node *r = NULL;
return n;
}
}
+ if (is_Minus(a)) {
+ if (is_Const(b)) { /* (-a) * const -> a * -const */
+ ir_node *cnst = const_negate(b);
+ if (cnst != NULL) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ n = new_rd_Mul(dbgi, current_ir_graph, block, get_Minus_op(a), cnst, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
+ return n;
+ }
+ } else if (is_Minus(b)) { /* (-a) * (-b) -> a * b */
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ n = new_rd_Mul(dbgi, current_ir_graph, block, get_Minus_op(a), get_Minus_op(b), mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_MINUS);
+ return n;
+ } else if (is_Sub(b)) { /* (-a) * (b - c) -> a * (c - b) */
+ ir_node *sub_l = get_Sub_left(b);
+ ir_node *sub_r = get_Sub_right(b);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_node *new_b = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
+ n = new_rd_Mul(dbgi, irg, block, get_Minus_op(a), new_b, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
+ return n;
+ }
+ } else if (is_Minus(b)) {
+ if (is_Sub(a)) { /* (a - b) * (-c) -> (b - a) * c */
+ ir_node *sub_l = get_Sub_left(a);
+ ir_node *sub_r = get_Sub_right(a);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_node *new_a = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
+ n = new_rd_Mul(dbgi, irg, block, new_a, get_Minus_op(b), mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
+ return n;
+ }
+ }
+ if (get_mode_arithmetic(mode) == irma_ieee754) {
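+ /* 2.0 * x -> x + x: the Add is exact in ieee754 and usually cheaper */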
+ if (is_Const(a)) {
+ tarval *tv = get_Const_tarval(a);
+ if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
+ n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), b, b, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
+ return n;
+ }
+ }
+ else if (is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
+ if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
+ n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, a, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
+ return n;
+ }
+ }
+ }
return arch_dep_replace_mul_with_shifts(n);
} /* transform_node_Mul */
*/
static ir_node *transform_node_Div(ir_node *n) {
tarval *tv = value_of(n);
+ ir_mode *mode = get_Div_resmode(n);
ir_node *value = n;
- /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause a exception */
-
if (tv != tarval_bad) {
value = new_Const(get_tarval_mode(tv), tv);
DBG_OPT_CSTEVAL(n, value);
- } else /* Try architecture dependent optimization */
- value = arch_dep_replace_div_by_const(n);
+ goto make_tuple;
+ } else {
+ ir_node *a = get_Div_left(n);
+ ir_node *b = get_Div_right(n);
+ ir_node *dummy;
+
+ if (a == b && value_not_zero(a, &dummy)) {
+ /* BEWARE: we can optimize a/a to 1 only if this cannot cause an exception */
+ value = new_Const(mode, get_mode_one(mode));
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ } else {
+ if (mode_is_signed(mode) && is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
+
+ if (tv == get_mode_minus_one(mode)) {
+ /* a / -1 */
+ value = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ }
+ }
+ /* Try architecture dependent optimization */
+ value = arch_dep_replace_div_by_const(n);
+ }
+ }
if (value != n) {
- /* Turn Div into a tuple (mem, bad, value) */
- ir_node *mem = get_Div_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *mem, *blk;
+make_tuple:
+ /* Turn Div into a tuple (mem, jmp, bad, value) */
+ mem = get_Div_mem(n);
+ blk = get_irn_n(n, -1);
+
+ /* skip a potential Pin */
+ if (is_Pin(mem))
+ mem = get_Pin_op(mem);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
*/
static ir_node *transform_node_Mod(ir_node *n) {
tarval *tv = value_of(n);
+ ir_mode *mode = get_Mod_resmode(n);
ir_node *value = n;
- /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause a exception */
-
if (tv != tarval_bad) {
value = new_Const(get_tarval_mode(tv), tv);
DBG_OPT_CSTEVAL(n, value);
- } else /* Try architecture dependent optimization */
- value = arch_dep_replace_mod_by_const(n);
+ goto make_tuple;
+ } else {
+ ir_node *a = get_Mod_left(n);
+ ir_node *b = get_Mod_right(n);
+ ir_node *dummy;
+
+ if (a == b && value_not_zero(a, &dummy)) {
+ /* BEWARE: we can optimize a%a to 0 only if this cannot cause an exception */
+ value = new_Const(mode, get_mode_null(mode));
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ } else {
+ if (mode_is_signed(mode) && is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
+
+ if (tv == get_mode_minus_one(mode)) {
+ /* a % -1 = 0 */
+ value = new_Const(mode, get_mode_null(mode));
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
+ }
+ }
+ /* Try architecture dependent optimization */
+ value = arch_dep_replace_mod_by_const(n);
+ }
+ }
if (value != n) {
- /* Turn Mod into a tuple (mem, bad, value) */
- ir_node *mem = get_Mod_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *mem, *blk;
+
+make_tuple:
+ /* Turn Mod into a tuple (mem, jmp, bad, value) */
+ mem = get_Mod_mem(n);
+ blk = get_irn_n(n, -1);
+ /* skip a potential Pin */
+ if (is_Pin(mem))
+ mem = get_Pin_op(mem);
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
* Transform a DivMod node.
*/
static ir_node *transform_node_DivMod(ir_node *n) {
- int evaluated = 0;
-
+ ir_node *dummy;
ir_node *a = get_DivMod_left(n);
ir_node *b = get_DivMod_right(n);
- ir_mode *mode = get_irn_mode(a);
+ ir_mode *mode = get_DivMod_resmode(n);
tarval *ta = value_of(a);
tarval *tb = value_of(b);
-
- if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
- return n;
-
- /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause a exception */
+ int evaluated = 0;
if (tb != tarval_bad) {
if (tb == get_mode_one(get_tarval_mode(tb))) {
- b = new_Const (mode, get_mode_null(mode));
- evaluated = 1;
-
+ b = new_Const(mode, get_mode_null(mode));
DBG_OPT_CSTEVAL(n, b);
+ goto make_tuple;
} else if (ta != tarval_bad) {
tarval *resa, *resb;
- resa = tarval_div (ta, tb);
+ resa = tarval_div(ta, tb);
if (resa == tarval_bad) return n; /* Causes an exception! Model by replacing
 the X result with a Jmp? */
- resb = tarval_mod (ta, tb);
+ resb = tarval_mod(ta, tb);
if (resb == tarval_bad) return n; /* Causes exception! */
- a = new_Const (mode, resa);
- b = new_Const (mode, resb);
- evaluated = 1;
-
+ a = new_Const(mode, resa);
+ b = new_Const(mode, resb);
DBG_OPT_CSTEVAL(n, a);
DBG_OPT_CSTEVAL(n, b);
+ goto make_tuple;
+ } else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
+ a = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+ b = new_Const(mode, get_mode_null(mode));
+ DBG_OPT_CSTEVAL(n, a);
+ DBG_OPT_CSTEVAL(n, b);
+ goto make_tuple;
} else { /* Try architecture dependent optimization */
arch_dep_replace_divmod_by_const(&a, &b, n);
evaluated = a != NULL;
}
- } else if (ta == get_mode_null(mode)) {
+ } else if (a == b) {
+ if (value_not_zero(a, &dummy)) {
+ /* a/a && a != 0 */
+ a = new_Const(mode, get_mode_one(mode));
+ b = new_Const(mode, get_mode_null(mode));
+ DBG_OPT_CSTEVAL(n, a);
+ DBG_OPT_CSTEVAL(n, b);
+ goto make_tuple;
+ } else {
+ /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */
+ return n;
+ }
+ } else if (ta == get_mode_null(mode) && value_not_zero(b, &dummy)) {
/* 0 / b = 0 and 0 % b = 0 if b != 0 */
b = a;
- evaluated = 1;
+ goto make_tuple;
}
if (evaluated) { /* replace by tuple */
- ir_node *mem = get_DivMod_mem(n);
- ir_node *blk = get_nodes_block(n);
+ ir_node *mem, *blk;
+
+make_tuple:
+ mem = get_DivMod_mem(n);
+ /* skip a potential Pin */
+ if (is_Pin(mem))
+ mem = get_Pin_op(mem);
+
+ blk = get_irn_n(n, -1);
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
return n;
} /* transform_node_DivMod */
+/**
+ * Optimize x / c to x * (1/c)
+ */
+static ir_node *transform_node_Quot(ir_node *n) {
+ ir_mode *mode = get_Quot_resmode(n);
+ ir_node *oldn = n;
+
+ if (get_mode_arithmetic(mode) == irma_ieee754) {
+ ir_node *b = get_Quot_right(n);
+
+ if (is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
+
+ tv = tarval_quo(get_mode_one(mode), tv);
+
+ /* Do the transformation if the result is either exact or we are not
+ using strict rules. */
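+ /* e.g. 1.0/4.0 is exact, 1.0/3.0 is folded only without fp_strict_algebraic */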
+ if (tv != tarval_bad &&
+ (tarval_ieee754_get_exact() || (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic) == 0)) {
+ ir_node *blk = get_irn_n(n, -1);
+ ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
+ ir_node *a = get_Quot_left(n);
+ ir_node *m = new_rd_Mul(get_irn_dbg_info(n), current_ir_graph, blk, a, c, mode);
+ ir_node *mem = get_Quot_mem(n);
+
+ /* skip a potential Pin */
+ if (is_Pin(mem))
+ mem = get_Pin_op(mem);
+ turn_into_tuple(n, pn_Quot_max);
+ set_Tuple_pred(n, pn_Quot_M, mem);
+ set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Quot_X_except, new_r_Bad(current_ir_graph));
+ set_Tuple_pred(n, pn_Quot_res, m);
+ DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
+ }
+ }
+ }
+ return n;
+} /* transform_node_Quot */
+
/**
* Optimize Abs(x) into x if x is Confirmed >= 0
* Optimize Abs(x) into -x if x is Confirmed <= 0
return n;
} /* transform_node_Cond */
+typedef ir_node* (*recursive_transform) (ir_node *n);
+
+/**
+ * Makes use of the distributive laws for And, Or, Eor:
+ * and(a OP c, b OP c) -> and(a, b) OP c
+ * Note: this might return a different op than n.
+ */
+static ir_node *transform_bitwise_distributive(ir_node *n,
+ recursive_transform trans_func)
+{
+ ir_node *oldn = n;
+ ir_node *a = get_binop_left(n);
+ ir_node *b = get_binop_right(n);
+ ir_op *op = get_irn_op(a);
+ ir_op *op_root = get_irn_op(n);
+
+ if (op != get_irn_op(b))
+ return n;
+
+ if (op == op_Conv) {
+ ir_node *a_op = get_Conv_op(a);
+ ir_node *b_op = get_Conv_op(b);
+ ir_mode *a_mode = get_irn_mode(a_op);
+ ir_mode *b_mode = get_irn_mode(b_op);
+ if (a_mode == b_mode && (mode_is_int(a_mode) || a_mode == mode_b)) {
+ ir_node *blk = get_irn_n(n, -1);
+
+ n = exact_copy(n);
+ set_binop_left(n, a_op);
+ set_binop_right(n, b_op);
+ set_irn_mode(n, a_mode);
+ n = trans_func(n);
+ n = new_r_Conv(current_ir_graph, blk, n, get_irn_mode(oldn));
+
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_SHIFT_AND);
+ return n;
+ }
+ }
+
+ if (op == op_Eor) {
+ /* nothing to gain here */
+ return n;
+ }
+
+ if (op == op_Shrs || op == op_Shr || op == op_Shl
+ || op == op_And || op == op_Or || op == op_Eor) {
+ ir_node *a_left = get_binop_left(a);
+ ir_node *a_right = get_binop_right(a);
+ ir_node *b_left = get_binop_left(b);
+ ir_node *b_right = get_binop_right(b);
+ ir_node *c = NULL;
+ ir_node *op1, *op2;
+
+ if (is_op_commutative(op)) {
+ if (a_left == b_left) {
+ c = a_left;
+ op1 = a_right;
+ op2 = b_right;
+ } else if (a_left == b_right) {
+ c = a_left;
+ op1 = a_right;
+ op2 = b_left;
+ } else if (a_right == b_left) {
+ c = a_right;
+ op1 = a_left;
+ op2 = b_right;
+ }
+ }
+ if (a_right == b_right) {
+ c = a_right;
+ op1 = a_left;
+ op2 = b_left;
+ }
+
+ if (c != NULL) {
+ /* (a sop c) & (b sop c) => (a & b) sop c */
+ ir_node *blk = get_irn_n(n, -1);
+
+ ir_node *new_n = exact_copy(n);
+ set_binop_left(new_n, op1);
+ set_binop_right(new_n, op2);
+ new_n = trans_func(new_n);
+
+ if (op_root == op_Eor && op == op_Or) {
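+ /* (op1 | c) ^ (op2 | c) -> (op1 ^ op2) & ~c */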
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_mode *mode = get_irn_mode(c);
+
+ c = new_rd_Not(dbgi, irg, blk, c, mode);
+ n = new_rd_And(dbgi, irg, blk, new_n, c, mode);
+ } else {
+ n = exact_copy(a);
+ set_irn_n(n, -1, blk);
+ set_binop_left(n, new_n);
+ set_binop_right(n, c);
+ }
+
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_SHIFT_AND);
+ return n;
+ }
+ }
+
+ return n;
+}
+
/**
* Transform an And.
*/
ir_node *c, *oldn = n;
ir_node *a = get_And_left(n);
ir_node *b = get_And_right(n);
+ ir_mode *mode;
HANDLE_BINOP_PHI(tarval_and, a,b,c);
+
+ mode = get_irn_mode(n);
+
+ /* we can evaluate 2 Projs of the same Cmp */
+ if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
+ ir_node *pred_a = get_Proj_pred(a);
+ ir_node *pred_b = get_Proj_pred(b);
+ if (pred_a == pred_b) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(pred_a);
+ pn_Cmp pn_a = get_Proj_proj(a);
+ pn_Cmp pn_b = get_Proj_proj(b);
+ /* yes, we can simply calculate with pncs */
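+ /* e.g. Lt & Le == pn_Cmp_Lt, Lt & Gt == pn_Cmp_False */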
+ pn_Cmp new_pnc = pn_a & pn_b;
+
+ return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b, new_pnc);
+ }
+ }
+ if (is_Or(a)) {
+ if (is_Not(b)) {
+ ir_node *op = get_Not_op(b);
+ if (is_And(op)) {
+ ir_node *ba = get_And_left(op);
+ ir_node *bb = get_And_right(op);
+
+ /* it's enough to test the following cases due to normalization! */
+ if (get_Or_left(a) == ba && get_Or_right(a) == bb) {
+ /* (a|b) & ~(a&b) = a^b */
+ ir_node *block = get_nodes_block(n);
+
+ n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, block, ba, bb, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_TO_EOR);
+ return n;
+ }
+ }
+ }
+ }
+ if (is_Or(b)) {
+ if (is_Not(a)) {
+ ir_node *op = get_Not_op(a);
+ if (is_And(op)) {
+ ir_node *aa = get_And_left(op);
+ ir_node *ab = get_And_right(op);
+
+ /* it's enough to test the following cases due to normalization! */
+ if (get_Or_left(b) == aa && get_Or_right(b) == ab) {
+ /* (a|b) & ~(a&b) = a^b */
+ ir_node *block = get_nodes_block(n);
+
+ n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, block, aa, ab, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_TO_EOR);
+ return n;
+ }
+ }
+ }
+ }
+ if (is_Eor(a)) {
+ ir_node *al = get_Eor_left(a);
+ ir_node *ar = get_Eor_right(a);
+
+ if (al == b) {
+ /* (b ^ a) & b -> ~a & b */
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+
+ ar = new_rd_Not(dbg, current_ir_graph, block, ar, mode);
+ n = new_rd_And(dbg, current_ir_graph, block, ar, b, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ return n;
+ }
+ if (ar == b) {
+ /* (a ^ b) & b -> ~a & b */
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+
+ al = new_rd_Not(dbg, current_ir_graph, block, al, mode);
+ n = new_rd_And(dbg, current_ir_graph, block, al, b, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ return n;
+ }
+ }
+ if (is_Eor(b)) {
+ ir_node *bl = get_Eor_left(b);
+ ir_node *br = get_Eor_right(b);
+
+ if (bl == a) {
+ /* a & (a ^ b) -> a & ~b */
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+
+ br = new_rd_Not(dbg, current_ir_graph, block, br, mode);
+ n = new_rd_And(dbg, current_ir_graph, block, br, a, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ return n;
+ }
+ if (br == a) {
+ /* a & (b ^ a) -> a & ~b */
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+
+ bl = new_rd_Not(dbg, current_ir_graph, block, bl, mode);
+ n = new_rd_And(dbg, current_ir_graph, block, bl, a, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ return n;
+ }
+ }
+ if (is_Not(a) && is_Not(b)) {
+ /* ~a & ~b = ~(a|b) */
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(n);
+
+ a = get_Not_op(a);
+ b = get_Not_op(b);
+ n = new_rd_Or(get_irn_dbg_info(n), current_ir_graph, block, a, b, mode);
+ n = new_rd_Not(get_irn_dbg_info(n), current_ir_graph, block, n, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_DEMORGAN);
+ return n;
+ }
+
+ n = transform_bitwise_distributive(n, transform_node_And);
+
return n;
} /* transform_node_And */
HANDLE_BINOP_PHI(tarval_eor, a,b,c);
+ /* we can evaluate 2 Projs of the same Cmp */
+ if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
+ ir_node *pred_a = get_Proj_pred(a);
+ ir_node *pred_b = get_Proj_pred(b);
+ if (pred_a == pred_b) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(pred_a);
+ pn_Cmp pn_a = get_Proj_proj(a);
+ pn_Cmp pn_b = get_Proj_proj(b);
+ /* yes, we can simply calculate with pncs */
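+ /* e.g. Lt ^ Le == pn_Cmp_Eq, Lt ^ Gt == pn_Cmp_Lg */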
+ pn_Cmp new_pnc = pn_a ^ pn_b;
+
+ return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b,
+ new_pnc);
+ }
+ }
+
if (a == b) {
/* a ^ a = 0 */
n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1),
mode, get_mode_null(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
- } else if ((mode == mode_b)
- && (get_irn_op(a) == op_Proj)
- && (get_irn_mode(a) == mode_b)
- && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)
- && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
+ } else if (mode == mode_b &&
+ is_Proj(a) &&
+ is_Const(b) && is_Const_one(b) &&
+ is_Cmp(get_Proj_pred(a))) {
/* The Eor negates a Cmp. The Cmp has the negated result anyways! */
n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a), mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
- } else if ((mode == mode_b)
- && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) {
- /* The Eor is a Not. Replace it by a Not. */
- /* ????!!!Extend to bitfield 1111111. */
- n = new_r_Not(current_ir_graph, get_irn_n(n, -1), a, mode_b);
-
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ } else if (is_Const(b)) {
+ if (is_Not(a)) { /* ~x ^ const -> x ^ ~const */
+ ir_node *cnst = new_Const(mode, tarval_not(get_Const_tarval(b)));
+ ir_node *not_op = get_Not_op(a);
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(n);
+ n = new_rd_Eor(dbg, irg, block, not_op, cnst, mode);
+ return n;
+ } else if (is_Const_all_one(b)) { /* x ^ 1...1 -> ~x */
+ n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ }
+ } else {
+ n = transform_bitwise_distributive(n, transform_node_Eor);
}
return n;
*/
static ir_node *transform_node_Not(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Not_op(n);
+ ir_node *a = get_Not_op(n);
+ ir_mode *mode = get_irn_mode(n);
HANDLE_UNOP_PHI(tarval_not,a,c);
/* check for a boolean Not */
- if ( (get_irn_mode(n) == mode_b)
- && (get_irn_op(a) == op_Proj)
- && (get_irn_mode(a) == mode_b)
- && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
+ if (mode == mode_b &&
+ is_Proj(a) &&
+ is_Cmp(get_Proj_pred(a))) {
/* We negate a Cmp. The Cmp has the negated result anyways! */
n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
+ return n;
+ }
+ if (is_Eor(a)) {
+ ir_node *eor_b = get_Eor_right(a);
+ if (is_Const(eor_b)) { /* ~(x ^ const) -> x ^ ~const */
+ ir_node *cnst = new_Const(mode, tarval_not(get_Const_tarval(eor_b)));
+ ir_node *eor_a = get_Eor_left(a);
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(n);
+ n = new_rd_Eor(dbg, irg, block, eor_a, cnst, mode);
+ return n;
+ }
+ }
+ if (get_mode_arithmetic(mode) == irma_twos_complement) {
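+ /* the rules below use the two's complement identity ~x == -x - 1 */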
+ if (is_Minus(a)) { /* ~-x -> x + -1 */
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+ ir_node *add_l = get_Minus_op(a);
+ ir_node *add_r = new_rd_Const(dbg, irg, block, mode, get_mode_minus_one(mode));
+ n = new_rd_Add(dbg, irg, block, add_l, add_r, mode);
+ } else if (is_Add(a)) {
+ ir_node *add_r = get_Add_right(a);
+ if (is_Const(add_r) && is_Const_all_one(add_r)) {
+ /* ~(x + -1) = -x */
+ ir_node *op = get_Add_left(a);
+ ir_node *blk = get_irn_n(n, -1);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, get_irn_mode(n));
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_MINUS_1);
+ }
+ }
}
return n;
} /* transform_node_Not */
/**
* Transform a Minus.
+ * Optimize:
+ * -(~x) = x + 1
+ * -(a-b) = b - a
*/
static ir_node *transform_node_Minus(ir_node *n) {
ir_node *c, *oldn = n;
ir_node *a = get_Minus_op(n);
+ ir_mode *mode;
HANDLE_UNOP_PHI(tarval_neg,a,c);
+
+ mode = get_irn_mode(a);
+ if (get_mode_arithmetic(mode) == irma_twos_complement) {
+ /* the following rules apply only to two's complement */
+ if (is_Not(a)) {
+ /* -(~x) = x + 1 */
+ ir_node *op = get_Not_op(a);
+ tarval *tv = get_mode_one(mode);
+ ir_node *blk = get_irn_n(n, -1);
+ ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
+ n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, op, c, mode);
+ DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT);
+ return n;
+ }
+ if (is_Shr(a)) {
+ ir_node *c = get_Shr_right(a);
+
+ if (is_Const(c)) {
+ tarval *tv = get_Const_tarval(c);
+
+ if (tarval_is_long(tv) && get_tarval_long(tv) == get_mode_size_bits(mode) - 1) {
+ /* -(a >>u (size-1)) = a >>s (size-1) */
+ ir_node *v = get_Shr_left(a);
+
+ n = new_rd_Shrs(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), v, c, mode);
+ DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_PREDICATE);
+ return n;
+ }
+ }
+ }
+ if (is_Shrs(a)) {
+ ir_node *c = get_Shrs_right(a);
+
+ if (is_Const(c)) {
+ tarval *tv = get_Const_tarval(c);
+
+ if (tarval_is_long(tv) && get_tarval_long(tv) == get_mode_size_bits(mode) - 1) {
+ /* -(a >>s (size-1)) = a >>u (size-1) */
+ ir_node *v = get_Shrs_left(a);
+
+ n = new_rd_Shr(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), v, c, mode);
+ DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_PREDICATE);
+ return n;
+ }
+ }
+ }
+ }
+ if (is_Sub(a)) {
+ /* - (a-b) = b - a */
+ ir_node *la = get_Sub_left(a);
+ ir_node *ra = get_Sub_right(a);
+ ir_node *blk = get_irn_n(n, -1);
+
+ n = new_rd_Sub(get_irn_dbg_info(n), current_ir_graph, blk, ra, la, mode);
+ DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_SUB);
+ return n;
+ }
+
+ if (is_Mul(a)) { /* -(a * const) -> a * -const */
+ ir_node *mul_l = get_Mul_left(a);
+ ir_node *mul_r = get_Mul_right(a);
+ if (is_Const(mul_r)) {
+ ir_node *cnst = const_negate(mul_r);
+ if (cnst != NULL) { /* i.e. -const is representable */
+ dbg_info *dbg = get_irn_dbg_info(a);
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(a);
+ n = new_rd_Mul(dbg, irg, block, mul_l, cnst, mode);
+ return n;
+ }
+ }
+ }
+
return n;
} /* transform_node_Minus */
ir_node *pred = get_Cast_op(n);
ir_type *tp = get_irn_type(n);
- if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
+ if (is_Const(pred) && get_Const_type(pred) != tp) {
n = new_rd_Const_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred),
get_Const_tarval(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
- } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
+ } else if (is_SymConst(pred) && get_SymConst_value_type(pred) != tp) {
n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_SymConst_symbol(pred),
get_SymConst_kind(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
* Removes the exceptions and routes the memory to the NoMem node.
*/
static ir_node *transform_node_Proj_Div(ir_node *proj) {
- ir_node *n = get_Proj_pred(proj);
- ir_node *b = get_Div_right(n);
- ir_node *confirm;
+ ir_node *div = get_Proj_pred(proj);
+ ir_node *b = get_Div_right(div);
+ ir_node *confirm, *res, *new_mem;
long proj_nr;
if (value_not_zero(b, &confirm)) {
/* div(x, y) && y != 0 */
+ if (confirm == NULL) {
+ /* we are sure we have a Const != 0 */
+ new_mem = get_Div_mem(div);
+ if (is_Pin(new_mem))
+ new_mem = get_Pin_op(new_mem);
+ set_Div_mem(div, new_mem);
+ set_irn_pinned(div, op_pin_state_floats);
+ }
+
proj_nr = get_Proj_proj(proj);
- if (proj_nr == pn_Div_X_except) {
+ switch (proj_nr) {
+ case pn_Div_X_regular:
+ return new_r_Jmp(current_ir_graph, get_irn_n(div, -1));
+
+ case pn_Div_X_except:
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
return new_Bad();
- } else if (proj_nr == pn_Div_M) {
- ir_node *res = get_Div_mem(n);
- ir_node *new_mem = get_irg_no_mem(current_ir_graph);
+
+ case pn_Div_M:
+ res = get_Div_mem(div);
+ new_mem = get_irg_no_mem(current_ir_graph);
if (confirm) {
/* This node can only float up to the Confirm block */
new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
}
- set_irn_pinned(n, op_pin_state_floats);
+ set_irn_pinned(div, op_pin_state_floats);
/* this is a Div without exception, we can remove the memory edge */
- set_Div_mem(n, new_mem);
+ set_Div_mem(div, new_mem);
return res;
}
}
* Removes the exceptions and routes the memory to the NoMem node.
*/
static ir_node *transform_node_Proj_Mod(ir_node *proj) {
- ir_node *n = get_Proj_pred(proj);
- ir_node *b = get_Mod_right(n);
- ir_node *confirm;
+ ir_node *mod = get_Proj_pred(proj);
+ ir_node *b = get_Mod_right(mod);
+ ir_node *confirm, *res, *new_mem;
long proj_nr;
if (value_not_zero(b, &confirm)) {
/* mod(x, y) && y != 0 */
proj_nr = get_Proj_proj(proj);
- if (proj_nr == pn_Mod_X_except) {
+ if (confirm == NULL) {
+ /* we are sure we have a Const != 0 */
+ new_mem = get_Mod_mem(mod);
+ if (is_Pin(new_mem))
+ new_mem = get_Pin_op(new_mem);
+ set_Mod_mem(mod, new_mem);
+ set_irn_pinned(mod, op_pin_state_floats);
+ }
+
+ switch (proj_nr) {
+
+ case pn_Mod_X_regular:
+ return new_r_Jmp(current_ir_graph, get_irn_n(mod, -1));
+
+ case pn_Mod_X_except:
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
return new_Bad();
- } else if (proj_nr == pn_Mod_M) {
- ir_node *res = get_Mod_mem(n);
- ir_node *new_mem = get_irg_no_mem(current_ir_graph);
+
+ case pn_Mod_M:
+ res = get_Mod_mem(mod);
+ new_mem = get_irg_no_mem(current_ir_graph);
if (confirm) {
/* This node can only float up to the Confirm block */
new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
}
- set_irn_pinned(n, op_pin_state_floats);
/* this is a Mod without exception, we can remove the memory edge */
- set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
- return res;
- } else if (proj_nr == pn_Mod_res && get_Mod_left(n) == b) {
- /* a % a = 0 if a != 0 */
- ir_mode *mode = get_irn_mode(proj);
- ir_node *res = new_Const(mode, get_mode_null(mode));
-
- DBG_OPT_CSTEVAL(n, res);
+ set_Mod_mem(mod, new_mem);
return res;
+ case pn_Mod_res:
+ if (get_Mod_left(mod) == b) {
+ /* a % a = 0 if a != 0 */
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_Const(mode, get_mode_null(mode));
+
+ DBG_OPT_CSTEVAL(mod, res);
+ return res;
+ }
}
}
return proj;
* Removes the exceptions and routes the memory to the NoMem node.
*/
static ir_node *transform_node_Proj_DivMod(ir_node *proj) {
- ir_node *n = get_Proj_pred(proj);
- ir_node *b = get_DivMod_right(n);
- ir_node *confirm;
+ ir_node *divmod = get_Proj_pred(proj);
+ ir_node *b = get_DivMod_right(divmod);
+ ir_node *confirm, *res, *new_mem;
long proj_nr;
if (value_not_zero(b, &confirm)) {
/* DivMod(x, y) && y != 0 */
proj_nr = get_Proj_proj(proj);
- if (proj_nr == pn_DivMod_X_except) {
+ if (confirm == NULL) {
+ /* we are sure we have a Const != 0 */
+ new_mem = get_DivMod_mem(divmod);
+ if (is_Pin(new_mem))
+ new_mem = get_Pin_op(new_mem);
+ set_DivMod_mem(divmod, new_mem);
+ set_irn_pinned(divmod, op_pin_state_floats);
+ }
+
+ switch (proj_nr) {
+
+ case pn_DivMod_X_regular:
+ return new_r_Jmp(current_ir_graph, get_irn_n(divmod, -1));
+
+ case pn_DivMod_X_except:
/* we found an exception handler, remove it */
DBG_OPT_EXC_REM(proj);
return new_Bad();
- } else if (proj_nr == pn_DivMod_M) {
- ir_node *res = get_DivMod_mem(n);
- ir_node *new_mem = get_irg_no_mem(current_ir_graph);
+
+ case pn_DivMod_M:
+ res = get_DivMod_mem(divmod);
+ new_mem = get_irg_no_mem(current_ir_graph);
if (confirm) {
/* This node can only float up to the Confirm block */
new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
}
- set_irn_pinned(n, op_pin_state_floats);
/* this is a DivMod without exception, we can remove the memory edge */
- set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
+ set_DivMod_mem(divmod, new_mem);
return res;
- } else if (proj_nr == pn_DivMod_res_mod && get_DivMod_left(n) == b) {
- /* a % a = 0 if a != 0 */
- ir_mode *mode = get_irn_mode(proj);
- ir_node *res = new_Const(mode, get_mode_null(mode));
- DBG_OPT_CSTEVAL(n, res);
- return res;
+ case pn_DivMod_res_mod:
+ if (get_DivMod_left(divmod) == b) {
+ /* a % a = 0 if a != 0 */
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_Const(mode, get_mode_null(mode));
+
+ DBG_OPT_CSTEVAL(divmod, res);
+ return res;
+ }
}
}
return proj;
* Normalizes and optimizes Cmp nodes.
*/
static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
- if (get_opt_reassociation()) {
- ir_node *n = get_Proj_pred(proj);
- ir_node *left = get_Cmp_left(n);
- ir_node *right = get_Cmp_right(n);
- ir_node *c = NULL;
- tarval *tv = NULL;
- int changed = 0;
- ir_mode *mode = NULL;
- long proj_nr = get_Proj_proj(proj);
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *left = get_Cmp_left(n);
+ ir_node *right = get_Cmp_right(n);
+ ir_node *c = NULL;
+ tarval *tv = NULL;
+ int changed = 0;
+ ir_mode *mode = NULL;
+ long proj_nr = get_Proj_proj(proj);
+
+ /* we can evaluate this directly */
+ switch (proj_nr) {
+ case pn_Cmp_False:
+ return new_Const(mode_b, get_tarval_b_false());
+ case pn_Cmp_True:
+ return new_Const(mode_b, get_tarval_b_true());
+ case pn_Cmp_Leg:
+ if (!mode_is_float(get_irn_mode(left)))
+ return new_Const(mode_b, get_tarval_b_true());
+ break;
+ default:
+ break;
+ }
- /*
- * First step: normalize the compare op
- * by placing the constant on the right site
- * or moving the lower address node to the left.
- * We ignore the case that both are constants
- * this case should be optimized away.
- */
- if (get_irn_op(right) == op_Const) {
- c = right;
- } else if (get_irn_op(left) == op_Const) {
- c = left;
- left = right;
- right = c;
+ /* Remove unnecessary conversions */
+ /* TODO handle constants */
+ if (is_Conv(left) && is_Conv(right)) {
+ ir_mode *mode = get_irn_mode(left);
+ ir_node *op_left = get_Conv_op(left);
+ ir_node *op_right = get_Conv_op(right);
+ ir_mode *mode_left = get_irn_mode(op_left);
+ ir_mode *mode_right = get_irn_mode(op_right);
+
+ if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)) {
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+
+ if (mode_left == mode_right) {
+ left = op_left;
+ right = op_right;
+ changed |= 1;
+ } else if (smaller_mode(mode_left, mode_right)) {
+ left = new_r_Conv(irg, block, op_left, mode_right);
+ right = op_right;
+ changed |= 1;
+ } else if (smaller_mode(mode_right, mode_left)) {
+ left = op_left;
+ right = new_r_Conv(irg, block, op_right, mode_left);
+ changed |= 1;
+ }
+ }
+ }
- proj_nr = get_inversed_pnc(proj_nr);
- changed |= 1;
- } else if (get_irn_idx(left) > get_irn_idx(right)) {
- ir_node *t = left;
+ /* TODO extend to arbitrary constants */
+ if (is_Conv(left) && is_Const(right) && is_Const_null(right)) {
+ ir_mode* mode = get_irn_mode(left);
+ ir_node* op = get_Conv_op(left);
+ ir_mode* op_mode = get_irn_mode(op);
+
+ if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
+ (mode_is_signed(mode) || !mode_is_signed(op_mode))) {
+ ir_node *null = new_Const(op_mode, get_mode_null(op_mode));
+ set_Cmp_left( n, op);
+ set_Cmp_right(n, null);
+ return proj;
+ }
+ }
- left = right;
- right = t;
+ /* remove Casts */
+ if (is_Cast(left))
+ left = get_Cast_op(left);
+ if (is_Cast(right))
+ right = get_Cast_op(right);
+
+ /* remove operation of both sides if possible */
+ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+ ir_opcode lop = get_irn_opcode(left);
+
+ if (lop == get_irn_opcode(right)) {
+ ir_node *ll, *lr, *rl, *rr;
+
+ /* same operation on both sides, try to remove */
+ switch (lop) {
+ case iro_Not:
+ case iro_Minus:
+ /* ~a CMP ~b => a CMP b, -a CMP -b ==> a CMP b */
+ left = get_unop_op(left);
+ right = get_unop_op(right);
+ changed |= 1;
+ break;
+ case iro_Add:
+ ll = get_Add_left(left);
+ lr = get_Add_right(left);
+ rl = get_Add_left(right);
+ rr = get_Add_right(right);
+
+ if (ll == rl) {
+ /* X + a CMP X + b ==> a CMP b */
+ left = lr;
+ right = rr;
+ changed |= 1;
+ } else if (ll == rr) {
+ /* X + a CMP b + X ==> a CMP b */
+ left = lr;
+ right = rl;
+ changed |= 1;
+ } else if (lr == rl) {
+ /* a + X CMP X + b ==> a CMP b */
+ left = ll;
+ right = rr;
+ changed |= 1;
+ } else if (lr == rr) {
+ /* a + X CMP b + X ==> a CMP b */
+ left = ll;
+ right = rl;
+ changed |= 1;
+ }
+ break;
+ case iro_Sub:
+ ll = get_Sub_left(left);
+ lr = get_Sub_right(left);
+ rl = get_Sub_left(right);
+ rr = get_Sub_right(right);
+
+ if (ll == rl) {
+ /* X - a CMP X - b ==> a CMP b */
+ left = lr;
+ right = rr;
+ changed |= 1;
+ } else if (lr == rr) {
+ /* a - X CMP b - X ==> a CMP b */
+ left = ll;
+ right = rl;
+ changed |= 1;
+ }
+ break;
+ case iro_Rot:
+ if (get_Rot_right(left) == get_Rot_right(right)) {
+ /* a ROT X CMP b ROT X */
+ left = get_Rot_left(left);
+ right = get_Rot_left(right);
+ changed |= 1;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
- proj_nr = get_inversed_pnc(proj_nr);
- changed |= 1;
+ if (get_irn_mode(left) == mode_b) {
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+
+ switch (proj_nr) {
+ case pn_Cmp_Le: return new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
+ case pn_Cmp_Lt: return new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
+ case pn_Cmp_Ge: return new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b);
+ case pn_Cmp_Gt: return new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b);
+ case pn_Cmp_Lg: return new_r_Eor(irg, block, left, right, mode_b);
+ case pn_Cmp_Eq: return new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b);
}
+ }
- /*
- * Second step: Try to reduce the magnitude
- * of a constant. This may help to generate better code
- * later and may help to normalize more compares.
- * Of course this is only possible for integer values.
- */
- if (c) {
- mode = get_irn_mode(c);
- tv = get_Const_tarval(c);
+ if (!get_opt_reassociation())
+ return proj;
- if (tv != tarval_bad) {
- /* the following optimization is possible on modes without Overflow
- * on Unary Minus or on == and !=:
- * -a CMP c ==> a swap(CMP) -c
- *
- * Beware: for two-complement Overflow may occur, so only == and != can
- * be optimized, see this:
- * -MININT < 0 =/=> MININT > 0 !!!
- */
- if (get_opt_constant_folding() && get_irn_op(left) == op_Minus &&
- (!mode_overflow_on_unary_Minus(mode) ||
- (mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
- left = get_Minus_op(left);
- tv = tarval_sub(get_mode_null(mode), tv);
+ /*
+ * First step: normalize the compare op
+ * by placing the constant on the right side
+ * or moving the lower address node to the left.
+ * We ignore the case that both are constants;
+ * this case should be optimized away.
+ */
+ if (is_Const(right)) {
+ c = right;
+ } else if (is_Const(left)) {
+ c = left;
+ left = right;
+ right = c;
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 1;
+ } else if (get_irn_idx(left) > get_irn_idx(right)) {
+ ir_node *t = left;
+
+ left = right;
+ right = t;
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 1;
+ }
+
+ /*
+ * Second step: Try to reduce the magnitude
+ * of a constant. This may help to generate better code
+ * later and may help to normalize more compares.
+ * Of course this is only possible for integer values.
+ */
+ if (c) {
+ mode = get_irn_mode(c);
+ tv = get_Const_tarval(c);
+
+ if (tv != tarval_bad) {
+ /* the following optimization is possible on modes without overflow
+ * on unary Minus, or on == and !=:
+ * -a CMP c ==> a swap(CMP) -c
+ *
+ * Beware: for two's complement, overflow may occur, so only == and != can
+ * be optimized; see this:
+ * -MININT < 0 =/=> MININT > 0 !!!
+ */
+ if (is_Minus(left) &&
+ (!mode_overflow_on_unary_Minus(mode) ||
+ (mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
+ tv = tarval_neg(tv);
+ if (tv != tarval_bad) {
+ left = get_Minus_op(left);
proj_nr = get_inversed_pnc(proj_nr);
changed |= 2;
}
+ } else if (is_Not(left) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)) {
+ tv = tarval_not(tv);
- /* for integer modes, we have more */
- if (mode_is_int(mode)) {
- /* Ne includes Unordered which is not possible on integers.
- * However, frontends often use this wrong, so fix it here */
- if (proj_nr & pn_Cmp_Uo) {
- proj_nr &= ~pn_Cmp_Uo;
- set_Proj_proj(proj, proj_nr);
- }
+ if (tv != tarval_bad) {
+ left = get_Not_op(left);
+ changed |= 2;
+ }
+ }
- /* c > 0 : a < c ==> a <= (c-1) a >= c ==> a > (c-1) */
- if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Ge) &&
- tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Gt) {
- tv = tarval_sub(tv, get_mode_one(mode));
+ /* for integer modes, we have more */
+ if (mode_is_int(mode)) {
+ /* Ne includes Unordered, which is not possible on integers.
+ * However, frontends often get this wrong, so fix it here */
+ if (proj_nr & pn_Cmp_Uo) {
+ proj_nr &= ~pn_Cmp_Uo;
+ set_Proj_proj(proj, proj_nr);
+ }
+
+ /* c > 0 : a < c ==> a <= (c-1) a >= c ==> a > (c-1) */
+ if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Ge) &&
+ tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Gt) {
+ tv = tarval_sub(tv, get_mode_one(mode));
+ if (tv != tarval_bad) {
proj_nr ^= pn_Cmp_Eq;
changed |= 2;
}
- /* c < 0 : a > c ==> a >= (c+1) a <= c ==> a < (c+1) */
- else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Le) &&
- tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Lt) {
- tv = tarval_add(tv, get_mode_one(mode));
+ }
+ /* c < 0 : a > c ==> a >= (c+1) a <= c ==> a < (c+1) */
+ else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Le) &&
+ tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Lt) {
+ tv = tarval_add(tv, get_mode_one(mode));
+ if (tv != tarval_bad) {
proj_nr ^= pn_Cmp_Eq;
changed |= 2;
}
+ }
- /* the following reassociations work only for == and != */
- if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+ /* the following reassociations work only for == and != */
+ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
- /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
- if (classify_tarval(tv) == TV_CLASSIFY_NULL && get_irn_op(left) == op_Sub) {
- right = get_Sub_right(left);
- left = get_Sub_left(left);
+ /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
+ if (tarval_is_null(tv) && is_Sub(left)) {
+ right = get_Sub_right(left);
+ left = get_Sub_left(left);
- tv = value_of(right);
- changed = 1;
- }
+ tv = value_of(right);
+ changed = 1;
+ }
- if (tv != tarval_bad) {
- ir_op *op = get_irn_op(left);
+ if (tv != tarval_bad) {
+ /* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */
+ if (is_Sub(left)) {
+ ir_node *c1 = get_Sub_right(left);
+ tarval *tv2 = value_of(c1);
- /* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */
- if (op == op_Sub) {
- ir_node *c1 = get_Sub_right(left);
- tarval *tv2 = value_of(c1);
+ if (tv2 != tarval_bad) {
+ tv2 = tarval_add(tv, value_of(c1));
if (tv2 != tarval_bad) {
- tv2 = tarval_add(tv, value_of(c1));
-
- if (tv2 != tarval_bad) {
- left = get_Sub_left(left);
- tv = tv2;
- changed |= 2;
- }
+ left = get_Sub_left(left);
+ tv = tv2;
+ changed |= 2;
}
}
- /* a+c1 == c2 ==> a == c2-c1, a+c1 != c2 ==> a != c2-c1 */
- else if (op == op_Add) {
- ir_node *a_l = get_Add_left(left);
- ir_node *a_r = get_Add_right(left);
- ir_node *a;
- tarval *tv2;
-
- if (get_irn_op(a_l) == op_Const) {
- a = a_r;
- tv2 = value_of(a_l);
- } else {
- a = a_l;
- tv2 = value_of(a_r);
- }
-
- if (tv2 != tarval_bad) {
- tv2 = tarval_sub(tv, tv2);
-
- if (tv2 != tarval_bad) {
- left = a;
- tv = tv2;
- changed |= 2;
- }
- }
+ }
+ /* a+c1 == c2 ==> a == c2-c1, a+c1 != c2 ==> a != c2-c1 */
+ else if (is_Add(left)) {
+ ir_node *a_l = get_Add_left(left);
+ ir_node *a_r = get_Add_right(left);
+ ir_node *a;
+ tarval *tv2;
+
+ if (is_Const(a_l)) {
+ a = a_r;
+ tv2 = value_of(a_l);
+ } else {
+ a = a_l;
+ tv2 = value_of(a_r);
}
- /* -a == c ==> a == -c, -a != c ==> a != -c */
- else if (op == op_Minus) {
- tarval *tv2 = tarval_sub(get_mode_null(mode), tv);
+
+ if (tv2 != tarval_bad) {
+ tv2 = tarval_sub(tv, tv2);
if (tv2 != tarval_bad) {
- left = get_Minus_op(left);
+ left = a;
tv = tv2;
changed |= 2;
}
}
}
- } /* == or != */
- /* the following reassociations work only for <= */
- else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
- if (tv != tarval_bad) {
- ir_op *op = get_irn_op(left);
-
- /* c >= 0 : Abs(a) <= c ==> (unsigned)(a + c) <= 2*c */
- if (op == op_Abs) {
+ /* -a == c ==> a == -c, -a != c ==> a != -c */
+ else if (is_Minus(left)) {
+ tarval *tv2 = tarval_sub(get_mode_null(mode), tv);
+
+ if (tv2 != tarval_bad) {
+ left = get_Minus_op(left);
+ tv = tv2;
+ changed |= 2;
}
}
}
- } /* mode_is_int */
-
- /*
- * optimization for AND:
- * Optimize:
- * And(x, C) == C ==> And(x, C) != 0
- * And(x, C) != C ==> And(X, C) == 0
- *
- * if C is a single Bit constant.
- */
- if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) &&
- (get_irn_op(left) == op_And)) {
- if (is_single_bit_tarval(tv)) {
- /* check for Constant's match. We have check hare the tarvals,
- because our const might be changed */
- ir_node *la = get_And_left(left);
- ir_node *ra = get_And_right(left);
- if ((is_Const(la) && get_Const_tarval(la) == tv) ||
- (is_Const(ra) && get_Const_tarval(ra) == tv)) {
- /* fine: do the transformation */
- tv = get_mode_null(get_tarval_mode(tv));
- proj_nr ^= pn_Cmp_Leg;
- changed |= 2;
+ } /* == or != */
+ /* the following reassociations work only for <= */
+ else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+ if (tv != tarval_bad) {
+ /* c >= 0 : Abs(a) <= c ==> (unsigned)(a + c) <= 2*c */
+ if (get_irn_op(left) == op_Abs) { /* TODO: something is missing here */
}
}
}
- } /* tarval != bad */
- }
+ } /* mode_is_int */
+
+ /*
+ * optimization for AND:
+ * Optimize:
+ * And(x, C) == C ==> And(x, C) != 0
+ * And(x, C) != C ==> And(X, C) == 0
+ *
+ * if C is a single Bit constant.
+ */
+ if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_And(left)) {
+ if (tarval_is_single_bit(tv)) {
+ /* check for Constant's match. We must check the tarvals here,
+ because our const might have been changed */
+ ir_node *la = get_And_left(left);
+ ir_node *ra = get_And_right(left);
+ if ((is_Const(la) && get_Const_tarval(la) == tv) ||
+ (is_Const(ra) && get_Const_tarval(ra) == tv)) {
+ /* fine: do the transformation */
+ tv = get_mode_null(get_tarval_mode(tv));
+ proj_nr ^= pn_Cmp_Leg;
+ changed |= 2;
+ }
+ }
+ }
+ } /* tarval != bad */
+ }
+
+ if (changed & 2) /* need a new Const */
+ right = new_Const(mode, tv);
- if (changed) {
- ir_node *block = get_irn_n(n, -1); /* Beware of get_nodes_Block() */
+ if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_Const(right) && is_Const_null(right) && is_Proj(left)) {
+ ir_node *op = get_Proj_pred(left);
- if (changed & 2) /* need a new Const */
- right = new_Const(mode, tv);
+ if ((is_Mod(op) && get_Proj_proj(left) == pn_Mod_res) ||
+ (is_DivMod(op) && get_Proj_proj(left) == pn_DivMod_res_mod)) {
+ ir_node *c = get_binop_right(op);
- /* create a new compare */
- n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block,
- left, right);
+ if (is_Const(c)) {
+ tarval *tv = get_Const_tarval(c);
- set_Proj_pred(proj, n);
- set_Proj_proj(proj, proj_nr);
+ if (tarval_is_single_bit(tv)) {
+ /* special case: (x % 2^n) CMP 0 ==> x & (2^n-1) CMP 0 */
+ ir_node *v = get_binop_left(op);
+ ir_node *blk = get_irn_n(op, -1);
+ ir_mode *mode = get_irn_mode(v);
+
+ tv = tarval_sub(tv, get_mode_one(mode));
+ left = new_rd_And(get_irn_dbg_info(op), current_ir_graph, blk, v, new_Const(mode, tv), mode);
+ changed |= 1;
+ }
+ }
}
}
+
+ if (changed) {
+ ir_node *block = get_irn_n(n, -1); /* Beware of get_nodes_Block() */
+
+ /* create a new compare */
+ n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block, left, right);
+
+ set_Proj_pred(proj, n);
+ set_Proj_proj(proj, proj_nr);
+ }
+
return proj;
} /* transform_node_Proj_Cmp */
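+/* A few concrete instances of the Cmp normalizations above:
+ *   a < 3         becomes  a <= 2        (reduce constant magnitude, c > 0)
+ *   (x & 4) == 4  becomes  (x & 4) != 0  (single bit constant)
+ *   (x % 8) == 0  becomes  (x & 7) == 0  (Mod by a power of two vs. 0)
+ */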
assert(is_op_commutative(get_irn_op(binop)));
- if (get_irn_op(op_a) == op_Const) {
+ if (is_Const(op_a)) {
*a = op_b;
*c = op_a;
} else {
* OR c2 ===> OR
* AND c1
* OR
+ *
+ *
+ * value c2 value c1
+ * AND c1 ===> OR if (c1 | c2) == 0xFF..FF (all bits set)
+ * OR
*/
static ir_node *transform_node_Or_bf_store(ir_node *or) {
ir_node *and, *c1;
tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;
- get_comm_Binop_Ops(or, &and, &c1);
- if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
- return or;
+ while (1) {
+ get_comm_Binop_Ops(or, &and, &c1);
+ if (!is_Const(c1) || !is_And(and))
+ return or;
- get_comm_Binop_Ops(and, &or_l, &c2);
- if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
- return or;
+ get_comm_Binop_Ops(and, &or_l, &c2);
+ if (!is_Const(c2))
+ return or;
- get_comm_Binop_Ops(or_l, &and_l, &c3);
- if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
- return or;
+ tv1 = get_Const_tarval(c1);
+ tv2 = get_Const_tarval(c2);
- get_comm_Binop_Ops(and_l, &value, &c4);
- if (get_irn_op(c4) != op_Const)
- return or;
+ tv = tarval_or(tv1, tv2);
+ if (tarval_is_all_one(tv)) {
+ /* the AND does NOT clear a bit which isn't set by the OR */
+ set_Or_left(or, or_l);
+ set_Or_right(or, c1);
- /* ok, found the pattern, check for conditions */
- assert(mode == get_irn_mode(and));
- assert(mode == get_irn_mode(or_l));
- assert(mode == get_irn_mode(and_l));
+ /* check for more */
+ continue;
+ }
- tv1 = get_Const_tarval(c1);
- tv2 = get_Const_tarval(c2);
- tv3 = get_Const_tarval(c3);
- tv4 = get_Const_tarval(c4);
+ if (!is_Or(or_l))
+ return or;
- tv = tarval_or(tv4, tv2);
- if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
- /* have at least one 0 at the same bit position */
- return or;
- }
+ get_comm_Binop_Ops(or_l, &and_l, &c3);
+ if (!is_Const(c3) || !is_And(and_l))
+ return or;
- n_tv4 = tarval_not(tv4);
- if (tv3 != tarval_and(tv3, n_tv4)) {
- /* bit in the or_mask is outside the and_mask */
- return or;
- }
+ get_comm_Binop_Ops(and_l, &value, &c4);
+ if (!is_Const(c4))
+ return or;
- n_tv2 = tarval_not(tv2);
- if (tv1 != tarval_and(tv1, n_tv2)) {
- /* bit in the or_mask is outside the and_mask */
- return or;
- }
+ /* ok, found the pattern, check for conditions */
+ assert(mode == get_irn_mode(and));
+ assert(mode == get_irn_mode(or_l));
+ assert(mode == get_irn_mode(and_l));
- /* ok, all conditions met */
- block = get_irn_n(or, -1);
+ tv3 = get_Const_tarval(c3);
+ tv4 = get_Const_tarval(c4);
- new_and = new_r_And(current_ir_graph, block,
- value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
+ tv = tarval_or(tv4, tv2);
+ if (!tarval_is_all_one(tv)) {
+ /* have at least one 0 at the same bit position */
+ return or;
+ }
+
+ n_tv4 = tarval_not(tv4);
+ if (tv3 != tarval_and(tv3, n_tv4)) {
+ /* bit in the or_mask is outside the and_mask */
+ return or;
+ }
+
+ n_tv2 = tarval_not(tv2);
+ if (tv1 != tarval_and(tv1, n_tv2)) {
+ /* bit in the or_mask is outside the and_mask */
+ return or;
+ }
+
+ /* ok, all conditions met */
+ block = get_irn_n(or, -1);
- new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
+ new_and = new_r_And(current_ir_graph, block,
+ value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
- set_Or_left(or, new_and);
- set_Or_right(or, new_const);
+ new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
- /* check for more */
- return transform_node_Or_bf_store(or);
+ set_Or_left(or, new_and);
+ set_Or_right(or, new_const);
+
+ /* check for more */
+ }
} /* transform_node_Or_bf_store */
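+/* Example of the bitfield pattern above with 8-bit masks: the two disjoint
+ * field stores
+ *   (v & 0xF0) | 0x03          -- write 0x03 into bits 0..3
+ *   (... & 0xCF) | 0x20        -- write 0x20 into bits 4..5
+ * fulfill 0xF0 | 0xCF == 0xFF and merge into (v & 0xC0) | 0x23.
+ */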
/**
shl = get_binop_left(or);
shr = get_binop_right(or);
- if (get_irn_op(shl) == op_Shr) {
- if (get_irn_op(shr) != op_Shl)
+ if (is_Shr(shl)) {
+ if (!is_Shl(shr))
return or;
irn = shl;
shl = shr;
shr = irn;
- } else if (get_irn_op(shl) != op_Shl) {
+ } else if (!is_Shl(shl)) {
return or;
- } else if (get_irn_op(shr) != op_Shr) {
+ } else if (!is_Shr(shr)) {
return or;
}
x = get_Shl_left(shl);
c1 = get_Shl_right(shl);
c2 = get_Shr_right(shr);
- if (get_irn_op(c1) == op_Const && get_irn_op(c2) == op_Const) {
+ if (is_Const(c1) && is_Const(c2)) {
tv1 = get_Const_tarval(c1);
if (! tarval_is_long(tv1))
return or;
DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROT);
return n;
- } else if (get_irn_op(c1) == op_Sub) {
+ } else if (is_Sub(c1)) {
v = c2;
sub = c1;
return or;
c1 = get_Sub_left(sub);
- if (get_irn_op(c1) != op_Const)
+ if (!is_Const(c1))
return or;
tv1 = get_Const_tarval(c1);
DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
return n;
- } else if (get_irn_op(c2) == op_Sub) {
+ } else if (is_Sub(c2)) {
v = c1;
sub = c2;
c1 = get_Sub_left(sub);
- if (get_irn_op(c1) != op_Const)
+ if (!is_Const(c1))
return or;
tv1 = get_Const_tarval(c1);
ir_node *a = get_Or_left(n);
ir_node *b = get_Or_right(n);
+ if (is_Not(a) && is_Not(b)) {
+ /* ~a | ~b = ~(a&b) */
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(n);
+
+ a = get_Not_op(a);
+ b = get_Not_op(b);
+ n = new_rd_And(get_irn_dbg_info(n), current_ir_graph, block, a, b, mode);
+ n = new_rd_Not(get_irn_dbg_info(n), current_ir_graph, block, n, mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_DEMORGAN);
+ return n;
+ }
+
+ /* we can evaluate 2 Projs of the same Cmp */
+ if (get_irn_mode(n) == mode_b && is_Proj(a) && is_Proj(b)) {
+ ir_node *pred_a = get_Proj_pred(a);
+ ir_node *pred_b = get_Proj_pred(b);
+ if (pred_a == pred_b) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(pred_a);
+ pn_Cmp pn_a = get_Proj_proj(a);
+ pn_Cmp pn_b = get_Proj_proj(b);
+ /* yes, we can simply calculate with pncs */
+ pn_Cmp new_pnc = pn_a | pn_b;
+
+ return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b,
+ new_pnc);
+ }
+ }
+
HANDLE_BINOP_PHI(tarval_or, a,b,c);
n = transform_node_Or_bf_store(n);
n = transform_node_Or_Rot(n);
+ if (n != oldn)
+ return n;
+
+ n = transform_bitwise_distributive(n, transform_node_Or);
return n;
} /* transform_node_Or */
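+/* Examples for the Or rules above:
+ *   ~a | ~b  becomes  ~(a & b)    (De Morgan)
+ *   Proj(Cmp(a,b), Lt) | Proj(Cmp(a,b), Eq)  becomes  Proj(Cmp(a,b), Le),
+ *   because pn_Cmp_Lt | pn_Cmp_Eq == pn_Cmp_Le
+ */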
return n;
} /* transform_node_End */
+/** returns 1 if a == -b */
+static int is_negated_value(ir_node *a, ir_node *b) {
+ if (is_Minus(a) && get_Minus_op(a) == b)
+ return 1;
+ if (is_Minus(b) && get_Minus_op(b) == a)
+ return 1;
+ if (is_Sub(a) && is_Sub(b)) {
+ ir_node *a_left = get_Sub_left(a);
+ ir_node *a_right = get_Sub_right(a);
+ ir_node *b_left = get_Sub_left(b);
+ ir_node *b_right = get_Sub_right(b);
+
+ if (a_left == b_right && a_right == b_left)
+ return 1;
+ }
+
+ return 0;
+}
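+/* is_negated_value(-x, x), is_negated_value(x, -x) and
+ * is_negated_value(a - b, b - a) all return 1; everything else returns 0. */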
+
/**
* Optimize a Mux into some simpler cases.
*/
ir_node *oldn = n, *sel = get_Mux_sel(n);
ir_mode *mode = get_irn_mode(n);
- if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(mode)) {
- ir_node *cmp = get_Proj_pred(sel);
- long proj_nr = get_Proj_proj(sel);
- ir_node *f = get_Mux_false(n);
- ir_node *t = get_Mux_true(n);
-
- if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
- ir_node *block = get_irn_n(n, -1);
-
- /*
- * Note: normalization puts the constant on the right site,
- * so we check only one case.
- *
- * Note further that these optimization work even for floating point
- * with NaN's because -NaN == NaN.
- * However, if +0 and -0 is handled differently, we cannot use the first one.
- */
- if (get_irn_op(f) == op_Minus &&
- get_Minus_op(f) == t &&
- get_Cmp_left(cmp) == t) {
-
- if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
- /* Mux(a >=/> 0, -a, a) ==> Abs(a) */
- n = new_rd_Abs(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- t, mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
- return n;
- } else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
- /* Mux(a <=/< 0, -a, a) ==> Minus(Abs(a)) */
- n = new_rd_Abs(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- t, mode);
- n = new_rd_Minus(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- n, mode);
-
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
- return n;
+ if (mode == mode_b) {
+ ir_node *t = get_Mux_true(n);
+ ir_node *f = get_Mux_false(n);
+ dbg_info *dbg = get_irn_dbg_info(n);
+ ir_node *block = get_irn_n(n, -1);
+ ir_graph *irg = current_ir_graph;
+
+ if (is_Const(t)) {
+ tarval *tv_t = get_Const_tarval(t);
+ if (tv_t == tarval_b_true) {
+ if (is_Const(f)) {
+ assert(get_Const_tarval(f) == tarval_b_false);
+ return sel;
+ } else {
+ return new_rd_Or(dbg, irg, block, sel, f, mode_b);
}
- } else if (get_irn_op(t) == op_Minus &&
- get_Minus_op(t) == f &&
- get_Cmp_left(cmp) == f) {
-
- if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
- /* Mux(a <=/< 0, a, -a) ==> Abs(a) */
- n = new_rd_Abs(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- f, mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
- return n;
- } else if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
- /* Mux(a >=/> 0, a, -a) ==> Minus(Abs(a)) */
- n = new_rd_Abs(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- f, mode);
- n = new_rd_Minus(get_irn_dbg_info(n),
- current_ir_graph,
- block,
- n, mode);
-
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
- return n;
+ } else {
+ ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+ assert(tv_t == tarval_b_false);
+ if (is_Const(f)) {
+ assert(get_Const_tarval(f) == tarval_b_true);
+ return not_sel;
+ } else {
+ return new_rd_And(dbg, irg, block, not_sel, f, mode_b);
}
}
+ } else if (is_Const(f)) {
+ tarval *tv_f = get_Const_tarval(f);
+ if (tv_f == tarval_b_true) {
+ ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+ return new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+ } else {
+ assert(tv_f == tarval_b_false);
+ return new_rd_And(dbg, irg, block, sel, t, mode_b);
+ }
+ }
+ }
- if (mode_is_int(mode) && mode_is_signed(mode) &&
- get_mode_arithmetic(mode) == irma_twos_complement) {
- ir_node *x = get_Cmp_left(cmp);
-
- /* the following optimization works only with signed integer two-complement mode */
+ if (is_Proj(sel) && !mode_honor_signed_zeros(mode)) {
+ ir_node *cmp = get_Proj_pred(sel);
+ long pn = get_Proj_proj(sel);
+ ir_node *f = get_Mux_false(n);
+ ir_node *t = get_Mux_true(n);
- if (mode == get_irn_mode(x)) {
- /*
- * FIXME: this restriction is two rigid, as it would still
- * work if mode(x) = Hs and mode == Is, but at least it removes
- * all wrong cases.
- */
- if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Le) &&
- classify_Const(t) == CNST_ALL_ONE &&
- classify_Const(f) == CNST_NULL) {
- /*
- * Mux(x:T </<= 0, 0, -1) -> Shrs(x, sizeof_bits(T) - 1)
- * Conditions:
- * T must be signed.
- */
- n = new_rd_Shrs(get_irn_dbg_info(n),
- current_ir_graph, block, x,
- new_r_Const_long(current_ir_graph, block, mode_Iu,
- get_mode_size_bits(mode) - 1),
- mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
+ /*
+ * Note: normalization puts the constant on the right side,
+ * so we check only one case.
+ *
+ * Note further that these optimization work even for floating point
+ * with NaN's because -NaN == NaN.
+ * However, if +0 and -0 is handled differently, we cannot use the first
+ * one.
+ */
+ if (is_Cmp(cmp)) {
+ ir_node *cmp_r = get_Cmp_right(cmp);
+ if (is_Const(cmp_r) && is_Const_null(cmp_r)) {
+ ir_node *block = get_irn_n(n, -1);
+
+ if (is_negated_value(f, t)) {
+ ir_node *cmp_left = get_Cmp_left(cmp);
+
+ /* Psi(a >= 0, a, -a) = Psi(a <= 0, -a, a) ==> Abs(a) */
+ if ( (cmp_left == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))
+ || (cmp_left == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)))
+ {
+ n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block,
+ cmp_left, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
- } else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Ge) &&
- classify_Const(t) == CNST_ONE &&
- classify_Const(f) == CNST_NULL) {
- /*
- * Mux(x:T >/>= 0, 0, 1) -> Shr(-x, sizeof_bits(T) - 1)
- * Conditions:
- * T must be signed.
- */
- n = new_rd_Shr(get_irn_dbg_info(n),
- current_ir_graph, block,
- new_r_Minus(current_ir_graph, block, x, mode),
- new_r_Const_long(current_ir_graph, block, mode_Iu,
- get_mode_size_bits(mode) - 1),
- mode);
- DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_SHR);
+ /* Psi(a <= 0, a, -a) = Psi(a >= 0, -a, a) ==> -Abs(a) */
+ } else if ((cmp_left == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))
+ || (cmp_left == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)))
+ {
+ n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block,
+ cmp_left, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
+ block, n, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
}
}
+
}
}
}
return n;
} /* transform_node_Psi */
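+/* Examples for the Mux rules above, in Psi(sel, trueval, falseval) notation:
+ *   Psi(c, true, f)     becomes  c | f    for mode_b values
+ *   Psi(c, t, false)    becomes  c & t    for mode_b values
+ *   Psi(a >= 0, a, -a)  becomes  Abs(a)
+ */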
+/**
+ * Optimize Sync nodes that have other Syncs as input: we simply add the
+ * inputs of the other Sync to our own inputs.
+ */
+static ir_node *transform_node_Sync(ir_node *n) {
+ int i, arity;
+
+ arity = get_irn_arity(n);
+ for (i = 0; i < arity; /*empty*/) {
+ int i2, arity2;
+ ir_node *in = get_irn_n(n, i);
+ if (!is_Sync(in)) {
+ ++i;
+ continue;
+ }
+
+ /* replace the inner Sync by its first input */
+ set_irn_n(n, i, get_irn_n(in, 0));
+ /* do not advance i, so this input is checked again for Syncs */
+
+ /* append all other inputs of the sync to our sync */
+ arity2 = get_irn_arity(in);
+ for (i2 = 1; i2 < arity2; ++i2) {
+ ir_node *in_in = get_irn_n(in, i2);
+ add_irn_n(n, in_in);
+ /* increase arity so we also check the new inputs for syncs */
+ arity++;
+ }
+ }
+
+ /* rehash the sync node */
+ add_identities(current_ir_graph->value_table, n);
+
+ return n;
+}
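+/* Example: Sync(Sync(m1, m2), m3) is flattened to Sync(m1, m3, m2);
+ * the input order of a Sync does not matter. */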
+
/**
* Tries several [inplace] [optimizing] transformations and returns an
* equivalent node. The difference to equivalent_node() is that these
* not be freed even if the equivalent node isn't the old one.
*/
static ir_node *transform_node(ir_node *n) {
- if (n->op->ops.transform_node)
- n = n->op->ops.transform_node(n);
+ ir_node *oldn;
+
+ /*
+ * Transform_node is the only "optimizing transformation" that might
+ * return a node with a different opcode. We iterate HERE until fixpoint
+ * to get the final result.
+ */
+ do {
+ oldn = n;
+ if (n->op->ops.transform_node)
+ n = n->op->ops.transform_node(n);
+ } while (oldn != n);
+
return n;
} /* transform_node */
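+/* Example of the fixpoint iteration: transform_node_Not may turn ~-x into
+ * an Add, which the next iteration then optimizes with the Add rules. */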
CASE(Div);
CASE(Mod);
CASE(DivMod);
+ CASE(Quot);
CASE(Abs);
CASE(Cond);
CASE(And);
CASE(End);
CASE(Mux);
CASE(Psi);
+ CASE(Sync);
default:
/* leave NULL */;
}
/** Compares the attributes of two Proj nodes. */
static int node_cmp_attr_Proj(ir_node *a, ir_node *b) {
- return get_irn_proj_attr (a) != get_irn_proj_attr (b);
+ return get_irn_proj_attr(a) != get_irn_proj_attr(b);
} /* node_cmp_attr_Proj */
/** Compares the attributes of two Filter nodes. */
/** Compares the attributes of two Alloc nodes. */
static int node_cmp_attr_Alloc(ir_node *a, ir_node *b) {
- return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
- || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
+ const alloc_attr *pa = get_irn_alloc_attr(a);
+ const alloc_attr *pb = get_irn_alloc_attr(b);
+ return (pa->where != pb->where) || (pa->type != pb->type);
} /* node_cmp_attr_Alloc */
/** Compares the attributes of two Free nodes. */
static int node_cmp_attr_Free(ir_node *a, ir_node *b) {
- return (get_irn_free_attr(a).where != get_irn_free_attr(b).where)
- || (get_irn_free_attr(a).type != get_irn_free_attr(b).type);
+ const free_attr *pa = get_irn_free_attr(a);
+ const free_attr *pb = get_irn_free_attr(b);
+ return (pa->where != pb->where) || (pa->type != pb->type);
} /* node_cmp_attr_Free */
/** Compares the attributes of two SymConst nodes. */
static int node_cmp_attr_SymConst(ir_node *a, ir_node *b) {
- return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
- || (get_irn_symconst_attr(a).sym.type_p != get_irn_symconst_attr(b).sym.type_p)
- || (get_irn_symconst_attr(a).tp != get_irn_symconst_attr(b).tp);
+ const symconst_attr *pa = get_irn_symconst_attr(a);
+ const symconst_attr *pb = get_irn_symconst_attr(b);
+ return (pa->num != pb->num)
+ || (pa->sym.type_p != pb->sym.type_p)
+ || (pa->tp != pb->tp);
} /* node_cmp_attr_SymConst */
/** Compares the attributes of two Call nodes. */
/** Compares the attributes of two Sel nodes. */
static int node_cmp_attr_Sel(ir_node *a, ir_node *b) {
- return (get_irn_sel_attr(a).ent->kind != get_irn_sel_attr(b).ent->kind)
- || (get_irn_sel_attr(a).ent->name != get_irn_sel_attr(b).ent->name)
- || (get_irn_sel_attr(a).ent->owner != get_irn_sel_attr(b).ent->owner)
- || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
- || (get_irn_sel_attr(a).ent->type != get_irn_sel_attr(b).ent->type);
+ const ir_entity *a_ent = get_Sel_entity(a);
+ const ir_entity *b_ent = get_Sel_entity(b);
+ return
+ (a_ent->kind != b_ent->kind) ||
+ (a_ent->name != b_ent->name) ||
+ (a_ent->owner != b_ent->owner) ||
+ (a_ent->ld_name != b_ent->ld_name) ||
+ (a_ent->type != b_ent->type);
} /* node_cmp_attr_Sel */
/** Compares the attributes of two Phi nodes. */
get_Load_volatility(b) == volatility_is_volatile)
/* NEVER do CSE on volatile Loads */
return 1;
+ /* do not CSE Loads with different alignment. Be conservative. */
+ if (get_Load_align(a) != get_Load_align(b))
+ return 1;
return get_Load_mode(a) != get_Load_mode(b);
} /* node_cmp_attr_Load */
/** Compares the attributes of two Store nodes. */
static int node_cmp_attr_Store(ir_node *a, ir_node *b) {
+ /* do not CSE Stores with different alignment. Be conservative. */
+ if (get_Store_align(a) != get_Store_align(b))
+ return 1;
+
/* NEVER do CSE on volatile Stores */
return (get_Store_volatility(a) == volatility_is_volatile ||
get_Store_volatility(b) == volatility_is_volatile);
/** Compares the attributes of two ASM nodes. */
static int node_cmp_attr_ASM(ir_node *a, ir_node *b) {
int i, n;
- ir_asm_constraint *ca, *cb;
+ const ir_asm_constraint *ca;
+ const ir_asm_constraint *cb;
ident **cla, **clb;
- if (get_ASM_text(a) != get_ASM_text(b));
+ if (get_ASM_text(a) != get_ASM_text(b))
return 1;
/* Should we really check the constraints here? Should be better, but is strange. */
del_pset(value_table);
} /* del_identities */
+/**
+ * Normalize a node by putting constants (and operands with smaller
+ * node index) on the right
+ *
+ * @param n The node to normalize
+ */
+static void normalize_node(ir_node *n) {
+ if (get_opt_reassociation()) {
+ if (is_op_commutative(get_irn_op(n))) {
+ ir_node *l = get_binop_left(n);
+ ir_node *r = get_binop_right(n);
+ int l_idx = get_irn_idx(l);
+ int r_idx = get_irn_idx(r);
+
+ /* For commutative operators perform a OP b == b OP a but keep
+ constants on the RIGHT side. This helps greatly in some optimizations.
+ Moreover we use the idx number to make the form deterministic. */
+ if (is_irn_constlike(l))
+ l_idx = -l_idx;
+ if (is_irn_constlike(r))
+ r_idx = -r_idx;
+ if (l_idx < r_idx) {
+ set_binop_left(n, r);
+ set_binop_right(n, l);
+ }
+ }
+ }
+} /* normalize_node */
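+/* Example: Add(Const 3, x) is normalized to Add(x, Const 3); two nodes
+ * Add(a, b) and Add(b, a) thus always get the same operand order before
+ * the value table lookup. */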
+
/**
* Return the canonical node computing the same value as n.
*
if (!value_table) return n;
- if (get_opt_reassociation()) {
- if (is_op_commutative(get_irn_op(n))) {
- ir_node *l = get_binop_left(n);
- ir_node *r = get_binop_right(n);
-
- /* for commutative operators perform a OP b == b OP a */
- if (get_irn_idx(l) > get_irn_idx(r)) {
- set_binop_left(n, r);
- set_binop_right(n, l);
- }
- }
- }
+ normalize_node(n);
o = pset_find(value_table, n, ir_node_hash(n));
if (!o) return n;
if (!value_table) return n;
- if (get_opt_reassociation()) {
- if (is_op_commutative(get_irn_op(n))) {
- ir_node *l = get_binop_left(n);
- ir_node *r = get_binop_right(n);
- int l_idx = get_irn_idx(l);
- int r_idx = get_irn_idx(r);
-
- /* For commutative operators perform a OP b == b OP a but keep
- constants on the RIGHT side. This helps greatly in some optimizations.
- Moreover we use the idx number to make the form deterministic. */
- if (is_irn_constlike(l))
- l_idx = -l_idx;
- if (is_irn_constlike(r))
- r_idx = -r_idx;
- if (l_idx < r_idx) {
- set_binop_left(n, r);
- set_binop_right(n, l);
- }
- }
- }
-
+ normalize_node(n);
/* lookup or insert in hash table with given hash key. */
o = pset_insert(value_table, n, ir_node_hash(n));
* Garbage in, garbage out. If a node has a dead input, i.e., the
* Bad node is input to the node, return the Bad node.
*/
-static INLINE ir_node *gigo(ir_node *node) {
+static ir_node *gigo(ir_node *node) {
int i, irn_arity;
ir_op *op = get_irn_op(node);
if (get_opt_constant_folding()) {
/* neither constants nor Tuple values can be evaluated */
if (iro != iro_Const && (get_irn_mode(n) != mode_T)) {
+ unsigned fp_model = get_irg_fp_model(current_ir_graph);
+ int old_fp_mode = tarval_enable_fp_ops((fp_model & fp_strict_algebraic) == 0);
/* try to evaluate */
tv = computed_value(n);
if (tv != tarval_bad) {
/* evaluation was successful -- replace the node. */
irg_kill_node(current_ir_graph, n);
- nw = new_Const(get_tarval_mode (tv), tv);
+ nw = new_Const(get_tarval_mode(tv), tv);
- if (old_tp && get_type_mode(old_tp) == get_tarval_mode (tv))
+ if (old_tp && get_type_mode(old_tp) == get_tarval_mode(tv))
set_Const_type(nw, old_tp);
DBG_OPT_CSTEVAL(oldn, nw);
+ tarval_enable_fp_ops(old_fp_mode);
return nw;
}
+ tarval_enable_fp_ops(old_fp_mode);
}
}
(iro == iro_Block) ) /* Flags tested local. */
n = equivalent_node(n);
- optimize_preds(n); /* do node specific optimizations of nodes predecessors. */
-
/* Common Subexpression Elimination.
*
* Checks whether n is already available.
ir_node *oldn = n;
ir_opcode iro = get_irn_opcode(n);
- if (!get_opt_optimize() && (get_irn_op(n) != op_Phi)) return n;
+ if (!get_opt_optimize() && !is_Phi(n)) return n;
/* constant expression evaluation / constant folding */
if (get_opt_constant_folding()) {
/* neither constants nor Tuple values can be evaluated */
if (iro != iro_Const && get_irn_mode(n) != mode_T) {
+ unsigned fp_model = get_irg_fp_model(current_ir_graph);
+ int old_fp_mode = tarval_enable_fp_ops((fp_model & fp_strict_algebraic) == 0);
/* try to evaluate */
tv = computed_value(n);
if (tv != tarval_bad) {
set_Const_type(n, old_tp);
DBG_OPT_CSTEVAL(oldn, n);
+ tarval_enable_fp_ops(old_fp_mode);
return n;
}
+ tarval_enable_fp_ops(old_fp_mode);
}
}
(iro == iro_Block) ) /* Flags tested local. */
n = equivalent_node(n);
- optimize_preds(n); /* do node specific optimizations of nodes predecessors. */
-
/** common subexpression elimination **/
/* Checks whether n is already available. */
/* The block input is used to distinguish different subexpressions. Right