* @author Christian Schaefer, Goetz Lindenmaier, Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include <string.h>
#include "irhooks.h"
#include "irarch.h"
#include "hashptr.h"
-#include "archop.h"
#include "opt_confirms.h"
#include "opt_polymorphy.h"
#include "irtools.h"
-#include "xmalloc.h"
+#include "irhooks.h"
+#include "array_t.h"
/* Make types visible to allow most efficient access */
#include "entity_t.h"
tarval *ta;
tarval *tb;
- /* a - a */
- if (a == b && !is_Bad(a))
- return get_mode_null(mode);
+ /* NaN - NaN != 0 */
+ if (! mode_is_float(mode)) {
+ /* a - a = 0 */
+ if (a == b)
+ return get_mode_null(mode);
+ }
ta = value_of(a);
tb = value_of(b);
if (ta != tarval_bad && tb != tarval_bad) {
return tarval_mul(ta, tb);
} else {
- /* a*0 = 0 or 0*b = 0 */
- if (ta == get_mode_null(mode))
- return ta;
- if (tb == get_mode_null(mode))
- return tb;
+ /* a * 0 != 0 if a == NaN or a == Inf */
+ if (!mode_is_float(mode)) {
+ /* a*0 = 0 or 0*b = 0 */
+ if (ta == get_mode_null(mode))
+ return ta;
+ if (tb == get_mode_null(mode))
+ return tb;
+ }
}
return tarval_bad;
} /* computed_value_Mul */
static tarval *computed_value_Confirm(const ir_node *n) {
/*
* Beware: we might produce Phi(Confirm(x == true), Confirm(x == false)).
- * Do NOT optimize them away (CondEval wants them), so wait until
+ * Do NOT optimize them away (jump threading wants them), so wait until
* remove_confirm is activated.
*/
if (get_opt_remove_confirm()) {
ir_node *oldn = n;
int n_preds;
- /* don't optimize dead blocks */
- if (is_Block_dead(n))
+ /* don't optimize dead or labeled blocks */
+ if (is_Block_dead(n) || has_Block_entity(n))
return n;
n_preds = get_Block_n_cfgpreds(n);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_AND);
return n;
}
- /* constants are cormalized to right, check this site first */
+ /* constants are normalized to right, check this site first */
tv = value_of(b);
if (tarval_is_all_one(tv)) {
n = a;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
return n;
}
+ if (tv != get_tarval_bad()) {
+ ir_mode *mode = get_irn_mode(n);
+ if (!mode_is_signed(mode) && is_Conv(a)) {
+ ir_node *convop = get_Conv_op(a);
+ ir_mode *convopmode = get_irn_mode(convop);
+ if (!mode_is_signed(convopmode)) {
+ if (tarval_is_all_one(tarval_convert_to(tv, convopmode))) {
+ /* Conv(X) & all_one(mode(X)) = Conv(X) */
+ n = a;
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_AND);
+ return n;
+ }
+ }
+ }
+ }
tv = value_of(a);
if (tarval_is_all_one(tv)) {
n = b;
restart:
if (n_mode == a_mode) { /* No Conv necessary */
if (get_Conv_strict(n)) {
- /* special case: the predecessor might be a also a Conv */
+ ir_node *p = a;
+
+ /* neither Minus nor Abs nor Confirm change the precision,
+ so we can "look-through" */
+ for (;;) {
+ if (is_Minus(p)) {
+ p = get_Minus_op(p);
+ } else if (is_Abs(p)) {
+ p = get_Abs_op(p);
+ } else if (is_Confirm(p)) {
+ p = get_Confirm_value(p);
+ } else {
+ /* stop here */
+ break;
+ }
+ }
+ if (is_Conv(p) && get_Conv_strict(p)) {
+ /* we already know that a_mode == n_mode, and neither
+ Abs nor Minus change the mode, so the second Conv
+ can be kicked */
+ assert(get_irn_mode(p) == n_mode);
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
+ return n;
+ }
+ if (is_Proj(p)) {
+ ir_node *pred = get_Proj_pred(p);
+ if (is_Load(pred)) {
+ /* Loads always return with the exact precision of n_mode */
+ assert(get_Load_mode(pred) == n_mode);
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
+ return n;
+ }
+ if (is_Proj(pred) && get_Proj_proj(pred) == pn_Start_T_args) {
+ pred = get_Proj_pred(pred);
+ if (is_Start(pred)) {
+ /* Arguments always return with the exact precision,
+ as strict Convs are placed before Calls -- if the
+ caller was compiled with the same setting.
+ Otherwise, the semantics are probably still right. */
+ assert(get_irn_mode(p) == n_mode);
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
+ return n;
+ }
+ }
+ }
if (is_Conv(a)) {
+ /* special case: the immediate predecessor is also a Conv */
if (! get_Conv_strict(a)) {
/* first one is not strict, kick it */
a = get_Conv_op(a);
goto restart;
}
/* else both are strict conv, second is superfluous */
- } else {
- if (is_Proj(a)) {
- ir_node *pred = get_Proj_pred(a);
- if (is_Load(pred)) {
- /* loads always return with the exact precision of n_mode */
- assert(get_Load_mode(pred) == n_mode);
- return a;
- }
- }
- /* leave strict floating point Conv's */
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
return n;
}
+ } else {
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
+ return n;
}
- n = a;
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
} else if (is_Conv(a)) { /* Conv(Conv(b)) */
ir_node *b = get_Conv_op(a);
ir_mode *b_mode = get_irn_mode(b);
if (n_mode == mode_b) {
n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
+ return n;
} else if (get_mode_arithmetic(n_mode) == get_mode_arithmetic(a_mode)) {
- if (smaller_mode(b_mode, a_mode)) {
+ if (values_in_mode(b_mode, a_mode)) {
n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
+ return n;
}
}
}
if (mode_is_int(n_mode) && get_mode_arithmetic(a_mode) == irma_ieee754) {
/* ConvI(ConvF(I)) -> I, iff float mantissa >= int mode */
- size_t int_mantissa = get_mode_size_bits(n_mode) - (mode_is_signed(n_mode) ? 1 : 0);
- size_t float_mantissa = tarval_ieee754_get_mantissa_size(a_mode);
+ unsigned int_mantissa = get_mode_size_bits(n_mode) - (mode_is_signed(n_mode) ? 1 : 0);
+ unsigned float_mantissa = tarval_ieee754_get_mantissa_size(a_mode);
if (float_mantissa >= int_mantissa) {
n = b;
set_Conv_strict(b, 1);
n = b; /* ConvA(ConvB(ConvA(...))) == ConvA(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
+ return n;
}
}
}
static ir_node *equivalent_node_Mux(ir_node *n)
{
ir_node *oldn = n, *sel = get_Mux_sel(n);
+ ir_node *n_t, *n_f;
tarval *ts = value_of(sel);
/* Mux(true, f, t) == t */
if (ts == tarval_b_true) {
n = get_Mux_true(n);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ return n;
}
/* Mux(false, f, t) == f */
- else if (ts == tarval_b_false) {
+ if (ts == tarval_b_false) {
n = get_Mux_false(n);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ return n;
+ }
+ n_t = get_Mux_true(n);
+ n_f = get_Mux_false(n);
+
+ /* Mux(v, x, T) == x */
+ if (is_Unknown(n_f)) {
+ n = n_t;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ return n;
}
+ /* Mux(v, T, x) == x */
+ if (is_Unknown(n_t)) {
+ n = n_f;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ return n;
+ }
+
/* Mux(v, x, x) == x */
- else if (get_Mux_false(n) == get_Mux_true(n)) {
- n = get_Mux_true(n);
+ if (n_t == n_f) {
+ n = n_t;
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ return n;
}
- else if (is_Proj(sel) && !mode_honor_signed_zeros(get_irn_mode(n))) {
+ if (is_Proj(sel) && !mode_honor_signed_zeros(get_irn_mode(n))) {
ir_node *cmp = get_Proj_pred(sel);
long proj_nr = get_Proj_proj(sel);
ir_node *f = get_Mux_false(n);
if (! is_Phi(n) || get_irn_arity(n) == 0)
return 0;
- for (i = get_irn_arity(n) - 1; i >= 0; --i)
+ for (i = get_irn_arity(n) - 1; i >= 0; --i) {
if (! is_Const(get_irn_n(n, i)))
return 0;
- return 1;
+ }
+ return 1;
} /* is_const_Phi */
typedef tarval *(*tarval_sub_type)(tarval *a, tarval *b, ir_mode *mode);
irg = current_ir_graph;
for (i = 0; i < n; ++i) {
pred = get_irn_n(phi, i);
- res[i] = new_r_Const_type(irg, get_irg_start_block(irg),
- mode, res[i], get_Const_type(pred));
+ res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
}
- return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
+ return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_binop_on_phi */
/**
irg = current_ir_graph;
for (i = 0; i < n; ++i) {
pred = get_irn_n(a, i);
- res[i] = new_r_Const_type(irg, get_irg_start_block(irg), mode, res[i], get_Const_type(pred));
+ res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
}
- return new_r_Phi(irg, get_nodes_block(a), n, (ir_node **)res, mode);
+ return new_r_Phi(get_nodes_block(a), n, (ir_node **)res, mode);
} /* apply_binop_on_2_phis */
/**
irg = current_ir_graph;
for (i = 0; i < n; ++i) {
pred = get_irn_n(phi, i);
- res[i] = new_r_Const_type(irg, get_irg_start_block(irg),
- mode, res[i], get_Const_type(pred));
+ res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
}
- return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
+ return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_unop_on_phi */
/**
irg = current_ir_graph;
for (i = 0; i < n; ++i) {
pred = get_irn_n(phi, i);
- res[i] = new_r_Const_type(irg, get_irg_start_block(irg),
- mode, res[i], get_Const_type(pred));
+ res[i] = new_r_Const_type(irg, res[i], get_Const_type(pred));
}
- return new_r_Phi(irg, get_nodes_block(phi), n, (ir_node **)res, mode);
+ return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
} /* apply_conv_on_phi */
/**
/* convert a AddP(P, *s) into AddP(P, *u) */
ir_mode *nm = get_reference_mode_unsigned_eq(mode);
- ir_node *pre = new_r_Conv(current_ir_graph, get_nodes_block(n), right, nm);
+ ir_node *pre = new_r_Conv(get_nodes_block(n), right, nm);
set_binop_right(n, pre);
}
}
if (is_Const(b) && is_Const_null(b) && mode_is_int(lmode)) {
/* an Add(a, NULL) is a hidden Conv */
dbg_info *dbg = get_irn_dbg_info(n);
- return new_rd_Conv(dbg, current_ir_graph, get_nodes_block(n), a, mode);
+ return new_rd_Conv(dbg, get_nodes_block(n), a, mode);
}
}
if (mode_is_num(mode)) {
/* the following code leads to endless recursion when Mul are replaced by a simple instruction chain */
- if (!is_arch_dep_running() && a == b && mode_is_int(mode)) {
+ if (!is_irg_state(current_ir_graph, IR_GRAPH_STATE_ARCH_DEP)
+ && a == b && mode_is_int(mode)) {
ir_node *block = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
- current_ir_graph,
block,
a,
- new_r_Const_long(current_ir_graph, block, mode, 2),
+ new_Const_long(mode, 2),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_A);
return n;
if (is_Minus(a)) {
n = new_rd_Sub(
get_irn_dbg_info(n),
- current_ir_graph,
get_nodes_block(n),
b,
get_Minus_op(a),
if (is_Minus(b)) {
n = new_rd_Sub(
get_irn_dbg_info(n),
- current_ir_graph,
get_nodes_block(n),
a,
get_Minus_op(b),
if (is_Const(b) && is_Const_one(b)) {
/* ~x + 1 = -x */
ir_node *blk = get_nodes_block(n);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), blk, op, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
return n;
}
if (op == b) {
/* ~x + x = -1 */
- ir_node *blk = get_nodes_block(n);
- n = new_r_Const(current_ir_graph, blk, mode, get_mode_minus_one(mode));
+ n = new_Const(get_mode_minus_one(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
return n;
}
if (op == a) {
/* x + ~x = -1 */
- ir_node *blk = get_nodes_block(n);
- n = new_r_Const(current_ir_graph, blk, mode, get_mode_minus_one(mode));
+ n = new_Const(get_mode_minus_one(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_X_NOT_X);
return n;
}
tarval *tv = tarval_neg(get_Const_tarval(cnst));
dbg_info *dbgi = get_irn_dbg_info(cnst);
ir_graph *irg = get_irn_irg(cnst);
- ir_node *block = get_nodes_block(cnst);
- ir_mode *mode = get_irn_mode(cnst);
if (tv == tarval_bad) return NULL;
- return new_rd_Const(dbgi, irg, block, mode, tv);
+ return new_rd_Const(dbgi, irg, tv);
}
/**
if (is_Const(b) && is_Const_null(b) && mode_is_reference(lmode)) {
/* a Sub(a, NULL) is a hidden Conv */
dbg_info *dbg = get_irn_dbg_info(n);
- n = new_rd_Conv(dbg, current_ir_graph, get_nodes_block(n), a, mode);
+ n = new_rd_Conv(dbg, get_nodes_block(n), a, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_CONV);
return n;
}
get_Const_tarval(a) == get_mode_minus_one(mode)) {
/* -1 - x -> ~x */
dbg_info *dbg = get_irn_dbg_info(n);
- n = new_rd_Not(dbg, current_ir_graph, get_nodes_block(n), b, mode);
+ n = new_rd_Not(dbg, get_nodes_block(n), b, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_NOT);
return n;
}
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
return n;
- if (is_Const(b) && get_irn_mode(b) != mode_P) {
+ if (is_Const(b) && !mode_is_reference(get_irn_mode(b))) {
/* a - C -> a + (-C) */
ir_node *cnst = const_negate(b);
if (cnst != NULL) {
ir_node *block = get_nodes_block(n);
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = get_irn_irg(n);
- n = new_rd_Add(dbgi, irg, block, a, cnst, mode);
+ n = new_rd_Add(dbgi, block, a, cnst, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
}
}
if (is_Minus(a)) { /* (-a) - b -> -(a + b) */
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
ir_node *left = get_Minus_op(a);
- ir_node *add = new_rd_Add(dbg, irg, block, left, b, mode);
+ ir_node *add = new_rd_Add(dbg, block, left, b, mode);
- n = new_rd_Minus(dbg, irg, block, add, mode);
+ n = new_rd_Minus(dbg, block, add, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
} else if (is_Minus(b)) { /* a - (-b) -> a + b */
- ir_graph *irg = current_ir_graph;
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
ir_node *right = get_Minus_op(b);
- n = new_rd_Add(dbg, irg, block, a, right, mode);
+ n = new_rd_Add(dbg, block, a, right, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MINUS);
return n;
} else if (is_Sub(b)) {
/* a - (b - c) -> a + (c - b)
* -> (a - b) + c iff (b - c) is a pointer */
- ir_graph *irg = current_ir_graph;
dbg_info *s_dbg = get_irn_dbg_info(b);
ir_node *s_block = get_nodes_block(b);
ir_node *s_left = get_Sub_left(b);
ir_node *s_right = get_Sub_right(b);
ir_mode *s_mode = get_irn_mode(b);
- if (s_mode == mode_P) {
- ir_node *sub = new_rd_Sub(s_dbg, irg, s_block, a, s_left, mode);
+ if (mode_is_reference(s_mode)) {
+ ir_node *sub = new_rd_Sub(s_dbg, s_block, a, s_left, mode);
dbg_info *a_dbg = get_irn_dbg_info(n);
ir_node *a_block = get_nodes_block(n);
if (s_mode != mode)
- s_right = new_r_Conv(irg, a_block, s_right, mode);
- n = new_rd_Add(a_dbg, irg, a_block, sub, s_right, mode);
+ s_right = new_r_Conv(a_block, s_right, mode);
+ n = new_rd_Add(a_dbg, a_block, sub, s_right, mode);
} else {
- ir_node *sub = new_rd_Sub(s_dbg, irg, s_block, s_right, s_left, s_mode);
+ ir_node *sub = new_rd_Sub(s_dbg, s_block, s_right, s_left, s_mode);
dbg_info *a_dbg = get_irn_dbg_info(n);
ir_node *a_block = get_nodes_block(n);
- n = new_rd_Add(a_dbg, irg, a_block, a, sub, mode);
+ n = new_rd_Add(a_dbg, a_block, a, sub, mode);
}
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
if (is_Const(m_right)) {
ir_node *cnst2 = const_negate(m_right);
if (cnst2 != NULL) {
- ir_graph *irg = current_ir_graph;
dbg_info *m_dbg = get_irn_dbg_info(b);
ir_node *m_block = get_nodes_block(b);
ir_node *m_left = get_Mul_left(b);
ir_mode *m_mode = get_irn_mode(b);
- ir_node *mul = new_rd_Mul(m_dbg, irg, m_block, m_left, cnst2, m_mode);
+ ir_node *mul = new_rd_Mul(m_dbg, m_block, m_left, cnst2, m_mode);
dbg_info *a_dbg = get_irn_dbg_info(n);
ir_node *a_block = get_nodes_block(n);
- n = new_rd_Add(a_dbg, irg, a_block, a, mul, mode);
+ n = new_rd_Add(a_dbg, a_block, a, mul, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
return n;
}
if (mode_is_num(mode) && mode == get_irn_mode(a) && is_Const(a) && is_Const_null(a)) {
n = new_rd_Minus(
get_irn_dbg_info(n),
- current_ir_graph,
get_nodes_block(n),
b,
mode);
if (left == b) {
if (mode != get_irn_mode(right)) {
/* This Sub is an effective Cast */
- right = new_r_Conv(get_irn_irg(n), get_nodes_block(n), right, mode);
+ right = new_r_Conv(get_nodes_block(n), right, mode);
}
n = right;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
} else if (right == b) {
if (mode != get_irn_mode(left)) {
/* This Sub is an effective Cast */
- left = new_r_Conv(get_irn_irg(n), get_nodes_block(n), left, mode);
+ left = new_r_Conv(get_nodes_block(n), left, mode);
}
n = left;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
if (left == a) {
ir_mode *r_mode = get_irn_mode(right);
- n = new_r_Minus(get_irn_irg(n), get_nodes_block(n), right, r_mode);
+ n = new_r_Minus(get_nodes_block(n), right, r_mode);
if (mode != r_mode) {
/* This Sub is an effective Cast */
- n = new_r_Conv(get_irn_irg(n), get_nodes_block(n), n, mode);
+ n = new_r_Conv(get_nodes_block(n), n, mode);
}
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
return n;
} else if (right == a) {
ir_mode *l_mode = get_irn_mode(left);
- n = new_r_Minus(get_irn_irg(n), get_nodes_block(n), left, l_mode);
+ n = new_r_Minus(get_nodes_block(n), left, l_mode);
if (mode != l_mode) {
/* This Sub is an effective Cast */
- n = new_r_Conv(get_irn_irg(n), get_nodes_block(n), n, mode);
+ n = new_r_Conv(get_nodes_block(n), n, mode);
}
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
return n;
ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
- current_ir_graph, blk,
+ blk,
ma,
new_rd_Sub(
get_irn_dbg_info(n),
- current_ir_graph, blk,
+ blk,
mb,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
+ new_Const_long(mode, 1),
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
ir_node *blk = get_nodes_block(n);
n = new_rd_Mul(
get_irn_dbg_info(n),
- current_ir_graph, blk,
+ blk,
mb,
new_rd_Sub(
get_irn_dbg_info(n),
- current_ir_graph, blk,
+ blk,
ma,
- new_r_Const_long(current_ir_graph, blk, mode, 1),
+ new_Const_long(mode, 1),
mode),
mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_MUL_A_X_A);
}
}
if (is_Sub(a)) { /* (x - y) - b -> x - (y + b) */
- ir_node *x = get_Sub_left(a);
+ ir_node *x = get_Sub_left(a);
ir_node *y = get_Sub_right(a);
ir_node *blk = get_nodes_block(n);
ir_mode *m_b = get_irn_mode(b);
return n;
}
- add = new_r_Add(current_ir_graph, blk, y, b, add_mode);
+ add = new_r_Add(blk, y, b, add_mode);
- n = new_rd_Sub(get_irn_dbg_info(n), current_ir_graph, blk, x, add, mode);
+ n = new_rd_Sub(get_irn_dbg_info(n), blk, x, add, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_SUB_X_Y_Z);
return n;
}
tv = tarval_add(tv, get_mode_one(mode));
if (tv != tarval_bad) {
ir_node *blk = get_nodes_block(n);
- ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, get_Not_op(b), c, mode);
+ ir_node *c = new_Const(tv);
+ n = new_rd_Add(get_irn_dbg_info(n), blk, get_Not_op(b), c, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_C_NOT_X);
return n;
}
if (ta == get_mode_one(smode)) {
/* (L)1 * (L)b = (L)b */
ir_node *blk = get_nodes_block(n);
- n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, b, mode);
+ n = new_rd_Conv(get_irn_dbg_info(n), blk, b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
return n;
}
else if (ta == get_mode_minus_one(smode)) {
/* (L)-1 * (L)b = (L)b */
ir_node *blk = get_nodes_block(n);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, b, smode);
- n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), blk, b, smode);
+ n = new_rd_Conv(get_irn_dbg_info(n), blk, n, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
if (tb == get_mode_one(smode)) {
/* (L)a * (L)1 = (L)a */
ir_node *blk = get_irn_n(a, -1);
- n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, a, mode);
+ n = new_rd_Conv(get_irn_dbg_info(n), blk, a, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_1);
return n;
}
else if (tb == get_mode_minus_one(smode)) {
/* (L)a * (L)-1 = (L)-a */
ir_node *blk = get_nodes_block(n);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, a, smode);
- n = new_rd_Conv(get_irn_dbg_info(n), current_ir_graph, blk, n, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), blk, a, smode);
+ n = new_rd_Conv(get_irn_dbg_info(n), blk, n, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
else if (value_of(b) == get_mode_minus_one(mode))
r = a;
if (r) {
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), r, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), r, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
if (cnst != NULL) {
dbg_info *dbgi = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- n = new_rd_Mul(dbgi, current_ir_graph, block, get_Minus_op(a), cnst, mode);
+ n = new_rd_Mul(dbgi, block, get_Minus_op(a), cnst, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
return n;
}
} else if (is_Minus(b)) { /* (-a) * (-b) -> a * b */
dbg_info *dbgi = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- n = new_rd_Mul(dbgi, current_ir_graph, block, get_Minus_op(a), get_Minus_op(b), mode);
+ n = new_rd_Mul(dbgi, block, get_Minus_op(a), get_Minus_op(b), mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_MINUS);
return n;
} else if (is_Sub(b)) { /* (-a) * (b - c) -> a * (c - b) */
ir_node *sub_l = get_Sub_left(b);
ir_node *sub_r = get_Sub_right(b);
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
- ir_node *new_b = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
- n = new_rd_Mul(dbgi, irg, block, get_Minus_op(a), new_b, mode);
+ ir_node *new_b = new_rd_Sub(dbgi, block, sub_r, sub_l, mode);
+ n = new_rd_Mul(dbgi, block, get_Minus_op(a), new_b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
return n;
}
ir_node *sub_l = get_Sub_left(a);
ir_node *sub_r = get_Sub_right(a);
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
- ir_node *new_a = new_rd_Sub(dbgi, irg, block, sub_r, sub_l, mode);
- n = new_rd_Mul(dbgi, irg, block, new_a, get_Minus_op(b), mode);
+ ir_node *new_a = new_rd_Sub(dbgi, block, sub_r, sub_l, mode);
+ n = new_rd_Mul(dbgi, block, new_a, get_Minus_op(b), mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS);
return n;
}
+ } else if (is_Shl(a)) {
+ ir_node *const shl_l = get_Shl_left(a);
+ if (is_Const(shl_l) && is_Const_one(shl_l)) {
+ /* (1 << x) * b -> b << x */
+ dbg_info *const dbgi = get_irn_dbg_info(n);
+ ir_node *const block = get_nodes_block(n);
+ ir_node *const shl_r = get_Shl_right(a);
+ n = new_rd_Shl(dbgi, block, b, shl_r, mode);
+ // TODO add me DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_SHIFT);
+ return n;
+ }
+ } else if (is_Shl(b)) {
+ ir_node *const shl_l = get_Shl_left(b);
+ if (is_Const(shl_l) && is_Const_one(shl_l)) {
+ /* a * (1 << x) -> a << x */
+ dbg_info *const dbgi = get_irn_dbg_info(n);
+ ir_node *const block = get_nodes_block(n);
+ ir_node *const shl_r = get_Shl_right(b);
+ n = new_rd_Shl(dbgi, block, a, shl_r, mode);
+ // TODO add me DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_SHIFT);
+ return n;
+ }
}
if (get_mode_arithmetic(mode) == irma_ieee754) {
if (is_Const(a)) {
if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)
&& !tarval_is_negative(tv)) {
/* 2.0 * b = b + b */
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), b, b, mode);
+ n = new_rd_Add(get_irn_dbg_info(n), get_nodes_block(n), b, b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
return n;
}
if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)
&& !tarval_is_negative(tv)) {
/* a * 2.0 = a + a */
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, a, mode);
+ n = new_rd_Add(get_irn_dbg_info(n), get_nodes_block(n), a, a, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
return n;
}
ir_node *a = get_Div_left(n);
ir_node *b = get_Div_right(n);
ir_node *value;
- tarval *tv;
+ const ir_node *dummy;
if (is_Const(b) && is_const_Phi(a)) {
/* check for Div(Phi, Const) */
}
value = n;
- tv = value_of(n);
- if (tv != tarval_bad) {
- value = new_Const(get_tarval_mode(tv), tv);
+ if (a == b && value_not_zero(a, &dummy)) {
+ /* BEWARE: we can optimize a/a to 1 only if this cannot cause a exception */
+ value = new_Const(get_mode_one(mode));
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
} else {
- ir_node *a = get_Div_left(n);
- ir_node *b = get_Div_right(n);
- const ir_node *dummy;
-
- if (a == b && value_not_zero(a, &dummy)) {
- /* BEWARE: we can optimize a/a to 1 only if this cannot cause a exception */
- value = new_Const(mode, get_mode_one(mode));
- DBG_OPT_CSTEVAL(n, value);
- goto make_tuple;
- } else {
- if (mode_is_signed(mode) && is_Const(b)) {
- tarval *tv = get_Const_tarval(b);
+ if (mode_is_signed(mode) && is_Const(b)) {
+ tarval *tv = get_Const_tarval(b);
- if (tv == get_mode_minus_one(mode)) {
- /* a / -1 */
- value = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, mode);
- DBG_OPT_CSTEVAL(n, value);
- goto make_tuple;
- }
+ if (tv == get_mode_minus_one(mode)) {
+ /* a / -1 */
+ value = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
+ DBG_OPT_CSTEVAL(n, value);
+ goto make_tuple;
}
- /* Try architecture dependent optimization */
- value = arch_dep_replace_div_by_const(n);
}
+ /* Try architecture dependent optimization */
+ value = arch_dep_replace_div_by_const(n);
}
if (value != n) {
mem = skip_Pin(mem);
turn_into_tuple(n, pn_Div_max);
set_Tuple_pred(n, pn_Div_M, mem);
- set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(blk));
set_Tuple_pred(n, pn_Div_X_except, new_Bad());
set_Tuple_pred(n, pn_Div_res, value);
}
value = n;
tv = value_of(n);
if (tv != tarval_bad) {
- value = new_Const(get_tarval_mode(tv), tv);
+ value = new_Const(tv);
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
if (a == b && value_not_zero(a, &dummy)) {
/* BEWARE: we can optimize a%a to 0 only if this cannot cause a exception */
- value = new_Const(mode, get_mode_null(mode));
+ value = new_Const(get_mode_null(mode));
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
} else {
if (tv == get_mode_minus_one(mode)) {
/* a % -1 = 0 */
- value = new_Const(mode, get_mode_null(mode));
+ value = new_Const(get_mode_null(mode));
DBG_OPT_CSTEVAL(n, value);
goto make_tuple;
}
mem = skip_Pin(mem);
turn_into_tuple(n, pn_Mod_max);
set_Tuple_pred(n, pn_Mod_M, mem);
- set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(blk));
set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
set_Tuple_pred(n, pn_Mod_res, value);
}
if (tb != tarval_bad) {
if (tb == get_mode_one(get_tarval_mode(tb))) {
va = a;
- vb = new_Const(mode, get_mode_null(mode));
+ vb = new_Const(get_mode_null(mode));
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
} else if (ta != tarval_bad) {
Jmp for X result!? */
resb = tarval_mod(ta, tb);
if (resb == tarval_bad) return n; /* Causes exception! */
- va = new_Const(mode, resa);
- vb = new_Const(mode, resb);
+ va = new_Const(resa);
+ vb = new_Const(resb);
DBG_OPT_CSTEVAL(n, va);
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
} else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
- va = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, mode);
- vb = new_Const(mode, get_mode_null(mode));
+ va = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
+ vb = new_Const(get_mode_null(mode));
DBG_OPT_CSTEVAL(n, va);
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
} else if (a == b) {
if (value_not_zero(a, &dummy)) {
/* a/a && a != 0 */
- va = new_Const(mode, get_mode_one(mode));
- vb = new_Const(mode, get_mode_null(mode));
+ va = new_Const(get_mode_one(mode));
+ vb = new_Const(get_mode_null(mode));
DBG_OPT_CSTEVAL(n, va);
DBG_OPT_CSTEVAL(n, vb);
goto make_tuple;
blk = get_nodes_block(n);
turn_into_tuple(n, pn_DivMod_max);
set_Tuple_pred(n, pn_DivMod_M, mem);
- set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
+ set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(blk));
set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
set_Tuple_pred(n, pn_DivMod_res_div, va);
set_Tuple_pred(n, pn_DivMod_res_mod, vb);
tarval *tv = value_of(b);
if (tv != tarval_bad) {
- int rem;
+ int rem = tarval_fp_ops_enabled();
/*
* Floating point constant folding might be disabled here to
* However, as we check for exact result, doing it is safe.
* Switch it on.
*/
- rem = tarval_enable_fp_ops(1);
+ tarval_enable_fp_ops(1);
tv = tarval_quo(get_mode_one(mode), tv);
- (void)tarval_enable_fp_ops(rem);
+ tarval_enable_fp_ops(rem);
/* Do the transformation if the result is either exact or we are not
using strict rules. */
if (tv != tarval_bad &&
(tarval_ieee754_get_exact() || (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic) == 0)) {
ir_node *blk = get_nodes_block(n);
- ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
+ ir_node *c = new_Const(tv);
ir_node *a = get_Quot_left(n);
- ir_node *m = new_rd_Mul(get_irn_dbg_info(n), current_ir_graph, blk, a, c, mode);
+ ir_node *m = new_rd_Mul(get_irn_dbg_info(n), blk, a, c, mode);
ir_node *mem = get_Quot_mem(n);
/* skip a potential Pin */
mem = skip_Pin(mem);
turn_into_tuple(n, pn_Quot_max);
set_Tuple_pred(n, pn_Quot_M, mem);
- set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
- set_Tuple_pred(n, pn_Quot_X_except, new_r_Bad(current_ir_graph));
+ set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(blk));
+ set_Tuple_pred(n, pn_Quot_X_except, new_Bad());
set_Tuple_pred(n, pn_Quot_res, m);
DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
}
* Note that -x would create a new node, so we could
* not run it in the equivalent_node() context.
*/
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
- get_nodes_block(n), a, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), get_nodes_block(n), a, mode);
DBG_OPT_CONFIRM(oldn, n);
return n;
if (is_Minus(a)) {
/* Abs(-x) = Abs(x) */
mode = get_irn_mode(n);
- n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph,
- get_nodes_block(n), get_Minus_op(a), mode);
+ n = new_rd_Abs(get_irn_dbg_info(n), get_nodes_block(n), get_Minus_op(a), mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ABS_MINUS_X);
return n;
}
!mode_overflow_on_unary_Minus(get_irn_mode(left))) {
ir_node *const new_left = get_Minus_op(right);
ir_node *const new_right = get_Minus_op(left);
- n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph,
- get_nodes_block(n), new_left, new_right);
+ n = new_rd_Cmp(get_irn_dbg_info(n), get_nodes_block(n), new_left, new_right);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CMP_OP_OP);
}
return n;
/* It's a boolean Cond, branching on a boolean constant.
Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
ir_node *blk = get_nodes_block(n);
- jmp = new_r_Jmp(current_ir_graph, blk);
+ jmp = new_r_Jmp(blk);
turn_into_tuple(n, pn_Cond_max);
if (ta == tarval_b_true) {
set_Tuple_pred(n, pn_Cond_false, new_Bad());
if(op != get_irn_op(b))
return n;
+ /* and(conv(a), conv(b)) -> conv(and(a,b)) */
if (op == op_Conv) {
ir_node *a_op = get_Conv_op(a);
ir_node *b_op = get_Conv_op(b);
set_binop_right(n, b_op);
set_irn_mode(n, a_mode);
n = trans_func(n);
- n = new_r_Conv(current_ir_graph, blk, n, get_irn_mode(oldn));
+ n = new_r_Conv(blk, n, get_irn_mode(oldn));
- DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_SHIFT_AND);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
return n;
}
}
if(op_root == op_Eor && op == op_Or) {
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_mode *mode = get_irn_mode(c);
- c = new_rd_Not(dbgi, irg, blk, c, mode);
- n = new_rd_And(dbgi, irg, blk, new_n, c, mode);
+ c = new_rd_Not(dbgi, blk, c, mode);
+ n = new_rd_And(dbgi, blk, new_n, c, mode);
} else {
n = exact_copy(a);
set_nodes_block(n, blk);
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a & pn_b;
- return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b, new_pnc);
+ return new_rd_Proj(dbgi, block, pred_a, mode_b, new_pnc);
}
}
if (is_Or(a)) {
/* (a|b) & ~(a&b) = a^b */
ir_node *block = get_nodes_block(n);
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, block, ba, bb, mode);
+ n = new_rd_Eor(get_irn_dbg_info(n), block, ba, bb, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_TO_EOR);
return n;
}
/* (a|b) & ~(a&b) = a^b */
ir_node *block = get_nodes_block(n);
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph, block, aa, ab, mode);
+ n = new_rd_Eor(get_irn_dbg_info(n), block, aa, ab, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_TO_EOR);
return n;
}
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ar = new_rd_Not(dbg, current_ir_graph, block, ar, mode);
- n = new_rd_And(dbg, current_ir_graph, block, ar, b, mode);
+ ar = new_rd_Not(dbg, block, ar, mode);
+ n = new_rd_And(dbg, block, ar, b, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
return n;
}
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- al = new_rd_Not(dbg, current_ir_graph, block, al, mode);
- n = new_rd_And(dbg, current_ir_graph, block, al, b, mode);
+ al = new_rd_Not(dbg, block, al, mode);
+ n = new_rd_And(dbg, block, al, b, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
return n;
}
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- br = new_rd_Not(dbg, current_ir_graph, block, br, mode);
- n = new_rd_And(dbg, current_ir_graph, block, br, a, mode);
+ br = new_rd_Not(dbg, block, br, mode);
+ n = new_rd_And(dbg, block, br, a, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
return n;
}
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- bl = new_rd_Not(dbg, current_ir_graph, block, bl, mode);
- n = new_rd_And(dbg, current_ir_graph, block, bl, a, mode);
+ bl = new_rd_Not(dbg, block, bl, mode);
+ n = new_rd_And(dbg, block, bl, a, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
return n;
}
a = get_Not_op(a);
b = get_Not_op(b);
- n = new_rd_Or(get_irn_dbg_info(n), current_ir_graph, block, a, b, mode);
- n = new_rd_Not(get_irn_dbg_info(n), current_ir_graph, block, n, mode);
+ n = new_rd_Or(get_irn_dbg_info(n), block, a, b, mode);
+ n = new_rd_Not(get_irn_dbg_info(n), block, n, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_DEMORGAN);
return n;
}
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a ^ pn_b;
- return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b,
- new_pnc);
+ return new_rd_Proj(dbgi, block, pred_a, mode_b, new_pnc);
}
}
if (a == b) {
/* a ^ a = 0 */
- n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n),
- mode, get_mode_null(mode));
+ n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph,
+ get_mode_null(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
} else if (mode == mode_b &&
is_Proj(a) &&
is_Const(b) && is_Const_one(b) &&
is_Cmp(get_Proj_pred(a))) {
/* The Eor negates a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
+ n = new_r_Proj(get_nodes_block(n), get_Proj_pred(a),
mode_b, get_negated_pnc(get_Proj_proj(a), mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
} else if (is_Const(b)) {
if (is_Not(a)) { /* ~x ^ const -> x ^ ~const */
- ir_node *cnst = new_Const(mode, tarval_not(get_Const_tarval(b)));
+ ir_node *cnst = new_Const(tarval_not(get_Const_tarval(b)));
ir_node *not_op = get_Not_op(a);
dbg_info *dbg = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
ir_mode *mode = get_irn_mode(n);
- n = new_rd_Eor(dbg, irg, block, not_op, cnst, mode);
+ n = new_rd_Eor(dbg, block, not_op, cnst, mode);
return n;
} else if (is_Const_all_one(b)) { /* x ^ 1...1 -> ~1 */
- n = new_r_Not(current_ir_graph, get_nodes_block(n), a, mode);
+ n = new_r_Not(get_nodes_block(n), a, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
}
} else {
HANDLE_UNOP_PHI(tarval_not,a,c);
/* check for a boolean Not */
- if (mode == mode_b &&
- is_Proj(a) &&
- is_Cmp(get_Proj_pred(a))) {
- /* We negate a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(current_ir_graph, get_nodes_block(n), get_Proj_pred(a),
- mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
- return n;
+ if (mode == mode_b && is_Proj(a)) {
+ ir_node *a_pred = get_Proj_pred(a);
+ if (is_Cmp(a_pred)) {
+ ir_node *cmp_block = get_nodes_block(a_pred);
+ /* We negate a Cmp. The Cmp has the negated result anyways! */
+ n = new_r_Proj(cmp_block, get_Proj_pred(a),
+ mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
+ return n;
+ }
}
if (is_Eor(a)) {
ir_node *eor_b = get_Eor_right(a);
if (is_Const(eor_b)) { /* ~(x ^ const) -> x ^ ~const */
- ir_node *cnst = new_Const(mode, tarval_not(get_Const_tarval(eor_b)));
+ ir_node *cnst = new_Const(tarval_not(get_Const_tarval(eor_b)));
ir_node *eor_a = get_Eor_left(a);
dbg_info *dbg = get_irn_dbg_info(n);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
ir_mode *mode = get_irn_mode(n);
- n = new_rd_Eor(dbg, irg, block, eor_a, cnst, mode);
+ n = new_rd_Eor(dbg, block, eor_a, cnst, mode);
return n;
}
}
ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
ir_node *add_l = get_Minus_op(a);
- ir_node *add_r = new_rd_Const(dbg, irg, block, mode, get_mode_minus_one(mode));
- n = new_rd_Add(dbg, irg, block, add_l, add_r, mode);
+ ir_node *add_r = new_rd_Const(dbg, irg, get_mode_minus_one(mode));
+ n = new_rd_Add(dbg, block, add_l, add_r, mode);
} else if (is_Add(a)) {
ir_node *add_r = get_Add_right(a);
if (is_Const(add_r) && is_Const_all_one(add_r)) {
/* ~(x + -1) = -x */
ir_node *op = get_Add_left(a);
ir_node *blk = get_nodes_block(n);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, get_irn_mode(n));
+ n = new_rd_Minus(get_irn_dbg_info(n), blk, op, get_irn_mode(n));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_MINUS_1);
}
}
ir_node *op = get_Not_op(a);
tarval *tv = get_mode_one(mode);
ir_node *blk = get_nodes_block(n);
- ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
- n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, op, c, mode);
+ ir_node *c = new_Const(tv);
+ n = new_rd_Add(get_irn_dbg_info(n), blk, op, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT);
return n;
}
/* -(a >>u (size-1)) = a >>s (size-1) */
ir_node *v = get_Shr_left(a);
- n = new_rd_Shrs(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), v, c, mode);
+ n = new_rd_Shrs(get_irn_dbg_info(n), get_nodes_block(n), v, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_PREDICATE);
return n;
}
/* -(a >>s (size-1)) = a >>u (size-1) */
ir_node *v = get_Shrs_left(a);
- n = new_rd_Shr(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), v, c, mode);
+ n = new_rd_Shr(get_irn_dbg_info(n), get_nodes_block(n), v, c, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_PREDICATE);
return n;
}
ir_node *ra = get_Sub_right(a);
ir_node *blk = get_nodes_block(n);
- n = new_rd_Sub(get_irn_dbg_info(n), current_ir_graph, blk, ra, la, mode);
+ n = new_rd_Sub(get_irn_dbg_info(n), blk, ra, la, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_SUB);
return n;
}
if (tv != tarval_bad) {
tv = tarval_neg(tv);
if (tv != tarval_bad) {
- ir_node *cnst = new_Const(mode, tv);
+ ir_node *cnst = new_Const(tv);
dbg_info *dbg = get_irn_dbg_info(a);
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(a);
- n = new_rd_Mul(dbg, irg, block, mul_l, cnst, mode);
+ n = new_rd_Mul(dbg, block, mul_l, cnst, mode);
DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_MUL_C);
return n;
}
ir_type *tp = get_irn_type(n);
if (is_Const(pred) && get_Const_type(pred) != tp) {
- n = new_rd_Const_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred),
- get_Const_tarval(pred), tp);
+ n = new_rd_Const_type(NULL, current_ir_graph, get_Const_tarval(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
} else if (is_SymConst(pred) && get_SymConst_value_type(pred) != tp) {
- n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred),
+ n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_mode(pred),
get_SymConst_symbol(pred), get_SymConst_kind(pred), tp);
DBG_OPT_CSTEVAL(oldn, n);
}
return get_irg_bad(current_ir_graph);
} else {
ir_node *blk = get_nodes_block(load);
- return new_r_Jmp(current_ir_graph, blk);
+ return new_r_Jmp(blk);
}
}
}
return get_irg_bad(current_ir_graph);
} else {
ir_node *blk = get_nodes_block(store);
- return new_r_Jmp(current_ir_graph, blk);
+ return new_r_Jmp(blk);
}
}
}
proj_nr = get_Proj_proj(proj);
switch (proj_nr) {
case pn_Div_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(div, -1));
+ return new_r_Jmp(get_nodes_block(div));
case pn_Div_X_except:
/* we found an exception handler, remove it */
if (confirm) {
/* This node can only float up to the Confirm block */
- new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
+ new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
}
set_irn_pinned(div, op_pin_state_floats);
/* this is a Div without exception, we can remove the memory edge */
switch (proj_nr) {
case pn_Mod_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(mod, -1));
+ return new_r_Jmp(get_nodes_block(mod));
case pn_Mod_X_except:
/* we found an exception handler, remove it */
if (confirm) {
/* This node can only float up to the Confirm block */
- new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
+ new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
}
/* this is a Mod without exception, we can remove the memory edge */
set_Mod_mem(mod, new_mem);
if (get_Mod_left(mod) == b) {
/* a % a = 0 if a != 0 */
ir_mode *mode = get_irn_mode(proj);
- ir_node *res = new_Const(mode, get_mode_null(mode));
+ ir_node *res = new_Const(get_mode_null(mode));
DBG_OPT_CSTEVAL(mod, res);
return res;
switch (proj_nr) {
case pn_DivMod_X_regular:
- return new_r_Jmp(current_ir_graph, get_irn_n(divmod, -1));
+ return new_r_Jmp(get_nodes_block(divmod));
case pn_DivMod_X_except:
/* we found an exception handler, remove it */
if (confirm) {
/* This node can only float up to the Confirm block */
- new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
+ new_mem = new_r_Pin(get_nodes_block(confirm), new_mem);
}
/* this is a DivMod without exception, we can remove the memory edge */
set_DivMod_mem(divmod, new_mem);
if (get_DivMod_left(divmod) == b) {
/* a % a = 0 if a != 0 */
ir_mode *mode = get_irn_mode(proj);
- ir_node *res = new_Const(mode, get_mode_null(mode));
+ ir_node *res = new_Const(get_mode_null(mode));
DBG_OPT_CSTEVAL(divmod, res);
return res;
/* we have a constant switch */
long num = get_Proj_proj(proj);
- if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Proj's yet */
+ if (num != get_Cond_default_proj(n)) { /* we cannot optimize default Proj's yet */
if (get_tarval_long(tb) == num) {
/* Do NOT create a jump here, or we will have 2 control flow ops
* in a block. This case is optimized away in optimize_cf(). */
*/
static ir_node *create_zero_const(ir_mode *mode) {
tarval *tv = get_mode_null(mode);
- ir_node *cnst = new_Const(mode, tv);
+ ir_node *cnst = new_Const(tv);
return cnst;
}
/* we can evaluate some cases directly */
switch (proj_nr) {
case pn_Cmp_False:
- return new_Const(mode_b, get_tarval_b_false());
+ return new_Const(get_tarval_b_false());
case pn_Cmp_True:
- return new_Const(mode_b, get_tarval_b_true());
+ return new_Const(get_tarval_b_true());
case pn_Cmp_Leg:
if (!mode_is_float(get_irn_mode(left)))
- return new_Const(mode_b, get_tarval_b_true());
+ return new_Const(get_tarval_b_true());
break;
default:
break;
if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)
&& mode_left != mode_b && mode_right != mode_b) {
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
if (mode_left == mode_right) {
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV_CONV);
} else if (smaller_mode(mode_left, mode_right)) {
- left = new_r_Conv(irg, block, op_left, mode_right);
+ left = new_r_Conv(block, op_left, mode_right);
right = op_right;
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
} else if (smaller_mode(mode_right, mode_left)) {
left = op_left;
- right = new_r_Conv(irg, block, op_right, mode_left);
+ right = new_r_Conv(block, op_right, mode_left);
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_CONV);
}
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_OP_OP);
}
}
+ if (is_And(left) && is_Const(right)) {
+ ir_node *ll = get_binop_left(left);
+ ir_node *lr = get_binop_right(left);
+ if (is_Shr(ll) && is_Const(lr)) {
+ /* Cmp((x >>u c1) & c2, c3) = Cmp(x & (c2 << c1), c3 << c1) */
+ ir_node *block = get_nodes_block(n);
+ ir_mode *mode = get_irn_mode(left);
+
+ ir_node *llr = get_Shr_right(ll);
+ if (is_Const(llr)) {
+ dbg_info *dbg = get_irn_dbg_info(left);
+
+ tarval *c1 = get_Const_tarval(llr);
+ tarval *c2 = get_Const_tarval(lr);
+ tarval *c3 = get_Const_tarval(right);
+ tarval *mask = tarval_shl(c2, c1);
+ tarval *value = tarval_shl(c3, c1);
+
+ /* Only valid if no bits of c3 are truncated by the shift:
+ * otherwise the original compare is always false for Eq
+ * while the rewritten one may still be satisfiable.
+ * (c2 needs no such check: the left operand has no bits
+ * above width - c1, so its shift never truncates.) */
+ if (tarval_shr(value, c1) == c3) {
+ left = new_rd_And(dbg, block, get_Shr_left(ll), new_Const(mask), mode);
+ right = new_Const(value);
+ changed |= 1;
+ }
+ }
+ }
+ }
} /* mode_is_int(...) */
} /* proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg */
/* replace mode_b compares with ands/ors */
if (get_irn_mode(left) == mode_b) {
- ir_graph *irg = current_ir_graph;
ir_node *block = get_nodes_block(n);
ir_node *bres;
switch (proj_nr) {
- case pn_Cmp_Le: bres = new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); break;
- case pn_Cmp_Lt: bres = new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b); break;
- case pn_Cmp_Ge: bres = new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); break;
- case pn_Cmp_Gt: bres = new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b); break;
- case pn_Cmp_Lg: bres = new_r_Eor(irg, block, left, right, mode_b); break;
- case pn_Cmp_Eq: bres = new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b); break;
+ case pn_Cmp_Le: bres = new_r_Or( block, new_r_Not(block, left, mode_b), right, mode_b); break;
+ case pn_Cmp_Lt: bres = new_r_And(block, new_r_Not(block, left, mode_b), right, mode_b); break;
+ case pn_Cmp_Ge: bres = new_r_Or( block, left, new_r_Not(block, right, mode_b), mode_b); break;
+ case pn_Cmp_Gt: bres = new_r_And(block, left, new_r_Not(block, right, mode_b), mode_b); break;
+ case pn_Cmp_Lg: bres = new_r_Eor(block, left, right, mode_b); break;
+ case pn_Cmp_Eq: bres = new_r_Not(block, new_r_Eor(block, left, right, mode_b), mode_b); break;
default: bres = NULL;
}
if (bres) {
else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
if (tv != tarval_bad) {
/* c >= 0 : Abs(a) <= c ==> (unsigned)(a + c) <= 2*c */
- if (get_irn_op(left) == op_Abs) { // TODO something is missing here
+ if (is_Abs(left)) { // TODO something is missing here
}
}
}
if (mask != tv) {
/* TODO: move to constant evaluation */
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(mode_b, tv);
+ c1 = new_Const(tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
if (! tarval_is_null(get_Const_tarval(c1))) {
/* TODO: move to constant evaluation */
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(mode_b, tv);
+ c1 = new_Const(tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
if (tarval_and(tv, cmask) != tv) {
/* condition not met */
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(mode_b, tv);
+ c1 = new_Const(tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
sl = get_Shl_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
tv = tarval_shr(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
if (tarval_and(tv, cmask) != tv) {
/* condition not met */
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(mode_b, tv);
+ c1 = new_Const(tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
sl = get_Shr_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
tv = tarval_shl(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
if (!tarval_is_all_one(cond) && !tarval_is_null(cond)) {
/* condition not met */
tv = proj_nr == pn_Cmp_Eq ? get_tarval_b_false() : get_tarval_b_true();
- c1 = new_Const(mode_b, tv);
+ c1 = new_Const(tv);
DBG_OPT_CSTEVAL(proj, c1);
return c1;
}
sl = get_Shrs_left(left);
blk = get_nodes_block(n);
- left = new_rd_And(get_irn_dbg_info(left), current_ir_graph, blk, sl, new_Const(mode, amask), mode);
+ left = new_rd_And(get_irn_dbg_info(left), blk, sl, new_Const(amask), mode);
tv = tarval_shl(tv, tv1);
changed |= 2;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_SHF_TO_AND);
}
if (changed & 2) /* need a new Const */
- right = new_Const(mode, tv);
+ right = new_Const(tv);
if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_Const(right) && is_Const_null(right) && is_Proj(left)) {
ir_node *op = get_Proj_pred(left);
ir_mode *mode = get_irn_mode(v);
tv = tarval_sub(tv, get_mode_one(mode), NULL);
- left = new_rd_And(get_irn_dbg_info(op), current_ir_graph, blk, v, new_Const(mode, tv), mode);
+ left = new_rd_And(get_irn_dbg_info(op), blk, v, new_Const(tv), mode);
changed |= 1;
DBG_OPT_ALGSIM0(n, n, FS_OPT_CMP_MOD_TO_AND);
}
ir_node *block = get_nodes_block(n);
/* create a new compare */
- n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block, left, right);
- proj = new_rd_Proj(get_irn_dbg_info(proj), current_ir_graph, block, n, get_irn_mode(proj), proj_nr);
+ n = new_rd_Cmp(get_irn_dbg_info(n), block, left, right);
+ proj = new_rd_Proj(get_irn_dbg_info(proj), block, n, get_irn_mode(proj), proj_nr);
}
return proj;
case pn_CopyB_X_regular:
/* Turn CopyB into a tuple (mem, jmp, bad, bad) */
DBG_OPT_EXC_REM(proj);
- proj = new_r_Jmp(current_ir_graph, get_nodes_block(copyb));
+ proj = new_r_Jmp(get_nodes_block(copyb));
break;
case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(current_ir_graph);
+ proj = get_irg_bad(get_irn_irg(proj));
break;
default:
break;
break;
case pn_Bound_X_except:
DBG_OPT_EXC_REM(proj);
- proj = get_irg_bad(current_ir_graph);
+ proj = get_irg_bad(get_irn_irg(proj));
break;
case pn_Bound_res:
proj = idx;
break;
case pn_Bound_X_regular:
DBG_OPT_EXC_REM(proj);
- proj = new_r_Jmp(current_ir_graph, get_nodes_block(bound));
+ proj = new_r_Jmp(get_nodes_block(bound));
break;
default:
break;
}
/* move the Confirm nodes "behind" the Phi */
block = get_irn_n(phi, -1);
- new_Phi = new_r_Phi(current_ir_graph, block, n, in, get_irn_mode(phi));
- return new_r_Confirm(current_ir_graph, block, new_Phi, bound, pnc);
+ new_Phi = new_r_Phi(block, n, in, get_irn_mode(phi));
+ return new_r_Confirm(block, new_Phi, bound, pnc);
}
}
return phi;
/* ok, all conditions met */
block = get_irn_n(or, -1);
- new_and = new_r_And(current_ir_graph, block,
- value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
+ new_and = new_r_And(block, value, new_Const(tarval_and(tv4, tv2)), mode);
- new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
+ new_const = new_Const(tarval_or(tv3, tv1));
set_Or_left(or, new_and);
set_Or_right(or, new_const);
/* yet, condition met */
block = get_nodes_block(or);
- n = new_r_Rotl(current_ir_graph, block, x, c1, mode);
+ n = new_r_Rotl(block, x, c1, mode);
DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
/* yet, condition met */
block = get_nodes_block(or);
- n = new_r_Rotl(current_ir_graph, block, x, rotval, mode);
+ n = new_r_Rotl(block, x, rotval, mode);
DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROTL);
return n;
mode = get_irn_mode(n);
a = get_Not_op(a);
b = get_Not_op(b);
- n = new_rd_And(get_irn_dbg_info(n), current_ir_graph, block, a, b, mode);
- n = new_rd_Not(get_irn_dbg_info(n), current_ir_graph, block, n, mode);
+ n = new_rd_And(get_irn_dbg_info(n), block, a, b, mode);
+ n = new_rd_Not(get_irn_dbg_info(n), block, n, mode);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_DEMORGAN);
return n;
}
/* yes, we can simply calculate with pncs */
pn_Cmp new_pnc = pn_a | pn_b;
- return new_rd_Proj(dbgi, current_ir_graph, block, pred_a, mode_b,
- new_pnc);
+ return new_rd_Proj(dbgi, block, pred_a, mode_b, new_pnc);
}
}
/* beware: a simple replacement works only, if res < modulo shift */
if (!is_Rotl(n)) {
int modulo_shf = get_mode_modulo_shift(mode);
- assert(modulo_shf >= (int) get_mode_size_bits(mode));
if (modulo_shf > 0) {
tarval *modulo = new_tarval_from_long(modulo_shf,
get_tarval_mode(res));
+ assert(modulo_shf >= (int) get_mode_size_bits(mode));
+
/* shifting too much */
if (!(tarval_cmp(res, modulo) & pn_Cmp_Lt)) {
if (is_Shrs(n)) {
- ir_graph *irg = get_irn_irg(n);
ir_node *block = get_nodes_block(n);
dbg_info *dbgi = get_irn_dbg_info(n);
- ir_node *cnst = new_Const(mode_Iu, new_tarval_from_long(get_mode_size_bits(mode)-1, mode_Iu));
- return new_rd_Shrs(dbgi, irg, block, get_binop_left(left),
- cnst, mode);
+ ir_mode *smode = get_irn_mode(right);
+ ir_node *cnst = new_Const_long(smode, get_mode_size_bits(mode) - 1);
+ return new_rd_Shrs(dbgi, block, get_binop_left(left), cnst, mode);
}
- return new_Const(mode, get_mode_null(mode));
+ return new_Const(get_mode_null(mode));
}
}
} else {
block = get_nodes_block(n);
in[0] = get_binop_left(left);
- in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);
+ in[1] = new_Const(res);
- irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);
+ irn = new_ir_node(NULL, get_Block_irg(block), block, get_irn_op(n), mode, 2, in);
DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);
ir_node *bitop_left;
ir_node *bitop_right;
ir_op *op_left;
- ir_graph *irg;
ir_node *block;
dbg_info *dbgi;
ir_node *new_shift;
bitop_left = get_binop_left(left);
- irg = get_irn_irg(n);
block = get_nodes_block(n);
dbgi = get_irn_dbg_info(n);
tv1 = get_Const_tarval(bitop_right);
assert(get_tarval_mode(tv1) == mode);
if (is_Shl(n)) {
- new_shift = new_rd_Shl(dbgi, irg, block, bitop_left, right, mode);
+ new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode);
tv_shift = tarval_shl(tv1, tv2);
} else if(is_Shr(n)) {
- new_shift = new_rd_Shr(dbgi, irg, block, bitop_left, right, mode);
+ new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode);
tv_shift = tarval_shr(tv1, tv2);
} else if(is_Shrs(n)) {
- new_shift = new_rd_Shrs(dbgi, irg, block, bitop_left, right, mode);
+ new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode);
tv_shift = tarval_shrs(tv1, tv2);
} else {
assert(is_Rotl(n));
- new_shift = new_rd_Rotl(dbgi, irg, block, bitop_left, right, mode);
+ new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode);
tv_shift = tarval_rotl(tv1, tv2);
}
assert(get_tarval_mode(tv_shift) == mode);
- new_const = new_Const(mode, tv_shift);
+ new_const = new_Const(tv_shift);
if (op_left == op_And) {
- new_bitop = new_rd_And(dbgi, irg, block, new_shift, new_const, mode);
+ new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
} else if(op_left == op_Or) {
- new_bitop = new_rd_Or(dbgi, irg, block, new_shift, new_const, mode);
+ new_bitop = new_rd_Or(dbgi, block, new_shift, new_const, mode);
} else {
assert(op_left == op_Eor);
- new_bitop = new_rd_Eor(dbgi, irg, block, new_shift, new_const, mode);
+ new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode);
}
return new_bitop;
return n;
}
- assert(get_tarval_mode(tv_shl) == get_tarval_mode(tv_shr));
+ if (get_tarval_mode(tv_shl) != get_tarval_mode(tv_shr)) {
+ tv_shl = tarval_convert_to(tv_shl, get_tarval_mode(tv_shr));
+ }
+
assert(tv_mask != tarval_bad);
assert(get_tarval_mode(tv_mask) == mode);
pnc = tarval_cmp(tv_shl, tv_shr);
if (pnc == pn_Cmp_Lt || pnc == pn_Cmp_Eq) {
tv_shift = tarval_sub(tv_shr, tv_shl, NULL);
- new_const = new_Const(get_tarval_mode(tv_shift), tv_shift);
+ new_const = new_Const(tv_shift);
if (need_shrs) {
- new_shift = new_rd_Shrs(dbgi, irg, block, x, new_const, mode);
+ new_shift = new_rd_Shrs(dbgi, block, x, new_const, mode);
} else {
- new_shift = new_rd_Shr(dbgi, irg, block, x, new_const, mode);
+ new_shift = new_rd_Shr(dbgi, block, x, new_const, mode);
}
} else {
assert(pnc == pn_Cmp_Gt);
tv_shift = tarval_sub(tv_shl, tv_shr, NULL);
- new_const = new_Const(get_tarval_mode(tv_shift), tv_shift);
- new_shift = new_rd_Shl(dbgi, irg, block, x, new_const, mode);
+ new_const = new_Const(tv_shift);
+ new_shift = new_rd_Shl(dbgi, block, x, new_const, mode);
}
- new_const = new_Const(mode, tv_mask);
- new_and = new_rd_And(dbgi, irg, block, new_shift, new_const, mode);
+ new_const = new_Const(tv_mask);
+ new_and = new_rd_And(dbgi, block, new_shift, new_const, mode);
return new_and;
}
*/
static ir_node *transform_node_Conv(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Conv_op(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *a = get_Conv_op(n);
- if (is_const_Phi(a)) {
- c = apply_conv_on_phi(a, get_irn_mode(n));
+ if (mode != mode_b && is_const_Phi(a)) {
+ /* Do NOT optimize mode_b Conv's, this leads to remaining
+ * Phib nodes later, because the conv_b_lower operation
+ * is instantly reverted, when it tries to insert a Convb.
+ */
+ c = apply_conv_on_phi(a, mode);
if (c) {
DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI);
return c;
}
if (is_Unknown(a)) { /* Conv_A(Unknown_B) -> Unknown_A */
- ir_mode *mode = get_irn_mode(n);
return new_r_Unknown(current_ir_graph, mode);
}
+ if (mode_is_reference(mode) &&
+ get_mode_size_bits(mode) == get_mode_size_bits(get_irn_mode(a)) &&
+ is_Add(a)) {
+ ir_node *l = get_Add_left(a);
+ ir_node *r = get_Add_right(a);
+ dbg_info *dbgi = get_irn_dbg_info(a);
+ ir_node *block = get_nodes_block(n);
+ if(is_Conv(l)) {
+ ir_node *lop = get_Conv_op(l);
+ if(get_irn_mode(lop) == mode) {
+ /* ConvP(AddI(ConvI(P), x)) -> AddP(P, x) */
+ n = new_rd_Add(dbgi, block, lop, r, mode);
+ return n;
+ }
+ }
+ if(is_Conv(r)) {
+ ir_node *rop = get_Conv_op(r);
+ if(get_irn_mode(rop) == mode) {
+ /* ConvP(AddI(x, ConvI(P))) -> AddP(x, P) */
+ n = new_rd_Add(dbgi, block, l, rop, mode);
+ return n;
+ }
+ }
+ }
+
return n;
} /* transform_node_Conv */
continue;
} else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka))) {
continue;
+ } else if (is_Bad(ka)) {
+ /* no need to keep Bad */
+ continue;
}
- /* FIXME: beabi need to keep a Proj(M) */
- if (is_Phi(ka) || is_irn_keep(ka) || is_Proj(ka))
- in[j++] = ka;
+ in[j++] = ka;
}
if (j != n_keepalives)
set_End_keepalives(n, j, in);
ir_node *f = get_Mux_false(n);
ir_graph *irg = current_ir_graph;
+ if (is_irg_state(irg, IR_GRAPH_STATE_KEEP_MUX))
+ return n;
+
+ if (is_Mux(t)) {
+ ir_node* block = get_nodes_block(n);
+ ir_node* c0 = sel;
+ ir_node* c1 = get_Mux_sel(t);
+ ir_node* t1 = get_Mux_true(t);
+ ir_node* f1 = get_Mux_false(t);
+ if (f == f1) {
+ /* Mux(cond0, Mux(cond1, x, y), y) -> typical if (cond0 && cond1) x else y */
+ ir_node* and_ = new_r_And(block, c0, c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, and_, f1, t1, mode);
+ n = new_mux;
+ sel = and_;
+ f = f1;
+ t = t1;
+ DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
+ } else if (f == t1) {
+ /* Mux(cond0, Mux(cond1, x, y), x) */
+ ir_node* not_c1 = new_r_Not(block, c1, mode_b);
+ ir_node* and_ = new_r_And(block, c0, not_c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, and_, t1, f1, mode);
+ n = new_mux;
+ sel = and_;
+ f = t1;
+ t = f1;
+ DBG_OPT_ALGSIM0(oldn, t, FS_OPT_MUX_COMBINE);
+ }
+ } else if (is_Mux(f)) {
+ ir_node* block = get_nodes_block(n);
+ ir_node* c0 = sel;
+ ir_node* c1 = get_Mux_sel(f);
+ ir_node* t1 = get_Mux_true(f);
+ ir_node* f1 = get_Mux_false(f);
+ if (t == t1) {
+ /* Mux(cond0, x, Mux(cond1, x, y)) -> typical if (cond0 || cond1) x else y */
+ ir_node* or_ = new_r_Or(block, c0, c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, or_, f1, t1, mode);
+ n = new_mux;
+ sel = or_;
+ f = f1;
+ t = t1;
+ DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
+ } else if (t == f1) {
+ /* Mux(cond0, x, Mux(cond1, y, x)) */
+ ir_node* not_c1 = new_r_Not(block, c1, mode_b);
+ ir_node* or_ = new_r_Or(block, c0, not_c1, mode_b);
+ ir_node* new_mux = new_r_Mux(block, or_, t1, f1, mode);
+ n = new_mux;
+ sel = or_;
+ f = t1;
+ t = f1;
+ DBG_OPT_ALGSIM0(oldn, f, FS_OPT_MUX_COMBINE);
+ }
+ }
+
/* first normalization step: move a possible zero to the false case */
if (is_Proj(sel)) {
ir_node *cmp = get_Proj_pred(sel);
/* Mux(x, 0, y) => Mux(x, y, 0) */
pn_Cmp pnc = get_Proj_proj(sel);
- sel = new_r_Proj(irg, get_nodes_block(cmp), cmp, mode_b,
+ sel = new_r_Proj(get_nodes_block(cmp), cmp, mode_b,
get_negated_pnc(pnc, get_irn_mode(get_Cmp_left(cmp))));
- n = new_rd_Mux(get_irn_dbg_info(n), irg, get_nodes_block(n), sel, t, f, mode);
+ n = new_rd_Mux(get_irn_dbg_info(n), get_nodes_block(n), sel, t, f, mode);
tmp = t;
t = f;
f = tmp;
if (mode == mode_b) {
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ir_graph *irg = current_ir_graph;
if (is_Const(t)) {
tarval *tv_t = get_Const_tarval(t);
return sel;
} else {
/* Muxb(sel, true, x) = Or(sel, x) */
- n = new_rd_Or(dbg, irg, block, sel, f, mode_b);
+ n = new_rd_Or(dbg, block, sel, f, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_OR_BOOL);
return n;
}
tarval *tv_f = get_Const_tarval(f);
if (tv_f == tarval_b_true) {
/* Muxb(sel, x, true) = Or(Not(sel), x) */
- ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+ ir_node* not_sel = new_rd_Not(dbg, block, sel, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_ORNOT_BOOL);
- n = new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+ n = new_rd_Or(dbg, block, not_sel, t, mode_b);
return n;
} else {
/* Muxb(sel, x, false) = And(sel, x) */
assert(tv_f == tarval_b_false);
- n = new_rd_And(dbg, irg, block, sel, t, mode_b);
+ n = new_rd_And(dbg, block, sel, t, mode_b);
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_AND_BOOL);
return n;
}
if (is_Const(t) && is_Const(f) && mode_is_int(mode)) {
tarval *a = get_Const_tarval(t);
tarval *b = get_Const_tarval(f);
- tarval *null = get_tarval_null(mode);
tarval *diff, *min;
+ if (tarval_is_one(a) && tarval_is_null(b)) {
+ ir_node *block = get_nodes_block(n);
+ ir_node *conv = new_r_Conv(block, sel, mode);
+ n = conv;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
+ return n;
+ } else if (tarval_is_null(a) && tarval_is_one(b)) {
+ ir_node *block = get_nodes_block(n);
+ ir_node *not_ = new_r_Not(block, sel, mode_b);
+ ir_node *conv = new_r_Conv(block, not_, mode);
+ n = conv;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_CONV);
+ return n;
+ }
+ /* TODO: it's not really clear if that helps in general or should be moved
+ * to backend, especially with the MUX->Conv transformation above */
if (tarval_cmp(a, b) & pn_Cmp_Gt) {
diff = tarval_sub(a, b, NULL);
min = b;
min = a;
}
- if (diff == get_tarval_one(mode) && min != null) {
+ if (diff == get_tarval_one(mode)) {
dbg_info *dbg = get_irn_dbg_info(n);
ir_node *block = get_nodes_block(n);
- ir_graph *irg = current_ir_graph;
- ir_node *t = new_Const(mode, tarval_sub(a, min, NULL));
- ir_node *f = new_Const(mode, tarval_sub(b, min, NULL));
- n = new_rd_Mux(dbg, irg, block, sel, f, t, mode);
- n = new_rd_Add(dbg, irg, block, n, new_Const(mode, min), mode);
+ ir_node *t = new_Const(tarval_sub(a, min, NULL));
+ ir_node *f = new_Const(tarval_sub(b, min, NULL));
+ n = new_rd_Mux(dbg, block, sel, f, t, mode);
+ n = new_rd_Add(dbg, block, n, new_Const(min), mode);
return n;
}
}
|| (cmp_l == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)))
{
/* Mux(a >/>= 0, a, -a) = Mux(a </<= 0, -a, a) ==> Abs(a) */
- n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block,
- cmp_l, mode);
+ n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
} else if ((cmp_l == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))
|| (cmp_l == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)))
{
/* Mux(a </<= 0, a, -a) = Mux(a >/>= 0, -a, a) ==> -Abs(a) */
- n = new_rd_Abs(get_irn_dbg_info(n), current_ir_graph, block,
- cmp_l, mode);
- n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
- block, n, mode);
+ n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n), block, n, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
}
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* Mux((a & 2^C) == 0, 2^C, 0) */
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
+ n = new_rd_Eor(get_irn_dbg_info(n),
block, cmp_l, t, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* (a & (1 << n)) == 0, (1 << n), 0) */
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
+ n = new_rd_Eor(get_irn_dbg_info(n),
block, cmp_l, t, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* ((1 << n) & a) == 0, (1 << n), 0) */
- n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
+ n = new_rd_Eor(get_irn_dbg_info(n),
block, cmp_l, t, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
}
}
}
- return arch_transform_node_Mux(n);
+
+ return n;
} /* transform_node_Mux */
/**
add_identities(current_ir_graph->value_table, n);
return n;
-}
+} /* transform_node_Sync */
+
+/**
+ * optimize a trampoline Call into a direct Call
+ */
+static ir_node *transform_node_Call(ir_node *call) {
+ ir_node *callee = get_Call_ptr(call);
+ ir_node *adr, *mem, *res, *bl, **in;
+ ir_type *ctp, *mtp, *tp;
+ ident *id;
+ dbg_info *db;
+ int i, n_res, n_param;
+ ir_variadicity var;
+
+ if (! is_Proj(callee))
+ return call;
+ callee = get_Proj_pred(callee);
+ if (! is_Builtin(callee))
+ return call;
+ if (get_Builtin_kind(callee) != ir_bk_inner_trampoline)
+ return call;
+
+ mem = get_Call_mem(call);
+
+ if (skip_Proj(mem) == callee) {
+ /* memory is routed to the trampoline, skip */
+ mem = get_Builtin_mem(callee);
+ }
+
+ /* build a new call type */
+ mtp = get_Call_type(call);
+ id = get_type_ident(mtp);
+ id = id_mangle(new_id_from_chars("T_", 2), id);
+ db = get_type_dbg_info(mtp);
+
+ n_res = get_method_n_ress(mtp);
+ n_param = get_method_n_params(mtp);
+ ctp = new_d_type_method(id, n_param + 1, n_res, db);
+
+ for (i = 0; i < n_res; ++i)
+ set_method_res_type(ctp, i, get_method_res_type(mtp, i));
+
+ NEW_ARR_A(ir_node *, in, n_param + 1);
+
+ /* FIXME: we don't need a new pointer type in every step */
+ tp = get_irg_frame_type(current_ir_graph);
+ id = id_mangle(get_type_ident(tp), new_id_from_chars("_ptr", 4));
+ tp = new_type_pointer(id, tp, mode_P_data);
+ set_method_param_type(ctp, 0, tp);
+
+ in[0] = get_Builtin_param(callee, 2);
+ for (i = 0; i < n_param; ++i) {
+ set_method_param_type(ctp, i + 1, get_method_param_type(mtp, i));
+ in[i + 1] = get_Call_param(call, i);
+ }
+ var = get_method_variadicity(mtp);
+ set_method_variadicity(ctp, var);
+ if (var == variadicity_variadic) {
+ set_method_first_variadic_param_index(ctp, get_method_first_variadic_param_index(mtp) + 1);
+ }
+ /* When we resolve a trampoline, the function must be called by a this-call */
+ set_method_calling_convention(ctp, get_method_calling_convention(mtp) | cc_this_call);
+ set_method_additional_properties(ctp, get_method_additional_properties(mtp));
+
+ adr = get_Builtin_param(callee, 1);
+
+ db = get_irn_dbg_info(call);
+ bl = get_nodes_block(call);
+
+ res = new_rd_Call(db, bl, mem, adr, n_param + 1, in, ctp);
+ if (get_irn_pinned(call) == op_pin_state_floats)
+ set_irn_pinned(res, op_pin_state_floats);
+ return res;
+} /* transform_node_Call */
/**
* Tries several [inplace] [optimizing] transformations and returns an
*/
do {
oldn = n;
- if (n->op->ops.transform_node)
+ if (n->op->ops.transform_node != NULL)
n = n->op->ops.transform_node(n);
} while (oldn != n);
CASE(End);
CASE(Mux);
CASE(Sync);
+ CASE(Call);
default:
/* leave NULL */;
}
const divmod_attr *ma = get_irn_divmod_attr(a);
const divmod_attr *mb = get_irn_divmod_attr(b);
return ma->exc.pin_state != mb->exc.pin_state ||
- ma->res_mode != mb->res_mode ||
+ ma->resmode != mb->resmode ||
ma->no_remainder != mb->no_remainder;
} /* node_cmp_attr_Div */
const divmod_attr *ma = get_irn_divmod_attr(a);
const divmod_attr *mb = get_irn_divmod_attr(b);
return ma->exc.pin_state != mb->exc.pin_state ||
- ma->res_mode != mb->res_mode;
+ ma->resmode != mb->resmode;
} /* node_cmp_attr_DivMod */
/** Compares the attributes of two Mod nodes. */
const divmod_attr *ma = get_irn_divmod_attr(a);
const divmod_attr *mb = get_irn_divmod_attr(b);
return ma->exc.pin_state != mb->exc.pin_state ||
- ma->res_mode != mb->res_mode;
+ ma->resmode != mb->resmode;
} /* node_cmp_attr_Mod */
/** Compares the attributes of two Quot nodes. */
const divmod_attr *ma = get_irn_divmod_attr(a);
const divmod_attr *mb = get_irn_divmod_attr(b);
return ma->exc.pin_state != mb->exc.pin_state ||
- ma->res_mode != mb->res_mode;
+ ma->resmode != mb->resmode;
} /* node_cmp_attr_Quot */
/** Compares the attributes of two Confirm nodes. */
static int node_cmp_attr_Confirm(ir_node *a, ir_node *b) {
+ /* no need to compare the bound, as this is an input */
return (get_Confirm_cmp(a) != get_Confirm_cmp(b));
} /* node_cmp_attr_Confirm */
+/** Compares the attributes of two Builtin nodes. */
+static int node_cmp_attr_Builtin(ir_node *a, ir_node *b) {
+ const builtin_attr *ma = get_irn_builtin_attr(a);
+ const builtin_attr *mb = get_irn_builtin_attr(b);
+
+ /* no need to compare the type, equal kind means equal type */
+ return ma->kind != mb->kind;
+} /* node_cmp_attr_Builtin */
+
/** Compares the attributes of two ASM nodes. */
static int node_cmp_attr_ASM(ir_node *a, ir_node *b) {
int i, n;
return 0;
} /* node_cmp_attr_ASM */
+/** Compares the nonexistent attributes of two Dummy nodes. */
+static int node_cmp_attr_Dummy(ir_node *a, ir_node *b)
+{
+ (void) a;
+ (void) b;
+ return 1;
+}
+
/**
* Set the default node attribute compare operation for an ir_op_ops.
*
CASE(Mod);
CASE(Quot);
CASE(Bound);
+ CASE(Builtin);
+ CASE(Dummy);
/* FIXME CopyB */
default:
/* leave NULL */;
del_pset(value_table);
} /* del_identities */
-/**
- * Normalize a node by putting constants (and operands with larger
- * node index) on the right (operator side).
- *
- * @param n The node to normalize
- */
-static void normalize_node(ir_node *n) {
+/* Normalize a node by putting constants (and operands with larger
+ * node index) on the right (operator side). */
+void ir_normalize_node(ir_node *n) {
if (is_op_commutative(get_irn_op(n))) {
ir_node *l = get_binop_left(n);
ir_node *r = get_binop_right(n);
if (!operands_are_normalized(l, r)) {
set_binop_left(n, r);
set_binop_right(n, l);
+ hook_normalize(n);
}
}
-} /* normalize_node */
+} /* ir_normalize_node */
/**
* Update the nodes after a match in the value table. If both nodes have
if (!value_table) return n;
- normalize_node(n);
+ ir_normalize_node(n);
/* lookup or insert in hash table with given hash key. */
o = pset_insert(value_table, n, ir_node_hash(n));
* @param value_table The value table
* @param n The node to lookup
*/
-static INLINE ir_node *identify_cons(pset *value_table, ir_node *n) {
+static inline ir_node *identify_cons(pset *value_table, ir_node *n) {
ir_node *old = n;
n = identify_remember(value_table, n);
/* neither constants nor Tuple values can be evaluated */
if (iro != iro_Const && (get_irn_mode(n) != mode_T)) {
unsigned fp_model = get_irg_fp_model(current_ir_graph);
- int old_fp_mode = tarval_enable_fp_ops((fp_model & fp_strict_algebraic) == 0);
+ int old_fp_mode = tarval_fp_ops_enabled();
+
+ tarval_enable_fp_ops(! (fp_model & fp_no_float_fold));
+
/* try to evaluate */
tv = computed_value(n);
if (tv != tarval_bad) {
/* evaluation was successful -- replace the node. */
irg_kill_node(current_ir_graph, n);
- nw = new_Const(get_tarval_mode(tv), tv);
+ nw = new_Const(tv);
if (old_tp && get_type_mode(old_tp) == get_tarval_mode(tv))
set_Const_type(nw, old_tp);
/* neither constants nor Tuple values can be evaluated */
if (iro != iro_Const && get_irn_mode(n) != mode_T) {
unsigned fp_model = get_irg_fp_model(current_ir_graph);
- int old_fp_mode = tarval_enable_fp_ops((fp_model & fp_strict_algebraic) == 0);
+ int old_fp_mode = tarval_fp_ops_enabled();
+
+ tarval_enable_fp_ops((fp_model & fp_strict_algebraic) == 0);
/* try to evaluate */
tv = computed_value(n);
if (tv != tarval_bad) {
for (i = 0; i < arity && !old_tp; ++i)
old_tp = get_irn_type(get_irn_n(n, i));
- n = new_Const(get_tarval_mode(tv), tv);
+ n = new_Const(tv);
if (old_tp && get_type_mode(old_tp) == get_tarval_mode(tv))
set_Const_type(n, old_tp);
/* special value for const, as they only differ in their tarval. */
h = HASH_PTR(node->attr.con.tv);
- h = 9*h + HASH_PTR(get_irn_mode(node));
return h;
} /* hash_Const */
static unsigned hash_SymConst(const ir_node *node) {
unsigned h;
- /* special value for const, as they only differ in their symbol. */
+ /* all others are pointers */
h = HASH_PTR(node->attr.symc.sym.type_p);
- h = 9*h + HASH_PTR(get_irn_mode(node));
return h;
} /* hash_SymConst */