#include "opt_confirms.h"
#include "opt_polymorphy.h"
#include "irtools.h"
-#include "xmalloc.h"
+#include "array_t.h"
/* Make types visible to allow most efficient access */
#include "entity_t.h"
goto restart;
}
/* else both are strict conv, second is superfluous */
- } else if (is_Proj(a)) {
- ir_node *pred = get_Proj_pred(a);
- if (is_Load(pred)) {
- /* loads always return with the exact precision of n_mode */
- assert(get_Load_mode(pred) == n_mode);
- return a;
- }
- /* leave strict floating point Conv's */
- return n;
} else {
+ if (is_Proj(a)) {
+ ir_node *pred = get_Proj_pred(a);
+ if (is_Load(pred)) {
+ /* loads always return with the exact precision of n_mode */
+ assert(get_Load_mode(pred) == n_mode);
+ return a;
+ }
+ }
/* leave strict floating point Conv's */
return n;
}
n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
} else if (get_mode_arithmetic(n_mode) == get_mode_arithmetic(a_mode)) {
- if (smaller_mode(b_mode, a_mode)) {
+ if (values_in_mode(b_mode, a_mode)) {
n = b; /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
}
}
}
- if (mode_is_int(n_mode) && mode_is_float(a_mode)) {
+ if (mode_is_int(n_mode) && get_mode_arithmetic(a_mode) == irma_ieee754) {
/* ConvI(ConvF(I)) -> I, iff float mantissa >= int mode */
- size_t int_mantissa = get_mode_size_bits(n_mode) - (mode_is_signed(n_mode) ? 1 : 0);
- size_t float_mantissa;
- /* FIXME There is no way to get the mantissa size of a mode */
- switch (get_mode_size_bits(a_mode)) {
- case 32: float_mantissa = 23 + 1; break; // + 1 for implicit 1
- case 64: float_mantissa = 52 + 1; break;
- case 80: float_mantissa = 64 + 1; break;
- default: float_mantissa = 0; break;
- }
- if (float_mantissa != 0 && float_mantissa >= int_mantissa) {
+ unsigned int_mantissa = get_mode_size_bits(n_mode) - (mode_is_signed(n_mode) ? 1 : 0);
+ unsigned float_mantissa = tarval_ieee754_get_mantissa_size(a_mode);
+
+ if (float_mantissa >= int_mantissa) {
n = b;
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
return n;
if (! is_Phi(n) || get_irn_arity(n) == 0)
return 0;
- for (i = get_irn_arity(n) - 1; i >= 0; --i)
+ for (i = get_irn_arity(n) - 1; i >= 0; --i) {
if (! is_Const(get_irn_n(n, i)))
return 0;
- return 1;
+ }
+ return 1;
} /* is_const_Phi */
typedef tarval *(*tarval_sub_type)(tarval *a, tarval *b, ir_mode *mode);
if (get_mode_arithmetic(mode) == irma_ieee754) {
if (is_Const(a)) {
tarval *tv = get_Const_tarval(a);
- if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
+ if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)
+ && !tarval_is_negative(tv)) {
/* 2.0 * b = b + b */
n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), b, b, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
}
else if (is_Const(b)) {
tarval *tv = get_Const_tarval(b);
- if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
+ if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)
+ && !tarval_is_negative(tv)) {
/* a * 2.0 = a + a */
n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_nodes_block(n), a, a, mode);
DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
if (tv != tarval_bad) {
/* c >= 0 : Abs(a) <= c ==> (unsigned)(a + c) <= 2*c */
- if (get_irn_op(left) == op_Abs) { // TODO something is missing here
+ if (is_Abs(left)) { // TODO something is missing here
}
}
}
*/
static ir_node *transform_node_Conv(ir_node *n) {
ir_node *c, *oldn = n;
- ir_node *a = get_Conv_op(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *a = get_Conv_op(n);
- if (is_const_Phi(a)) {
- c = apply_conv_on_phi(a, get_irn_mode(n));
+ if (mode != mode_b && is_const_Phi(a)) {
+ /* Do NOT optimize mode_b Conv's, this leads to remaining
+ * Phib nodes later, because the conv_b_lower operation
+ * is instantly reverted, when it tries to insert a Convb.
+ */
+ c = apply_conv_on_phi(a, mode);
if (c) {
DBG_OPT_ALGSIM0(oldn, c, FS_OPT_CONST_PHI);
return c;
}
if (is_Unknown(a)) { /* Conv_A(Unknown_B) -> Unknown_A */
- ir_mode *mode = get_irn_mode(n);
return new_r_Unknown(current_ir_graph, mode);
}
+ if (mode_is_reference(mode) &&
+ get_mode_size_bits(mode) == get_mode_size_bits(get_irn_mode(a)) &&
+ is_Add(a)) {
+ ir_node *l = get_Add_left(a);
+ ir_node *r = get_Add_right(a);
+ dbg_info *dbgi = get_irn_dbg_info(a);
+ ir_node *block = get_nodes_block(n);
+ if(is_Conv(l)) {
+ ir_node *lop = get_Conv_op(l);
+ if(get_irn_mode(lop) == mode) {
+ /* ConvP(AddI(ConvI(P), x)) -> AddP(P, x) */
+ n = new_rd_Add(dbgi, current_ir_graph, block, lop, r, mode);
+ return n;
+ }
+ }
+ if(is_Conv(r)) {
+ ir_node *rop = get_Conv_op(r);
+ if(get_irn_mode(rop) == mode) {
+ /* ConvP(AddI(x, ConvI(P))) -> AddP(x, P) */
+ n = new_rd_Add(dbgi, current_ir_graph, block, l, rop, mode);
+ return n;
+ }
+ }
+ }
+
return n;
} /* transform_node_Conv */
} else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka))) {
continue;
}
- /* FIXME: beabi need to keep a Proj(M) */
- if (is_Phi(ka) || is_irn_keep(ka) || is_Proj(ka))
- in[j++] = ka;
+ in[j++] = ka;
}
if (j != n_keepalives)
set_End_keepalives(n, j, in);
if (pn == pn_Cmp_Lg) {
/* Mux((a & 2^C) != 0, 2^C, 0) */
n = cmp_l;
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* Mux((a & 2^C) == 0, 2^C, 0) */
n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
block, cmp_l, t, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
return n;
}
if (pn == pn_Cmp_Lg) {
/* (a & (1 << n)) != 0, (1 << n), 0) */
n = cmp_l;
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* (a & (1 << n)) == 0, (1 << n), 0) */
n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
block, cmp_l, t, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
return n;
}
if (pn == pn_Cmp_Lg) {
/* ((1 << n) & a) != 0, (1 << n), 0) */
n = cmp_l;
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
} else {
/* ((1 << n) & a) == 0, (1 << n), 0) */
n = new_rd_Eor(get_irn_dbg_info(n), current_ir_graph,
block, cmp_l, t, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_BITOP);
}
return n;
}
+/**
+ * Compares the attributes of two Sel nodes for the CSE value table.
+ *
+ * @param a  first Sel node
+ * @param b  second Sel node
+ * @return   zero if both Sel nodes are considered equal (may be merged by
+ *           CSE), non-zero if they differ — the usual node_cmp_attr contract.
+ */
static int node_cmp_attr_Sel(ir_node *a, ir_node *b) {
const ir_entity *a_ent = get_Sel_entity(a);
const ir_entity *b_ent = get_Sel_entity(b);
+#if 0
return
(a_ent->kind != b_ent->kind) ||
(a_ent->name != b_ent->name) ||
(a_ent->owner != b_ent->owner) ||
(a_ent->ld_name != b_ent->ld_name) ||
(a_ent->type != b_ent->type);
+#endif
+/* Matze: inlining of functions can produce 2 entities with same type,
+ * name, etc. */
+/* Compare by entity identity instead of the disabled field-wise compare
+ * above: two distinct entities must never be CSE-merged even when all
+ * their visible attributes coincide (e.g. after inlining). */
+return a_ent != b_ent;
} /* node_cmp_attr_Sel */
/** Compares the attributes of two Phi nodes. */
if (o != n) {
update_known_irn(o, n);
- DBG_OPT_CSE(n, o);
}
return o;
/* Now we have a legal, useful node. Enter it in hash table for CSE */
if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
- n = identify_remember(current_ir_graph->value_table, n);
+ ir_node *o = n;
+ n = identify_remember(current_ir_graph->value_table, o);
+ if (o != n)
+ DBG_OPT_CSE(o, n);
}
return n;
now all nodes are op_pin_state_pinned to blocks, i.e., the cse only finds common
subexpressions within a block. */
if (get_opt_cse()) {
- n = identify_remember(current_ir_graph->value_table, n);
+ ir_node *o = n;
+ n = identify_remember(current_ir_graph->value_table, o);
+ if (o != n)
+ DBG_OPT_CSE(o, n);
}
/* Some more constant expression evaluation. */
/* Now we have a legal, useful node. Enter it in hash table for cse.
Blocks should be unique anyways. (Except the successor of start:
is cse with the start block!) */
- if (get_opt_cse() && (get_irn_opcode(n) != iro_Block))
- n = identify_remember(current_ir_graph->value_table, n);
+ if (get_opt_cse() && (get_irn_opcode(n) != iro_Block)) {
+ ir_node *o = n;
+ n = identify_remember(current_ir_graph->value_table, o);
+ if (o != n)
+ DBG_OPT_CSE(o, n);
+ }
return n;
} /* optimize_in_place_2 */