#include "irtools.h"
#include "irhooks.h"
#include "array_t.h"
+#include "vrp.h"
+#include "firm_types.h"
/* Make types visible to allow most efficient access */
#include "entity_t.h"
return new_tarval_from_long (proj_nr == pn_Cmp_Eq, mode_b) */
return new_tarval_from_long (proj_nr & pn_Cmp_Eq, mode_b);
}
- else {
- tarval *taa = value_of(aa);
- tarval *tab = value_of(ab);
- ir_mode *mode = get_irn_mode(aa);
+ tarval *taa = value_of(aa);
+ tarval *tab = value_of(ab);
+ ir_mode *mode = get_irn_mode(aa);
- /*
- * The predecessors of Cmp are target values. We can evaluate
- * the Cmp.
- */
- if ((taa != tarval_bad) && (tab != tarval_bad)) {
- /* strange checks... */
- pn_Cmp flags = tarval_cmp(taa, tab);
- if (flags != pn_Cmp_False) {
- return new_tarval_from_long (proj_nr & flags, mode_b);
- }
- }
- /* for integer values, we can check against MIN/MAX */
- else if (mode_is_int(mode)) {
- /* MIN <=/> x. This results in true/false. */
- if (taa == get_mode_min(mode)) {
+ /*
+ * The predecessors of Cmp are target values. We can evaluate
+ * the Cmp.
+ */
+ if ((taa != tarval_bad) && (tab != tarval_bad)) {
+ /* strange checks... */
+ pn_Cmp flags = tarval_cmp(taa, tab);
+ if (flags != pn_Cmp_False) {
+ return new_tarval_from_long (proj_nr & flags, mode_b);
+ }
+ }
+ /* for integer values, we can check against MIN/MAX */
+ else if (mode_is_int(mode)) {
+ /* MIN <=/> x. This results in true/false. */
+ if (taa == get_mode_min(mode)) {
+ /* a compare with the MIN value */
+ if (proj_nr == pn_Cmp_Le)
+ return get_tarval_b_true();
+ else if (proj_nr == pn_Cmp_Gt)
+ return get_tarval_b_false();
+ }
+ /* x >=/< MIN. This results in true/false. */
+ else
+ if (tab == get_mode_min(mode)) {
/* a compare with the MIN value */
+ if (proj_nr == pn_Cmp_Ge)
+ return get_tarval_b_true();
+ else if (proj_nr == pn_Cmp_Lt)
+ return get_tarval_b_false();
+ }
+ /* MAX >=/< x. This results in true/false. */
+ else if (taa == get_mode_max(mode)) {
+ if (proj_nr == pn_Cmp_Ge)
+ return get_tarval_b_true();
+ else if (proj_nr == pn_Cmp_Lt)
+ return get_tarval_b_false();
+ }
+ /* x <=/> MAX. This results in true/false. */
+ else if (tab == get_mode_max(mode)) {
if (proj_nr == pn_Cmp_Le)
return get_tarval_b_true();
else if (proj_nr == pn_Cmp_Gt)
return get_tarval_b_false();
}
- /* x >=/< MIN. This results in true/false. */
- else
- if (tab == get_mode_min(mode)) {
- /* a compare with the MIN value */
- if (proj_nr == pn_Cmp_Ge)
- return get_tarval_b_true();
- else if (proj_nr == pn_Cmp_Lt)
- return get_tarval_b_false();
- }
- /* MAX >=/< x. This results in true/false. */
- else if (taa == get_mode_max(mode)) {
- if (proj_nr == pn_Cmp_Ge)
- return get_tarval_b_true();
- else if (proj_nr == pn_Cmp_Lt)
- return get_tarval_b_false();
- }
- /* x <=/> MAX. This results in true/false. */
- else if (tab == get_mode_max(mode)) {
- if (proj_nr == pn_Cmp_Le)
- return get_tarval_b_true();
- else if (proj_nr == pn_Cmp_Gt)
- return get_tarval_b_false();
- }
+
+ pn_Cmp cmp_result = vrp_cmp(aa, ab);
+ if (cmp_result != pn_Cmp_False) {
+ return new_tarval_from_long(cmp_result & proj_nr, mode_b);
}
- /*
- * The predecessors are Allocs or (void*)(0) constants. Allocs never
- * return NULL, they raise an exception. Therefore we can predict
- * the Cmp result.
- */
- else {
- ir_node *aaa = skip_Proj(aa);
- ir_node *aba = skip_Proj(ab);
-
- if ( ( (/* aa is ProjP and aaa is Alloc */
- is_Proj(aa)
- && mode_is_reference(get_irn_mode(aa))
- && is_Alloc(aaa))
- && ( (/* ab is NULL */
- mode_is_reference(get_irn_mode(ab))
- && tarval_is_null(tab))
- || (/* ab is other Alloc */
- is_Proj(ab)
- && mode_is_reference(get_irn_mode(ab))
- && is_Alloc(aba)
- && (aaa != aba))))
- || (/* aa is NULL and aba is Alloc */
- mode_is_reference(get_irn_mode(aa))
- && tarval_is_null(taa)
- && is_Proj(ab)
- && mode_is_reference(get_irn_mode(ab))
- && is_Alloc(aba)))
- /* 3.: */
+ }
+ /*
+ * The predecessors are Allocs or (void*)(0) constants. Allocs never
+ * return NULL, they raise an exception. Therefore we can predict
+ * the Cmp result.
+ */
+ else {
+ ir_node *aaa = skip_Proj(aa);
+ ir_node *aba = skip_Proj(ab);
+
+ if ( ( (/* aa is ProjP and aaa is Alloc */
+ is_Proj(aa)
+ && mode_is_reference(get_irn_mode(aa))
+ && is_Alloc(aaa))
+ && ( (/* ab is NULL */
+ mode_is_reference(get_irn_mode(ab))
+ && tarval_is_null(tab))
+ || (/* ab is other Alloc */
+ is_Proj(ab)
+ && mode_is_reference(get_irn_mode(ab))
+ && is_Alloc(aba)
+ && (aaa != aba))))
+ || (/* aa is NULL and aba is Alloc */
+ mode_is_reference(get_irn_mode(aa))
+ && tarval_is_null(taa)
+ && is_Proj(ab)
+ && mode_is_reference(get_irn_mode(ab))
+ && is_Alloc(aba)))
+ /* 3.: */
return new_tarval_from_long(proj_nr & pn_Cmp_Lg, mode_b);
- }
}
return computed_value_Cmp_Confirm(a, aa, ab, proj_nr);
} /* computed_value_Proj_Cmp */
* @param n The node this should be evaluated
*/
tarval *computed_value(const ir_node *n) {
+ if(mode_is_int(get_irn_mode(n)) && tarval_is_all_one(
+ tarval_or(n->vrp.bits_set, n->vrp.bits_not_set))) {
+ return n->vrp.bits_set;
+ }
if (n->op->ops.computed_value)
return n->op->ops.computed_value(n);
return tarval_bad;
DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
break;
- case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
proj = get_irg_bad(current_ir_graph);
typedef tarval *(*tarval_sub_type)(tarval *a, tarval *b, ir_mode *mode);
typedef tarval *(*tarval_binop_type)(tarval *a, tarval *b);
+/**
+ * in reality eval_func should be tarval (*eval_func)() but incomplete
+ * declarations are bad style and generate noisy warnings
+ */
+typedef void (*eval_func)(void);
+
/**
* Wrapper for the tarval binop evaluation, tarval_sub has one more parameter.
*/
-static tarval *do_eval(tarval *(*eval)(), tarval *a, tarval *b, ir_mode *mode) {
- if (eval == tarval_sub) {
+static tarval *do_eval(eval_func eval, tarval *a, tarval *b, ir_mode *mode)
+{
+ if (eval == (eval_func) tarval_sub) {
tarval_sub_type func = (tarval_sub_type)eval;
return func(a, b, mode);
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, tarval *(*eval)(), ir_mode *mode, int left) {
+static ir_node *apply_binop_on_phi(ir_node *phi, tarval *other, eval_func eval, ir_mode *mode, int left) {
tarval *tv;
void **res;
ir_node *pred;
*
* @return a new Phi node if the conversion was successful, NULL else
*/
-static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, tarval *(*eval)(), ir_mode *mode) {
+static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode) {
tarval *tv_l, *tv_r, *tv;
void **res;
ir_node *pred;
}
}
- HANDLE_BINOP_PHI(tarval_add, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_add, a, b, c, mode);
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
}
}
}
+ if (mode_is_int(mode)) {
+ tarval *c = tarval_and(
+ tarval_not(a->vrp.bits_not_set),
+ tarval_not(b->vrp.bits_not_set)
+ );
+
+ if(tarval_is_null(c)) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ return new_rd_Or(dbgi, get_nodes_block(n),
+ a, b, mode);
+ }
+ }
return n;
} /* transform_node_Add */
}
restart:
- HANDLE_BINOP_PHI(tarval_sub, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_sub, a, b, c, mode);
/* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
if (mode != get_irn_mode(a))
return transform_node_Mul2n(n, mode);
- HANDLE_BINOP_PHI(tarval_mul, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_mul, a, b, c, mode);
if (mode_is_signed(mode)) {
ir_node *r = NULL;
if (is_Const(b) && is_const_Phi(a)) {
/* check for Div(Phi, Const) */
- value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0);
+ value = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
else if (is_Const(a) && is_const_Phi(b)) {
/* check for Div(Const, Phi) */
- value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1);
+ value = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
else if (is_const_Phi(a) && is_const_Phi(b)) {
/* check for Div(Phi, Phi) */
- value = apply_binop_on_2_phis(a, b, tarval_div, mode);
+ value = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
if (is_Const(b) && is_const_Phi(a)) {
/* check for Div(Phi, Const) */
- value = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+ value = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
else if (is_Const(a) && is_const_Phi(b)) {
/* check for Div(Const, Phi) */
- value = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+ value = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
}
else if (is_const_Phi(a) && is_const_Phi(b)) {
/* check for Div(Phi, Phi) */
- value = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+ value = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode);
if (value) {
DBG_OPT_ALGSIM0(n, value, FS_OPT_CONST_PHI);
goto make_tuple;
if (is_Const(b) && is_const_Phi(a)) {
/* check for Div(Phi, Const) */
- va = apply_binop_on_phi(a, get_Const_tarval(b), tarval_div, mode, 0);
- vb = apply_binop_on_phi(a, get_Const_tarval(b), tarval_mod, mode, 0);
+ va = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_div, mode, 0);
+ vb = apply_binop_on_phi(a, get_Const_tarval(b), (eval_func) tarval_mod, mode, 0);
if (va && vb) {
DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
}
else if (is_Const(a) && is_const_Phi(b)) {
/* check for Div(Const, Phi) */
- va = apply_binop_on_phi(b, get_Const_tarval(a), tarval_div, mode, 1);
- vb = apply_binop_on_phi(b, get_Const_tarval(a), tarval_mod, mode, 1);
+ va = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_div, mode, 1);
+ vb = apply_binop_on_phi(b, get_Const_tarval(a), (eval_func) tarval_mod, mode, 1);
if (va && vb) {
DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
}
else if (is_const_Phi(a) && is_const_Phi(b)) {
/* check for Div(Phi, Phi) */
- va = apply_binop_on_2_phis(a, b, tarval_div, mode);
- vb = apply_binop_on_2_phis(a, b, tarval_mod, mode);
+ va = apply_binop_on_2_phis(a, b, (eval_func) tarval_div, mode);
+ vb = apply_binop_on_2_phis(a, b, (eval_func) tarval_mod, mode);
if (va && vb) {
DBG_OPT_ALGSIM0(n, va, FS_OPT_CONST_PHI);
DBG_OPT_ALGSIM0(n, vb, FS_OPT_CONST_PHI);
ir_mode *mode;
mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_and, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_and, a, b, c, mode);
/* we can evaluate 2 Projs of the same Cmp */
if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
return n;
}
+ if (is_Const(a) && (tarval_is_all_one(tarval_or(get_Const_tarval(a),
+ b->vrp.bits_not_set)))) {
+ return new_rd_Id(get_irn_dbg_info(n), get_nodes_block(n),
+ b, get_irn_mode(n));
+
+ }
+
+ if (is_Const(b) && (tarval_is_all_one(tarval_or(get_Const_tarval(b),
+ a->vrp.bits_not_set)))) {
+ return new_rd_Id(get_irn_dbg_info(n), get_nodes_block(n),
+ a, get_irn_mode(n));
+
+ }
+
n = transform_bitwise_distributive(n, transform_node_And);
return n;
ir_node *b = get_Eor_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_eor, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_eor, a, b, c, mode);
/* we can evaluate 2 Projs of the same Cmp */
if (mode == mode_b && is_Proj(a) && is_Proj(b)) {
n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph,
get_mode_null(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
- } else if (mode == mode_b &&
- is_Proj(a) &&
- is_Const(b) && is_Const_one(b) &&
- is_Cmp(get_Proj_pred(a))) {
- /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(get_nodes_block(n), get_Proj_pred(a),
- mode_b, get_negated_pnc(get_Proj_proj(a), mode));
-
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
} else if (is_Const(b)) {
if (is_Not(a)) { /* ~x ^ const -> x ^ ~const */
ir_node *cnst = new_Const(tarval_not(get_Const_tarval(b)));
/* this case will NEVER be taken, kill it */
return get_irg_bad(current_ir_graph);
}
+ }
+ } else {
+ long num = get_Proj_proj(proj);
+ if (num != get_Cond_default_proj(n)) {
+ /* Try handling with vrp data. We only remove dead parts. */
+ tarval *tp = new_tarval_from_long(num, get_irn_mode(b));
+
+ if (b->vrp.range_type == VRP_RANGE) {
+ pn_Cmp cmp_result = tarval_cmp(b->vrp.range_bottom, tp);
+ pn_Cmp cmp_result2 = tarval_cmp(b->vrp.range_top, tp);
+
+ if ((cmp_result & pn_Cmp_Lt) == cmp_result && (cmp_result2
+ & pn_Cmp_Gt) == cmp_result2) {
+ return get_irg_bad(current_ir_graph);
+ }
+ } else if (b->vrp.range_type == VRP_ANTIRANGE) {
+ pn_Cmp cmp_result = tarval_cmp(b->vrp.range_bottom, tp);
+ pn_Cmp cmp_result2 = tarval_cmp(b->vrp.range_top, tp);
+
+ if ((cmp_result & pn_Cmp_Ge) == cmp_result && (cmp_result2
+ & pn_Cmp_Le) == cmp_result2) {
+ return get_irg_bad(current_ir_graph);
+ }
+ }
+
+ if (!(tarval_cmp(
+ tarval_and( b->vrp.bits_set, tp),
+ b->vrp.bits_set
+ ) == pn_Cmp_Eq)) {
+
+ return get_irg_bad(current_ir_graph);
+ }
+
+ if (!(tarval_cmp(
+ tarval_and(
+ tarval_not(tp),
+ b->vrp.bits_not_set),
+ b->vrp.bits_not_set)
+ == pn_Cmp_Eq)) {
+
+ return get_irg_bad(current_ir_graph);
+ }
+
+
}
}
}
/*
* UpConv(x) REL 0 ==> x REL 0
+ * Don't do this for float values as it's unclear whether it is a
+ * win. (on the other side it makes detection/creation of fabs hard)
*/
if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) ||
- mode_is_signed(mode) || !mode_is_signed(op_mode))) {
+ mode_is_signed(mode) || !mode_is_signed(op_mode)) &&
+ !mode_is_float(mode)) {
tv = get_mode_null(op_mode);
left = op;
mode = op_mode;
DBG_OPT_EXC_REM(proj);
proj = new_r_Jmp(get_nodes_block(copyb));
break;
- case pn_CopyB_M_except:
case pn_CopyB_X_except:
DBG_OPT_EXC_REM(proj);
proj = get_irg_bad(get_irn_irg(proj));
ir_node *new_and, *new_const, *block;
ir_mode *mode = get_irn_mode(or);
- tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;
+ tarval *tv1, *tv2, *tv3, *tv4, *tv;
while (1) {
get_comm_Binop_Ops(or, &and, &c1);
return or;
}
- n_tv4 = tarval_not(tv4);
- if (tv3 != tarval_and(tv3, n_tv4)) {
+ if (tv3 != tarval_andnot(tv3, tv4)) {
/* bit in the or_mask is outside the and_mask */
return or;
}
- n_tv2 = tarval_not(tv2);
- if (tv1 != tarval_and(tv1, n_tv2)) {
+ if (tv1 != tarval_andnot(tv1, tv2)) {
/* bit in the or_mask is outside the and_mask */
return or;
}
}
mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_or, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_or, a, b, c, mode);
n = transform_node_Or_bf_store(n);
n = transform_node_Or_Rotl(n);
ir_node *right = get_Shr_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shr, left, right, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_shr, left, right, c, mode);
n = transform_node_shift(n);
if (is_Shr(n))
ir_node *b = get_Shrs_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shrs, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_shrs, a, b, c, mode);
n = transform_node_shift(n);
if (is_Shrs(n))
ir_node *b = get_Shl_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_shl, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_shl, a, b, c, mode);
n = transform_node_shift(n);
if (is_Shl(n))
ir_node *b = get_Rotl_right(n);
ir_mode *mode = get_irn_mode(n);
- HANDLE_BINOP_PHI(tarval_rotl, a, b, c, mode);
+ HANDLE_BINOP_PHI((eval_func) tarval_rotl, a, b, c, mode);
n = transform_node_shift(n);
if (is_Rotl(n))
return n;
} /* transform_node_End */
-/** returns 1 if a == -b */
-static int is_negated_value(ir_node *a, ir_node *b) {
/**
 * Returns true iff one operand is the arithmetic negation of the other:
 * a == Minus(b), b == Minus(a), or a == Sub(x, y) while b == Sub(y, x).
 *
 * NOTE(review): this hunk changes the return type int -> bool and drops
 * `static`, making the function externally visible — confirm a matching
 * prototype was added to the corresponding header.
 */
+bool is_negated_value(ir_node *a, ir_node *b)
+{
	if (is_Minus(a) && get_Minus_op(a) == b)
-		return 1;
+		return true;
	if (is_Minus(b) && get_Minus_op(b) == a)
-		return 1;
+		return true;
	/* Sub(x, y) and Sub(y, x) negate each other. */
	if (is_Sub(a) && is_Sub(b)) {
		ir_node *a_left  = get_Sub_left(a);
		ir_node *a_right = get_Sub_right(a);
		ir_node *b_right = get_Sub_right(b);
		/* NOTE(review): the declaration of b_left (presumably
		   get_Sub_left(b)) is an unchanged context line omitted from
		   this diff hunk — verify against the full file. */
		if (a_left == b_right && a_right == b_left)
-			return 1;
+			return true;
	}
-	return 0;
+	return false;
}
/**
if (!mode_honor_signed_zeros(mode) && is_negated_value(f, t)) {
/* f = -t */
- if ( (cmp_l == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))
- || (cmp_l == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)))
+ /* NaN's work fine with abs, so it is ok to remove Uo */
+ long pnc = pn & ~pn_Cmp_Uo;
+
+ if ( (cmp_l == t && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt))
+ || (cmp_l == f && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt)))
{
/* Mux(a >/>= 0, a, -a) = Mux(a </<= 0, -a, a) ==> Abs(a) */
n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
- } else if ((cmp_l == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))
- || (cmp_l == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)))
+ } else if ((cmp_l == t && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt))
+ || (cmp_l == f && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt)))
{
/* Mux(a </<= 0, a, -a) = Mux(a >/>= 0, -a, a) ==> -Abs(a) */
n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
ir_node *callee = get_Call_ptr(call);
ir_node *adr, *mem, *res, *bl, **in;
ir_type *ctp, *mtp, *tp;
- ident *id;
+ type_dbg_info *tdb;
dbg_info *db;
int i, n_res, n_param;
ir_variadicity var;
/* build a new call type */
mtp = get_Call_type(call);
- id = get_type_ident(mtp);
- id = id_mangle(new_id_from_chars("T_", 2), id);
- db = get_type_dbg_info(mtp);
+ tdb = get_type_dbg_info(mtp);
n_res = get_method_n_ress(mtp);
n_param = get_method_n_params(mtp);
- ctp = new_d_type_method(id, n_param + 1, n_res, db);
+ ctp = new_d_type_method(n_param + 1, n_res, tdb);
for (i = 0; i < n_res; ++i)
set_method_res_type(ctp, i, get_method_res_type(mtp, i));
/* FIXME: we don't need a new pointer type in every step */
tp = get_irg_frame_type(current_ir_graph);
- id = id_mangle(get_type_ident(tp), new_id_from_chars("_ptr", 4));
- tp = new_type_pointer(id, tp, mode_P_data);
+ tp = new_type_pointer(tp);
set_method_param_type(ctp, 0, tp);
in[0] = get_Builtin_param(callee, 2);
}
/* compare a->in[0..ins] with b->in[0..ins] */
- for (i = 0; i < irn_arity_a; i++)
- if (get_irn_intra_n(a, i) != get_irn_intra_n(b, i))
- return 1;
+ for (i = 0; i < irn_arity_a; ++i) {
+ ir_node *pred_a = get_irn_intra_n(a, i);
+ ir_node *pred_b = get_irn_intra_n(b, i);
+ if (pred_a != pred_b) {
+ /* if both predecessors are CSE neutral they might be different */
+ if (!is_irn_cse_neutral(pred_a) || !is_irn_cse_neutral(pred_b))
+ return 1;
+ }
+ }
/*
 * here, we already know that the nodes are identical except their
* node could be found
*/
ir_node *identify_remember(pset *value_table, ir_node *n) {
	/* CSE lookup: find a value-identical node in the hash table, or insert
	   n as the canonical representative if none exists yet.
	   NOTE(review): this hunk renames the local `o` -> `nn` and adds an
	   edges_node_revival() call when an existing equivalent node is
	   returned instead of n. */
-	ir_node *o = NULL;
+	ir_node *nn = NULL;

	if (!value_table) return n;

	ir_normalize_node(n);
	/* lookup or insert in hash table with given hash key. */
-	o = pset_insert(value_table, n, ir_node_hash(n));
+	nn = pset_insert(value_table, n, ir_node_hash(n));
+
+	if (nn != n) {
		/* An equivalent node was already remembered: merge what is known
		   about n into it, and revive its edges since it is used again. */
+		update_known_irn(nn, n);
-	if (o != n) {
-		update_known_irn(o, n);

+		/* n is reachable again */
+		edges_node_revival(nn, get_irn_irg(nn));
	}

-	return o;
+	return nn;
} /* identify_remember */
/**