#include "irtools.h"
#include "irhooks.h"
#include "array_t.h"
+#include "vrp.h"
+#include "firm_types.h"
/* Make types visible to allow most efficient access */
#include "entity_t.h"
* There are several cases where we can evaluate a Cmp node, see later.
*/
static tarval *computed_value_Proj_Cmp(const ir_node *n) {
-	ir_node *a = get_Proj_pred(n);
-	ir_node *aa = get_Cmp_left(a);
-	ir_node *ab = get_Cmp_right(a);
-	long proj_nr = get_Proj_proj(n);
+	ir_node *cmp = get_Proj_pred(n);
+	ir_node *left = get_Cmp_left(cmp);
+	ir_node *right = get_Cmp_right(cmp);
+	long pn_cmp = get_Proj_proj(n);
+	ir_mode *mode = get_irn_mode(left);
+	tarval *tv_l, *tv_r;
/*
* BEWARE: a == a is NOT always True for floating Point values, as
* NaN != NaN is defined, so we must check this here.
*/
-	if (aa == ab && (
-	!mode_is_float(get_irn_mode(aa)) || proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Gt)
-	) { /* 1.: */
-
+	if (left == right && (!mode_is_float(mode) || pn_cmp == pn_Cmp_Lt || pn_cmp == pn_Cmp_Gt)) {
/* This is a trick with the bits used for encoding the Cmp
Proj numbers, the following statement is not the same:
-	return new_tarval_from_long (proj_nr == pn_Cmp_Eq, mode_b) */
-	return new_tarval_from_long (proj_nr & pn_Cmp_Eq, mode_b);
+	return new_tarval_from_long(pn_cmp == pn_Cmp_Eq, mode_b) */
+	return new_tarval_from_long(pn_cmp & pn_Cmp_Eq, mode_b);
}
-	else {
-	tarval *taa = value_of(aa);
-	tarval *tab = value_of(ab);
-	ir_mode *mode = get_irn_mode(aa);
+	tv_l = value_of(left);
+	tv_r = value_of(right);
+	if ((tv_l != tarval_bad) && (tv_r != tarval_bad)) {
/*
* The predecessors of Cmp are target values. We can evaluate
* the Cmp.
*/
-	if ((taa != tarval_bad) && (tab != tarval_bad)) {
-	/* strange checks... */
-	pn_Cmp flags = tarval_cmp(taa, tab);
-	if (flags != pn_Cmp_False) {
-	return new_tarval_from_long (proj_nr & flags, mode_b);
-	}
+	pn_Cmp flags = tarval_cmp(tv_l, tv_r);
+	if (flags != pn_Cmp_False) {
+	return new_tarval_from_long (pn_cmp & flags, mode_b);
}
+	} else if (mode_is_int(mode)) {
/* for integer values, we can check against MIN/MAX */
-	else if (mode_is_int(mode)) {
+	pn_Cmp cmp_result;
+
+	if (tv_l == get_mode_min(mode)) {
/* MIN <=/> x. This results in true/false. */
-	if (taa == get_mode_min(mode)) {
-	/* a compare with the MIN value */
-	if (proj_nr == pn_Cmp_Le)
-	return get_tarval_b_true();
-	else if (proj_nr == pn_Cmp_Gt)
-	return get_tarval_b_false();
-	}
+	if (pn_cmp == pn_Cmp_Le)
+	return tarval_b_true;
+	else if (pn_cmp == pn_Cmp_Gt)
+	return tarval_b_false;
+	} else if (tv_r == get_mode_min(mode)) {
/* x >=/< MIN. This results in true/false. */
-	else
-	if (tab == get_mode_min(mode)) {
-	/* a compare with the MIN value */
-	if (proj_nr == pn_Cmp_Ge)
-	return get_tarval_b_true();
-	else if (proj_nr == pn_Cmp_Lt)
-	return get_tarval_b_false();
-	}
-	/* MAX >=/< x. This results in true/false. */
-	else if (taa == get_mode_max(mode)) {
-	if (proj_nr == pn_Cmp_Ge)
-	return get_tarval_b_true();
-	else if (proj_nr == pn_Cmp_Lt)
-	return get_tarval_b_false();
-	}
-	/* x <=/> MAX. This results in true/false. */
-	else if (tab == get_mode_max(mode)) {
-	if (proj_nr == pn_Cmp_Le)
-	return get_tarval_b_true();
-	else if (proj_nr == pn_Cmp_Gt)
-	return get_tarval_b_false();
+	if (pn_cmp == pn_Cmp_Ge)
+	return tarval_b_true;
+	else if (pn_cmp == pn_Cmp_Lt)
+	return tarval_b_false;
+	} else if (tv_l == get_mode_max(mode)) {
+	/* MAX >=/< x. This results in true/false. */
+	if (pn_cmp == pn_Cmp_Ge)
+	return tarval_b_true;
+	else if (pn_cmp == pn_Cmp_Lt)
+	return tarval_b_false;
+	} else if (tv_r == get_mode_max(mode)) {
+	/* x <=/> MAX. This results in true/false. */
+	if (pn_cmp == pn_Cmp_Le)
+	return tarval_b_true;
+	else if (pn_cmp == pn_Cmp_Gt)
+	return tarval_b_false;
+	}
+
+	/* neither constant folding nor MIN/MAX applied: fall back to the
+	   value-range propagation info attached to the operands */
+	cmp_result = vrp_cmp(left, right);
+	if (cmp_result != pn_Cmp_False) {
+	if (cmp_result == pn_Cmp_Lg) {
+	if (pn_cmp == pn_Cmp_Eq) {
+	return tarval_b_false;
+	} else if (pn_cmp == pn_Cmp_Lg) {
+	return tarval_b_true;
}
+	} else {
+	return new_tarval_from_long(cmp_result & pn_cmp, mode_b);
+	}
}
-	/*
-	* The predecessors are Allocs or (void*)(0) constants. Allocs never
-	* return NULL, they raise an exception. Therefore we can predict
-	* the Cmp result.
-	*/
-	else {
-	ir_node *aaa = skip_Proj(aa);
-	ir_node *aba = skip_Proj(ab);
-
-	if ( ( (/* aa is ProjP and aaa is Alloc */
-	is_Proj(aa)
-	&& mode_is_reference(get_irn_mode(aa))
-	&& is_Alloc(aaa))
-	&& ( (/* ab is NULL */
-	mode_is_reference(get_irn_mode(ab))
-	&& tarval_is_null(tab))
-	|| (/* ab is other Alloc */
-	is_Proj(ab)
-	&& mode_is_reference(get_irn_mode(ab))
-	&& is_Alloc(aba)
-	&& (aaa != aba))))
-	|| (/* aa is NULL and aba is Alloc */
-	mode_is_reference(get_irn_mode(aa))
-	&& tarval_is_null(taa)
-	&& is_Proj(ab)
-	&& mode_is_reference(get_irn_mode(ab))
-	&& is_Alloc(aba)))
-	/* 3.: */
-	return new_tarval_from_long(proj_nr & pn_Cmp_Lg, mode_b);
-	}
-	}
-	return computed_value_Cmp_Confirm(a, aa, ab, proj_nr);
+	} else if (mode_is_reference(mode)) {
+	/* pointer compare */
+	ir_node *s_l = skip_Proj(left);
+	ir_node *s_r = skip_Proj(right);
+
+	if ((is_Alloc(s_l) && tarval_is_null(tv_r)) ||
+	(tarval_is_null(tv_l) && is_Alloc(s_r))) {
+	/*
+	* The predecessors are Allocs and (void*)(0) constants. In Firm Allocs never
+	* return NULL, they raise an exception. Therefore we can predict
+	* the Cmp result.
+	*/
+	return new_tarval_from_long(pn_cmp & pn_Cmp_Lg, mode_b);
+	}
+	}
+	return computed_value_Cmp_Confirm(cmp, left, right, pn_cmp);
} /* computed_value_Proj_Cmp */
/**
* @param n The node that should be evaluated
*/
tarval *computed_value(const ir_node *n) {
+	/* If vrp has validated bit information and every bit of the mode is
+	   covered by (bits_set | bits_not_set), the value is fully determined
+	   and equals bits_set. */
+	if(mode_is_int(get_irn_mode(n)) && n->vrp.valid && tarval_is_all_one(
+	tarval_or(n->vrp.bits_set, n->vrp.bits_not_set))) {
+	return n->vrp.bits_set;
+	}
if (n->op->ops.computed_value)
return n->op->ops.computed_value(n);
return tarval_bad;
}
}
}
+ if (mode_is_int(mode) && a->vrp.valid && b->vrp.valid) {
+ tarval *c = tarval_and(
+ tarval_not(a->vrp.bits_not_set),
+ tarval_not(b->vrp.bits_not_set)
+ );
+
+ if(tarval_is_null(c)) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ return new_rd_Or(dbgi, get_nodes_block(n),
+ a, b, mode);
+ }
+ }
return n;
} /* transform_node_Add */
return n;
}
+ if (is_Const(a) && b->vrp.valid && (tarval_is_all_one(tarval_or(get_Const_tarval(a),
+ b->vrp.bits_not_set)))) {
+ return new_rd_Id(get_irn_dbg_info(n), get_nodes_block(n),
+ b, get_irn_mode(n));
+
+ }
+
+ if (is_Const(b) && a->vrp.valid && (tarval_is_all_one(tarval_or(get_Const_tarval(b),
+ a->vrp.bits_not_set)))) {
+ return new_rd_Id(get_irn_dbg_info(n), get_nodes_block(n),
+ a, get_irn_mode(n));
+
+ }
+
n = transform_bitwise_distributive(n, transform_node_And);
return n;
n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph,
get_mode_null(mode));
DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
- } else if (mode == mode_b &&
- is_Proj(a) &&
- is_Const(b) && is_Const_one(b) &&
- is_Cmp(get_Proj_pred(a))) {
- /* The Eor negates a Cmp. The Cmp has the negated result anyways! */
- n = new_r_Proj(get_nodes_block(n), get_Proj_pred(a),
- mode_b, get_negated_pnc(get_Proj_proj(a), mode));
-
- DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
} else if (is_Const(b)) {
if (is_Not(a)) { /* ~x ^ const -> x ^ ~const */
ir_node *cnst = new_Const(tarval_not(get_Const_tarval(b)));
/* this case will NEVER be taken, kill it */
return get_irg_bad(current_ir_graph);
}
+ }
+ } else {
+ long num = get_Proj_proj(proj);
+ if (num != get_Cond_default_proj(n) && b->vrp.valid) {
+ /* Try handling with vrp data. We only remove dead parts. */
+ tarval *tp = new_tarval_from_long(num, get_irn_mode(b));
+
+ if (b->vrp.range_type == VRP_RANGE) {
+ pn_Cmp cmp_result = tarval_cmp(b->vrp.range_bottom, tp);
+ pn_Cmp cmp_result2 = tarval_cmp(b->vrp.range_top, tp);
+
+ if ((cmp_result & pn_Cmp_Lt) == cmp_result && (cmp_result2
+ & pn_Cmp_Gt) == cmp_result2) {
+ return get_irg_bad(current_ir_graph);
+ }
+ } else if (b->vrp.range_type == VRP_ANTIRANGE) {
+ pn_Cmp cmp_result = tarval_cmp(b->vrp.range_bottom, tp);
+ pn_Cmp cmp_result2 = tarval_cmp(b->vrp.range_top, tp);
+
+ if ((cmp_result & pn_Cmp_Ge) == cmp_result && (cmp_result2
+ & pn_Cmp_Le) == cmp_result2) {
+ return get_irg_bad(current_ir_graph);
+ }
+ }
+
+ if (!(tarval_cmp(
+ tarval_and( b->vrp.bits_set, tp),
+ b->vrp.bits_set
+ ) == pn_Cmp_Eq)) {
+
+ return get_irg_bad(current_ir_graph);
+ }
+
+ if (!(tarval_cmp(
+ tarval_and(
+ tarval_not(tp),
+ b->vrp.bits_not_set),
+ b->vrp.bits_not_set)
+ == pn_Cmp_Eq)) {
+
+ return get_irg_bad(current_ir_graph);
+ }
+
+
}
}
}
/*
* UpConv(x) REL 0 ==> x REL 0
+ * Don't do this for float values as it's unclear whether it is a
+ * win. (on the other side it makes detection/creation of fabs hard)
*/
if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) ||
- mode_is_signed(mode) || !mode_is_signed(op_mode))) {
+ mode_is_signed(mode) || !mode_is_signed(op_mode)) &&
+ !mode_is_float(mode)) {
tv = get_mode_null(op_mode);
left = op;
mode = op_mode;
ir_node *new_and, *new_const, *block;
ir_mode *mode = get_irn_mode(or);
- tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;
+ tarval *tv1, *tv2, *tv3, *tv4, *tv;
while (1) {
get_comm_Binop_Ops(or, &and, &c1);
return or;
}
- n_tv4 = tarval_not(tv4);
- if (tv3 != tarval_and(tv3, n_tv4)) {
+ if (tv3 != tarval_andnot(tv3, tv4)) {
/* bit in the or_mask is outside the and_mask */
return or;
}
- n_tv2 = tarval_not(tv2);
- if (tv1 != tarval_and(tv1, n_tv2)) {
+ if (tv1 != tarval_andnot(tv1, tv2)) {
/* bit in the or_mask is outside the and_mask */
return or;
}
return n;
} /* transform_node_End */
-/** returns 1 if a == -b */
-static int is_negated_value(ir_node *a, ir_node *b) {
+/** Returns true if the value of a is the arithmetic negation of the value
+ *  of b (a == -b); recognizes Minus nodes and mirrored Sub operands.
+ *  Note: now exported (no longer static) and returns bool instead of int. */
+bool is_negated_value(ir_node *a, ir_node *b)
+{
if (is_Minus(a) && get_Minus_op(a) == b)
-	return 1;
+	return true;
if (is_Minus(b) && get_Minus_op(b) == a)
-	return 1;
+	return true;
if (is_Sub(a) && is_Sub(b)) {
ir_node *a_left = get_Sub_left(a);
ir_node *a_right = get_Sub_right(a);
ir_node *b_right = get_Sub_right(b);
if (a_left == b_right && a_right == b_left)
-	return 1;
+	return true;
}
-	return 0;
+	return false;
}
/**
if (!mode_honor_signed_zeros(mode) && is_negated_value(f, t)) {
/* f = -t */
- if ( (cmp_l == t && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt))
- || (cmp_l == f && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt)))
+ /* NaN's work fine with abs, so it is ok to remove Uo */
+ long pnc = pn & ~pn_Cmp_Uo;
+
+ if ( (cmp_l == t && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt))
+ || (cmp_l == f && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt)))
{
/* Mux(a >/>= 0, a, -a) = Mux(a </<= 0, -a, a) ==> Abs(a) */
n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
return n;
- } else if ((cmp_l == t && (pn == pn_Cmp_Le || pn == pn_Cmp_Lt))
- || (cmp_l == f && (pn == pn_Cmp_Ge || pn == pn_Cmp_Gt)))
+ } else if ((cmp_l == t && (pnc == pn_Cmp_Le || pnc == pn_Cmp_Lt))
+ || (cmp_l == f && (pnc == pn_Cmp_Ge || pnc == pn_Cmp_Gt)))
{
/* Mux(a </<= 0, a, -a) = Mux(a >/>= 0, -a, a) ==> -Abs(a) */
n = new_rd_Abs(get_irn_dbg_info(n), block, cmp_l, mode);
ir_node *callee = get_Call_ptr(call);
ir_node *adr, *mem, *res, *bl, **in;
ir_type *ctp, *mtp, *tp;
- ident *id;
+ type_dbg_info *tdb;
dbg_info *db;
int i, n_res, n_param;
ir_variadicity var;
/* build a new call type */
mtp = get_Call_type(call);
- id = get_type_ident(mtp);
- id = id_mangle(new_id_from_chars("T_", 2), id);
- db = get_type_dbg_info(mtp);
+ tdb = get_type_dbg_info(mtp);
n_res = get_method_n_ress(mtp);
n_param = get_method_n_params(mtp);
- ctp = new_d_type_method(id, n_param + 1, n_res, db);
+ ctp = new_d_type_method(n_param + 1, n_res, tdb);
for (i = 0; i < n_res; ++i)
set_method_res_type(ctp, i, get_method_res_type(mtp, i));
/* FIXME: we don't need a new pointer type in every step */
tp = get_irg_frame_type(current_ir_graph);
- id = id_mangle(get_type_ident(tp), new_id_from_chars("_ptr", 4));
- tp = new_type_pointer(id, tp, mode_P_data);
+ tp = new_type_pointer(tp);
set_method_param_type(ctp, 0, tp);
in[0] = get_Builtin_param(callee, 2);
}
/* compare a->in[0..ins] with b->in[0..ins] */
- for (i = 0; i < irn_arity_a; i++)
- if (get_irn_intra_n(a, i) != get_irn_intra_n(b, i))
- return 1;
+ for (i = 0; i < irn_arity_a; ++i) {
+ ir_node *pred_a = get_irn_intra_n(a, i);
+ ir_node *pred_b = get_irn_intra_n(b, i);
+ if (pred_a != pred_b) {
+ /* if both predecessors are CSE neutral they might be different */
+ if (!is_irn_cse_neutral(pred_a) || !is_irn_cse_neutral(pred_b))
+ return 1;
+ }
+ }
/*
* here, we already know that the nodes are identical except their
* node could be found
*/
ir_node *identify_remember(pset *value_table, ir_node *n) {
-	ir_node *o = NULL;
+	ir_node *nn = NULL;
if (!value_table) return n;
ir_normalize_node(n);
/* lookup or insert in hash table with given hash key. */
-	o = pset_insert(value_table, n, ir_node_hash(n));
+	nn = pset_insert(value_table, n, ir_node_hash(n));
+
+	/* pset_insert returns an already-present equivalent node if one exists,
+	   otherwise it inserts n and returns n itself */
+	if (nn != n) {
+	update_known_irn(nn, n);
-	if (o != n) {
-	update_known_irn(o, n);
+	/* n is reachable again */
+	edges_node_revival(nn, get_irn_irg(nn));
}
-	return o;
+	return nn;
} /* identify_remember */
/**