+static ir_node *equivalent_node_Phi(ir_node *n)
+{
+ int i, n_preds;
+
+ ir_node *oldn = n;
+ ir_node *block = NULL; /* to shut up gcc */
+ ir_node *first_val = NULL; /* to shut up gcc */
+ ir_node *scnd_val = NULL; /* to shut up gcc */
+
+ if (!get_opt_normalize()) return n;
+
+ n_preds = get_Phi_n_preds(n);
+
+ block = get_nodes_block(n);
+ /* @@@ to be removed, but this should always hold!!!
+ assert(get_irn_arity(block) == n_preds && "phi in wrong block!"); */
+ if ((is_Block_dead(block)) || /* Control dead */
+ (block == current_ir_graph->start_block)) /* There should be no Phi nodes */
+ return new_Bad(); /* in the Start Block. */
+
+ if (n_preds == 0) return n; /* Phi of dead Region without predecessors. */
+
+ /* If the Block has a Bad pred, we also have one. */
+ for (i = 0; i < n_preds; ++i)
+ if (is_Bad(get_Block_cfgpred(block, i)))
+ set_Phi_pred(n, i, new_Bad());
+
+ /* Find first non-self-referencing input */
+ for (i = 0; i < n_preds; ++i) {
+ first_val = get_Phi_pred(n, i);
+ if ( (first_val != n) /* not self pointer */
+#if 1
+ && (! is_Bad(first_val))
+#endif
+ ) { /* value not dead */
+ break; /* then found first value. */
+ }
+ }
+
+ if (i >= n_preds) {
+ /* A totally Bad or self-referencing Phi (the loop above ran through without a break) */
+ return new_Bad();
+ }
+
+ scnd_val = NULL;
+
+ /* Scan the remaining inputs and determine whether any of them
+ is a second distinct, non-Bad, non-self-referencing value. */
+ while (++i < n_preds) {
+ scnd_val = get_Phi_pred(n, i);
+ if ( (scnd_val != n)
+ && (scnd_val != first_val)
+#if 1
+ && (! is_Bad(scnd_val))
+#endif
+ ) {
+ break;
+ }
+ }
+
+ if (i >= n_preds) {
+ /* Fold: there is at most one distinct, non-self-referencing input. */
+ n = first_val;
+ DBG_OPT_PHI(oldn, n);
+ } else {
+ /* skip the remaining Ids (done in get_Phi_pred). */
+ /* superfluous, since we walk all to propagate Block's Bads.
+ while (++i < n_preds) get_Phi_pred(n, i); */
+ }
+ return n;
+}
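+
+/*
+ * Example (illustrative): a loop-header Phi of the form Phi(x, n) --
+ * where n is the Phi node itself -- has x as its only distinct,
+ * non-Bad, non-self input, so the code above folds it to x.
+ */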
+
+/**
+ * Optimize Proj(Tuple) and perform an early gigo() for a ProjX in a Bad block.
+ */
+static ir_node *equivalent_node_Proj(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ ir_node *a = get_Proj_pred(n);
+
+ if ( get_irn_op(a) == op_Tuple) {
+ /* Remove the Tuple/Proj combination. */
+ if ( get_Proj_proj(n) < get_Tuple_n_preds(a) ) {
+ n = get_Tuple_pred(a, get_Proj_proj(n));
+ DBG_OPT_TUPLE(oldn, a, n);
+ } else {
+ assert(0); /* This should not happen! */
+ n = new_Bad();
+ }
+ } else if (get_irn_mode(n) == mode_X) {
+ if (is_Block_dead(get_nodes_block(skip_Proj(n)))) {
+ /* Remove dead control flow -- early gigo(). */
+ n = new_Bad();
+ }
+ }
+ return n;
+}
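+
+/*
+ * Example (illustrative): with t = Tuple(a, b, c), a node Proj(t, 1)
+ * performs no computation and is replaced by b directly.
+ */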
+
+/**
+ * Remove Ids.
+ */
+static ir_node *equivalent_node_Id(ir_node *n)
+{
+ ir_node *oldn = n;
+
+ do {
+ n = get_Id_pred(n);
+ } while (get_irn_op(n) == op_Id);
+
+ DBG_OPT_ID(oldn, n);
+ return n;
+}
+
+/**
+ * Optimize a Mux.
+ */
+static ir_node *equivalent_node_Mux(ir_node *n)
+{
+ ir_node *oldn = n, *sel = get_Mux_sel(n);
+ tarval *ts = value_of(sel);
+
+ /* Mux(true, f, t) == t */
+ if (ts == tarval_b_true) {
+ n = get_Mux_true(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ }
+ /* Mux(false, f, t) == f */
+ else if (ts == tarval_b_false) {
+ n = get_Mux_false(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_C);
+ }
+ /* Mux(v, x, x) == x */
+ else if (get_Mux_false(n) == get_Mux_true(n)) {
+ n = get_Mux_true(n);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_EQ);
+ }
+ else if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(get_irn_mode(n))) {
+ ir_node *cmp = get_Proj_pred(sel);
+ long proj_nr = get_Proj_proj(sel);
+ ir_node *b = get_Mux_false(n);
+ ir_node *a = get_Mux_true(n);
+
+ /*
+ * Note: normalization puts the constant on the right side,
+ * so we check only one case.
+ *
+ * Note further that these optimizations work even for floating
+ * point with NaNs, because -NaN is still a NaN.
+ * However, if +0 and -0 are distinguished, we cannot use the first
+ * one (hence the mode_honor_signed_zeros() check above).
+ */
+ if (get_irn_op(cmp) == op_Cmp && get_Cmp_left(cmp) == a) {
+ if (classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
+ /* Mux(a CMP 0, X, a) */
+ if (get_irn_op(b) == op_Minus && get_Minus_op(b) == a) {
+ /* Mux(a CMP 0, -a, a) */
+ if (proj_nr == pn_Cmp_Eq) {
+ /* Mux(a == 0, -a, a) ==> -a */
+ n = b;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
+ }
+ else if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) {
+ /* Mux(a != 0, -a, a) ==> a */
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
+ }
+ }
+ else if (classify_Const(b) == CNST_NULL) {
+ /* Mux(a CMP 0, 0, a) */
+ if (proj_nr == pn_Cmp_Lg || proj_nr == pn_Cmp_Ne) {
+ /* Mux(a != 0, 0, a) ==> a */
+ n = a;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
+ }
+ else if (proj_nr == pn_Cmp_Eq) {
+ /* Mux(a == 0, 0, a) ==> 0 */
+ n = b;
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_MUX_TRANSFORM);
+ }
+ }
+ }
+ }
+ }
+ return n;
+}
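+
+/*
+ * Example (illustrative): in Mux(a == 0, -a, a) the true branch yields
+ * a, which equals -a when a == 0, and the false branch yields -a, so
+ * the whole Mux collapses to -a. This is why signed zeros must not be
+ * honored: for a = -0.0 the compare with 0 succeeds, the Mux yields
+ * -0.0, but the replacement -a would yield +0.0.
+ */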
+
+/**
+ * Optimize -a CMP -b into b CMP a.
+ * This works only for modes where unary Minus cannot overflow.
+ * Note that for two's complement integers unary Minus CAN overflow
+ * (on the minimum value), so the transformation does NOT apply to them.
+ */
+static ir_node *equivalent_node_Cmp(ir_node *n)
+{
+ ir_node *left = get_Cmp_left(n);
+ ir_node *right = get_Cmp_right(n);
+
+ if (get_irn_op(left) == op_Minus && get_irn_op(right) == op_Minus &&
+ !mode_overflow_on_unary_Minus(get_irn_mode(left))) {
+ left = get_Minus_op(left);
+ right = get_Minus_op(right);
+ set_Cmp_left(n, right);
+ set_Cmp_right(n, left);
+ }
+ return n;
+}
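+
+/*
+ * Example (illustrative): for a float mode, Cmp(-a, -b) becomes
+ * Cmp(b, a). For a 32 bit two's complement mode this is unsafe:
+ * unary Minus overflows on the minimum value, so the guard above
+ * rejects such modes.
+ */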
+
+/**
+ * Remove Confirm nodes if setting is on.
+ */
+static ir_node *equivalent_node_Confirm(ir_node *n)
+{
+ if (get_Confirm_cmp(n) == pn_Cmp_Eq) {
+ ir_node *bound = get_Confirm_bound(n);
+ ir_op *op = get_irn_op(bound);
+
+ /*
+ * Optimize a rare case:
+ * Confirm(x, '=', Const) ==> Const
+ */
+ if (op == op_Const || op == op_SymConst)
+ return bound;
+ }
+ return get_opt_remove_Confirm() ? get_Confirm_value(n) : n;
+}
+
+/**
+ * equivalent_node() returns a node equivalent to input n. It skips all nodes that
+ * perform no actual computation, such as Id nodes. It does not create
+ * new nodes. It is therefore safe to free n if the node returned is not n.
+ * If a node returns a Tuple we cannot just skip it. If the size of the
+ * in array fits, we transform n into a tuple (e.g., Div).
+ */
+ir_node *
+equivalent_node(ir_node *n)
+{
+ if (n->op->equivalent_node)
+ return n->op->equivalent_node(n);
+ return n;
+}
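+
+/*
+ * Typical use (a sketch; exchange() is the usual libFirm replacement
+ * helper, assumed here for illustration):
+ *
+ * ir_node *m = equivalent_node(n);
+ * if (m != n)
+ * exchange(n, m);
+ */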
+
+/**
+ * Set the default equivalent_node operation for an opcode.
+ */
+static ir_op *firm_set_default_equivalent_node(ir_op *op)
+{
+#define CASE(a) \
+ case iro_##a: \
+ op->equivalent_node = equivalent_node_##a; \
+ break
+
+ switch (op->code) {
+ CASE(Block);
+ CASE(Jmp);
+ CASE(Or);
+ CASE(Add);
+ CASE(Eor);
+ CASE(Sub);
+ CASE(Shl);
+ CASE(Shr);
+ CASE(Shrs);
+ CASE(Rot);
+ CASE(Not);
+ CASE(Minus);
+ CASE(Mul);
+ CASE(Div);
+ CASE(DivMod);
+ CASE(And);
+ CASE(Conv);
+ CASE(Cast);
+ CASE(Phi);
+ CASE(Proj);
+ CASE(Id);
+ CASE(Mux);
+ CASE(Cmp);
+ CASE(Confirm);
+ default:
+ op->equivalent_node = NULL;
+ }
+
+ return op;
+#undef CASE
+}
+
+/**
+ * Do node-specific optimizations on a node's predecessors.
+ */
+static void
+optimize_preds(ir_node *n) {
+ ir_node *a = NULL, *b = NULL;
+
+ /* get the operands we will work on for simple cases. */
+ if (is_binop(n)) {
+ a = get_binop_left(n);
+ b = get_binop_right(n);
+ } else if (is_unop(n)) {
+ a = get_unop_op(n);
+ }
+
+ switch (get_irn_opcode(n)) {
+
+ case iro_Cmp:
+ /* We don't want Cast as input to Cmp. */
+ if (get_irn_op(a) == op_Cast) {
+ a = get_Cast_op(a);
+ set_Cmp_left(n, a);
+ }
+ if (get_irn_op(b) == op_Cast) {
+ b = get_Cast_op(b);
+ set_Cmp_right(n, b);
+ }
+ break;
+
+ default: break;
+ } /* end switch */
+}
+
+/**
+ * Transform AddP(P, ConvIs(Iu)), AddP(P, ConvIu(Is)) and
+ * SubP(P, ConvIs(Iu)), SubP(P, ConvIu(Is)).
+ * If possible, remove the Convs.
+ */
+static ir_node *transform_node_AddSub(ir_node *n)
+{
+ ir_mode *mode = get_irn_mode(n);
+
+ if (mode_is_reference(mode)) {
+ ir_node *left = get_binop_left(n);
+ ir_node *right = get_binop_right(n);
+ int ref_bits = get_mode_size_bits(mode);
+
+ if (get_irn_op(left) == op_Conv) {
+ ir_mode *mode = get_irn_mode(left);
+ int bits = get_mode_size_bits(mode);
+
+ if (ref_bits == bits &&
+ mode_is_int(mode) &&
+ get_mode_arithmetic(mode) == irma_twos_complement) {
+ ir_node *pre = get_Conv_op(left);
+ ir_mode *pre_mode = get_irn_mode(pre);
+
+ if (mode_is_int(pre_mode) &&
+ get_mode_size_bits(pre_mode) == bits &&
+ get_mode_arithmetic(pre_mode) == irma_twos_complement) {
+ /* ok, this Conv just changes the sign; moreover, the calculation
+ * is done with the same number of bits as our address mode, so
+ * we can ignore the Conv, as address calculation can be viewed
+ * as either signed or unsigned
+ */
+ set_binop_left(n, pre);
+ }
+ }
+ }
+
+ if (get_irn_op(right) == op_Conv) {
+ ir_mode *mode = get_irn_mode(right);
+ int bits = get_mode_size_bits(mode);
+
+ if (ref_bits == bits &&
+ mode_is_int(mode) &&
+ get_mode_arithmetic(mode) == irma_twos_complement) {
+ ir_node *pre = get_Conv_op(right);
+ ir_mode *pre_mode = get_irn_mode(pre);
+
+ if (mode_is_int(pre_mode) &&
+ get_mode_size_bits(pre_mode) == bits &&
+ get_mode_arithmetic(pre_mode) == irma_twos_complement) {
+ /* ok, this Conv just changes the sign; moreover, the calculation
+ * is done with the same number of bits as our address mode, so
+ * we can ignore the Conv, as address calculation can be viewed
+ * as either signed or unsigned
+ */
+ set_binop_right(n, pre);
+ }
+ }
+ }
+ }
+ return n;
+}
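+
+/*
+ * Example (illustrative): with a 32 bit reference mode P, the node
+ * Add(p_P, Conv_Is(x_Iu)) becomes Add(p_P, x_Iu): a Conv between two
+ * 32 bit two's complement integer modes only changes the sign
+ * interpretation, which is irrelevant for address arithmetic.
+ */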
+
+/**
+ * Do the AddSub optimization, then transform Add(a,a) into Mul(a, 2)
+ * if the mode is integer or float.
+ * Transform Add(a,-b) into Sub(a,b).
+ * Reassociation might fold this further.
+ */
+static ir_node *transform_node_Add(ir_node *n)
+{
+ ir_mode *mode;
+ ir_node *oldn = n;
+
+ n = transform_node_AddSub(n);
+
+ mode = get_irn_mode(n);
+ if (mode_is_num(mode)) {
+ ir_node *a = get_Add_left(n);
+
+ if (a == get_Add_right(n)) {
+ ir_node *block = get_irn_n(n, -1);
+
+ n = new_rd_Mul(
+ get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ a,
+ new_r_Const_long(current_ir_graph, block, mode, 2),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_A);
+ }
+ else {
+ ir_node *b = get_Add_right(n);
+
+ if (get_irn_op(a) == op_Minus) {
+ n = new_rd_Sub(
+ get_irn_dbg_info(n),
+ current_ir_graph,
+ get_irn_n(n, -1),
+ b,
+ get_Minus_op(a),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
+ }
+ else if (get_irn_op(b) == op_Minus) {
+ n = new_rd_Sub(
+ get_irn_dbg_info(n),
+ current_ir_graph,
+ get_irn_n(n, -1),
+ a,
+ get_Minus_op(b),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_A_MINUS_B);
+ }
+ }
+ }
+ return n;
+}
+
+/**
+ * Do the AddSub optimization, then Transform Sub(0,a) into Minus(a).
+ */
+static ir_node *transform_node_Sub(ir_node *n)
+{
+ ir_mode *mode;
+ ir_node *oldn = n;
+
+ n = transform_node_AddSub(n);
+
+ mode = get_irn_mode(n);
+ if (mode_is_num(mode) && (classify_Const(get_Sub_left(n)) == CNST_NULL)) {
+ n = new_rd_Minus(
+ get_irn_dbg_info(n),
+ current_ir_graph,
+ get_irn_n(n, -1),
+ get_Sub_right(n),
+ mode);
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_0_A);
+ }
+
+ return n;
+}
+
+/**
+ * Transform Mul(a,-1) into -a.
+ * Do architecture-dependent optimizations on Mul nodes.
+ */
+static ir_node *transform_node_Mul(ir_node *n) {
+ ir_node *oldn = n;
+ ir_mode *mode = get_irn_mode(n);
+
+ if (mode_is_signed(mode)) {
+ ir_node *r = NULL;
+ ir_node *a = get_Mul_left(n);
+ ir_node *b = get_Mul_right(n);
+
+ if (value_of(a) == get_mode_minus_one(mode))
+ r = b;
+ else if (value_of(b) == get_mode_minus_one(mode))
+ r = a;
+ if (r) {
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), r, mode);
+ DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_MUL_MINUS_1);
+ return n;
+ }
+ }
+ return arch_dep_replace_mul_with_shifts(n);
+}
+
+/**
+ * Transform a Div node.
+ */
+static ir_node *transform_node_Div(ir_node *n)
+{
+ tarval *tv = value_of(n);
+ ir_node *value = n;
+
+ /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */
+
+ if (tv != tarval_bad) {
+ value = new_Const(get_tarval_mode(tv), tv);
+
+ DBG_OPT_CSTEVAL(n, value);
+ }
+ else /* Try architecture dependent optimization */
+ value = arch_dep_replace_div_by_const(n);
+
+ if (value != n) {
+ /* Turn Div into a tuple (mem, bad, value) */
+ ir_node *mem = get_Div_mem(n);
+
+ turn_into_tuple(n, 3);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Div_res, value);
+ }
+ return n;
+}
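+
+/*
+ * Example (illustrative): Div(m, Const 6, Const 3) is turned into a
+ * tuple, so Proj(n, pn_Div_res) yields Const 2, Proj(n, pn_Div_M) the
+ * old memory, and Proj(n, pn_Div_X_except) a Bad, as no exception can
+ * occur anymore.
+ */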
+
+/**
+ * Transform a Mod node.
+ */
+static ir_node *transform_node_Mod(ir_node *n)
+{
+ tarval *tv = value_of(n);
+ ir_node *value = n;
+
+ /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause an exception */
+
+ if (tv != tarval_bad) {
+ value = new_Const(get_tarval_mode(tv), tv);
+
+ DBG_OPT_CSTEVAL(n, value);
+ }
+ else /* Try architecture dependent optimization */
+ value = arch_dep_replace_mod_by_const(n);
+
+ if (value != n) {
+ /* Turn Mod into a tuple (mem, bad, value) */
+ ir_node *mem = get_Mod_mem(n);
+
+ turn_into_tuple(n, 3);
+ set_Tuple_pred(n, pn_Mod_M, mem);
+ set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Mod_res, value);
+ }
+ return n;
+}
+
+/**
+ * Transform a DivMod node.
+ */
+static ir_node *transform_node_DivMod(ir_node *n)
+{
+ int evaluated = 0;
+
+ ir_node *a = get_DivMod_left(n);
+ ir_node *b = get_DivMod_right(n);
+ ir_mode *mode = get_irn_mode(a);
+ tarval *ta = value_of(a);
+ tarval *tb = value_of(b);
+
+ if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
+ return n;
+
+ /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */
+
+ if (tb != tarval_bad) {
+ if (tb == get_mode_one(get_tarval_mode(tb))) {
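+ /* a / 1 = a and a % 1 = 0: a stays the Div result, b becomes the 0 constant */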
+ b = new_Const (mode, get_mode_null(mode));
+ evaluated = 1;
+
+ DBG_OPT_CSTEVAL(n, b);
+ }
+ else if (ta != tarval_bad) {
+ tarval *resa, *resb;
+ resa = tarval_div (ta, tb);
+ if (resa == tarval_bad) return n; /* Causes an exception! Model by
+ replacing it with a Jmp for the X result!? */
+ resb = tarval_mod (ta, tb);
+ if (resb == tarval_bad) return n; /* Causes an exception! */
+ a = new_Const (mode, resa);
+ b = new_Const (mode, resb);
+ evaluated = 1;
+
+ DBG_OPT_CSTEVAL(n, a);
+ DBG_OPT_CSTEVAL(n, b);
+ }
+ else { /* Try architecture dependent optimization */
+ arch_dep_replace_divmod_by_const(&a, &b, n);
+ evaluated = a != NULL;
+ }
+ } else if (ta == get_mode_null(mode)) {
+ /* 0 / non-Const = 0, and 0 % non-Const = 0 */
+ b = a;
+ evaluated = 1;
+ }
+
+ if (evaluated) { /* replace by tuple */
+ ir_node *mem = get_DivMod_mem(n);
+ turn_into_tuple(n, 4);
+ set_Tuple_pred(n, pn_DivMod_M, mem);
+ set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_res_div, a);
+ set_Tuple_pred(n, pn_DivMod_res_mod, b);
+ }
+
+ return n;
+}
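+
+/*
+ * Example (illustrative): DivMod(7, 2) with both operands constant
+ * folds to the tuple (mem, Bad, Const 3, Const 1); DivMod(a, 1) folds
+ * to (mem, Bad, a, Const 0) without inspecting a at all.
+ */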
+
+/**
+ * Optimize Abs(x) into x if x is Confirmed >= 0.
+ * Optimize Abs(x) into -x if x is Confirmed <= 0.
+ */
+static ir_node *transform_node_Abs(ir_node *n)
+{
+ ir_node *oldn = n;
+ ir_node *a = get_Abs_op(n);
+ value_classify sign = classify_value_sign(a);
+
+ if (sign == VALUE_NEGATIVE) {
+ ir_mode *mode = get_irn_mode(n);
+
+ /*
+ * We can replace the Abs by -x here.
+ * We even could add a new Confirm here.
+ *
+ * Note that -x would create a new node, so we could
+ * not run it in the equivalent_node() context.
+ */
+ n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph,
+ get_irn_n(n, -1), a, mode);
+
+ DBG_OPT_CONFIRM(oldn, n);
+ }
+ else if (sign == VALUE_POSITIVE) {
+ /* n is positive, Abs is not needed */
+ n = a;
+
+ DBG_OPT_CONFIRM(oldn, n);
+ }
+
+ return n;
+}
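+
+/*
+ * Example (illustrative): inside the else branch of "if (x > 0)" the
+ * frontend or an analysis may have placed Confirm(x, <=, 0); then
+ * classify_value_sign() reports VALUE_NEGATIVE and Abs(x) is rewritten
+ * to Minus(x) here.
+ */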
+
+/**
+ * Transform a Cond node.
+ */
+static ir_node *transform_node_Cond(ir_node *n)
+{
+ /* Replace the Cond by a Jmp if it branches on a constant
+ condition. */
+ ir_node *jmp;
+ ir_node *a = get_Cond_selector(n);
+ tarval *ta = value_of(a);
+
+ /* we need block info which is not available in floating irgs */
+ if (get_irg_pinned(current_ir_graph) == op_pin_state_floats)
+ return n;
+
+ if ((ta != tarval_bad) &&
+ (get_irn_mode(a) == mode_b) &&
+ (get_opt_unreachable_code())) {
+ /* It's a boolean Cond, branching on a boolean constant.
+ Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
+ jmp = new_r_Jmp(current_ir_graph, get_nodes_block(n));
+ turn_into_tuple(n, 2);
+ if (ta == tarval_b_true) {
+ set_Tuple_pred(n, pn_Cond_false, new_Bad());
+ set_Tuple_pred(n, pn_Cond_true, jmp);
+ } else {
+ set_Tuple_pred(n, pn_Cond_false, jmp);
+ set_Tuple_pred(n, pn_Cond_true, new_Bad());
+ }
+ /* We might generate an endless loop, so keep it alive. */
+ add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
+ } else if ((ta != tarval_bad) &&
+ (get_irn_mode(a) == mode_Iu) &&
+ (get_Cond_kind(n) == dense) &&
+ (get_opt_unreachable_code())) {
+ /* I don't want to allow Tuples smaller than the biggest Proj.
+ Also this tuple might get really big...
+ I generate the Jmp here, and remember it in link. Link is used
+ when optimizing Proj. */
+ set_irn_link(n, new_r_Jmp(current_ir_graph, get_nodes_block(n)));
+ /* We might generate an endless loop, so keep it alive. */
+ add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_block(n));
+ } else if ((get_irn_op(a) == op_Eor)
+ && (get_irn_mode(a) == mode_b)
+ && (classify_tarval(value_of(get_Eor_right(a))) == TV_CLASSIFY_ONE)) {
+ /* The Eor is a negate. Generate a new Cond without the negate,
+ simulate the negate by exchanging the results. */
+ set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
+ get_Eor_left(a)));
+ } else if ((get_irn_op(a) == op_Not)
+ && (get_irn_mode(a) == mode_b)) {
+ /* A Not before the Cond. Generate a new Cond without the Not,
+ simulate the Not by exchanging the results. */
+ set_irn_link(n, new_r_Cond(current_ir_graph, get_nodes_block(n),
+ get_Not_op(a)));
+ }
+ return n;
+}
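+
+/*
+ * Example (illustrative): Cond(Const true) becomes a tuple such that
+ * Proj(n, pn_Cond_true) yields a Jmp and Proj(n, pn_Cond_false) yields
+ * Bad; control flow optimization later removes the dead branch.
+ */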
+
+/**
+ * Transform an Eor.
+ */
+static ir_node *transform_node_Eor(ir_node *n)
+{
+ ir_node *oldn = n;
+ ir_node *a = get_Eor_left(n);
+ ir_node *b = get_Eor_right(n);
+ ir_mode *mode = get_irn_mode(n);
+
+ if (a == b) {
+ /* a ^ a = 0 */
+ n = new_rd_Const(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1),
+ mode, get_mode_null(mode));
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_A_A);
+ }
+ else if ((mode == mode_b)
+ && (get_irn_op(a) == op_Proj)
+ && (get_irn_mode(a) == mode_b)
+ && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)
+ && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
+ /* The Eor negates a Cmp. The Cmp can produce the negated result anyway! */
+ n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
+ mode_b, get_negated_pnc(get_Proj_proj(a), mode));
+
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT_BOOL);
+ }
+ else if ((mode == mode_b)
+ && (classify_tarval (value_of(b)) == TV_CLASSIFY_ONE)) {
+ /* The Eor is a Not. Replace it by a Not. */
+ /* TODO: extend this to all-ones bit masks (bitfield 1111111). */
+ n = new_r_Not(current_ir_graph, get_irn_n(n, -1), a, mode_b);
+
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+ }
+
+ return n;
+}
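+
+/*
+ * Example (illustrative): for boolean b, Eor(b, true) is just Not(b);
+ * and if b is Proj(Cmp(x, y)), the negation is folded into the Cmp by
+ * switching to the negated projection number instead of creating a
+ * Not node.
+ */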
+
+/**
+ * Transform a boolean Not.
+ */
+static ir_node *transform_node_Not(ir_node *n)
+{
+ ir_node *oldn = n;
+ ir_node *a = get_Not_op(n);
+
+ if ( (get_irn_mode(n) == mode_b)
+ && (get_irn_op(a) == op_Proj)
+ && (get_irn_mode(a) == mode_b)
+ && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
+ /* We negate a Cmp. The Cmp can produce the negated result anyway! */
+ n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
+ mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
+ DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
+ }
+
+ return n;
+}
+
+/**
+ * Transform a Cast_type(Const) into a new Const_type
+ */
+static ir_node *transform_node_Cast(ir_node *n) {
+ ir_node *oldn = n;
+ ir_node *pred = get_Cast_op(n);
+ type *tp = get_irn_type(n);
+
+ if (get_irn_op(pred) == op_Const && get_Const_type(pred) != tp) {
+ n = new_rd_Const_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_irn_mode(pred),
+ get_Const_tarval(pred), tp);
+ DBG_OPT_CSTEVAL(oldn, n);
+ } else if ((get_irn_op(pred) == op_SymConst) && (get_SymConst_value_type(pred) != tp)) {
+ n = new_rd_SymConst_type(NULL, current_ir_graph, get_irn_n(pred, -1), get_SymConst_symbol(pred),
+ get_SymConst_kind(pred), tp);
+ DBG_OPT_CSTEVAL(oldn, n);
+ }
+
+ return n;
+}
+
+/**
+ * Transform a Proj(Div) with a non-zero value.
+ * Removes the exceptions and routes the memory to the NoMem node.
+ */
+static ir_node *transform_node_Proj_Div(ir_node *proj)
+{
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *b = get_Div_right(n);
+ long proj_nr;
+
+ if (value_not_zero(b)) {
+ /* div(x, y) && y != 0 */
+ proj_nr = get_Proj_proj(proj);
+
+ /* this node may float */
+ set_irn_pinned(n, op_pin_state_floats);
+
+ if (proj_nr == pn_Div_X_except) {
+ /* we found an exception handler, remove it */
+ return new_Bad();
+ } else if (proj_nr == pn_Div_M) {
+ /* the memory Proj can be removed */
+ ir_node *res = get_Div_mem(n);
+ set_Div_mem(n, get_irg_no_mem(current_ir_graph));
+
+ return res;
+ }
+ }
+ return proj;
+}
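+
+/*
+ * Example (illustrative): if the divisor is a non-zero Const, or is
+ * Confirmed to be != 0, value_not_zero() holds; the Div cannot trap
+ * anymore, its exception Proj turns into Bad and its memory dependency
+ * is rerouted to NoMem, so the node may float freely.
+ */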
+
+/**
+ * Transform a Proj(Mod) whose divisor is known to be non-zero.
+ * Removes the exception Proj and routes the memory to the NoMem node.
+ */
+static ir_node *transform_node_Proj_Mod(ir_node *proj)
+{
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *b = get_Mod_right(n);
+ long proj_nr;
+
+ if (value_not_zero(b)) {
+ /* mod(x, y) && y != 0 */
+ proj_nr = get_Proj_proj(proj);
+
+ /* this node may float */
+ set_irn_pinned(n, op_pin_state_floats);
+
+ if (proj_nr == pn_Mod_X_except) {
+ /* we found an exception handler, remove it */
+ return new_Bad();
+ } else if (proj_nr == pn_Mod_M) {
+ /* the memory Proj can be removed */
+ ir_node *res = get_Mod_mem(n);
+ set_Mod_mem(n, get_irg_no_mem(current_ir_graph));
+
+ return res;
+ }
+ else if (proj_nr == pn_Mod_res && get_Mod_left(n) == b) {
+ /* a % a = 0 if a != 0 */
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_Const(mode, get_mode_null(mode));
+
+ DBG_OPT_CSTEVAL(n, res);
+ return res;
+ }
+ }
+ return proj;
+}
+
+/**
+ * Transform a Proj(DivMod) whose divisor is known to be non-zero.
+ * Removes the exception Proj and routes the memory to the NoMem node.
+ */
+static ir_node *transform_node_Proj_DivMod(ir_node *proj)
+{
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *b = get_DivMod_right(n);
+ long proj_nr;
+
+ if (value_not_zero(b)) {
+ /* DivMod(x, y) && y != 0 */
+ proj_nr = get_Proj_proj(proj);
+
+ /* this node may float */
+ set_irn_pinned(n, op_pin_state_floats);
+
+ if (proj_nr == pn_DivMod_X_except) {
+ /* we found an exception handler, remove it */
+ return new_Bad();
+ }
+ else if (proj_nr == pn_DivMod_M) {
+ /* the memory Proj can be removed */
+ ir_node *res = get_DivMod_mem(n);
+ set_DivMod_mem(n, get_irg_no_mem(current_ir_graph));
+
+ return res;
+ }
+ else if (proj_nr == pn_DivMod_res_mod && get_DivMod_left(n) == b) {
+ /* a % a = 0 if a != 0 */
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_Const(mode, get_mode_null(mode));
+
+ DBG_OPT_CSTEVAL(n, res);
+ return res;
+ }
+ }
+ return proj;
+}
+
+/**
+ * Optimizes jump tables (CondIs or CondIu) by removing all impossible cases.
+ */
+static ir_node *transform_node_Proj_Cond(ir_node *proj)
+{
+ if (get_opt_unreachable_code()) {
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *b = get_Cond_selector(n);
+ tarval *tb = value_of(b);
+
+ if (tb != tarval_bad && mode_is_int(get_tarval_mode(tb))) {
+ /* we have a constant switch */
+ long num = get_Proj_proj(proj);
+
+ if (num != get_Cond_defaultProj(n)) { /* we cannot optimize default Proj's yet */
+ if (get_tarval_long(tb) == num) {
+ /* Do NOT create a jump here, or we will have 2 control flow ops
+ * in a block. This case is optimized away in optimize_cf(). */
+ return proj;
+ }
+ else {
+ /* this case will NEVER be taken, kill it */
+ return new_Bad();
+ }
+ }
+ }
+ }
+ return proj;
+}
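+
+/*
+ * Example (illustrative): for switch (2) { case 1: ...; case 2: ...; }
+ * the selector is the constant 2, so Proj(Cond, 1) is replaced by Bad,
+ * while Proj(Cond, 2) is kept and later resolved by optimize_cf().
+ */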
+
+/**
+ * Normalizes and optimizes Cmp nodes.
+ */
+static ir_node *transform_node_Proj_Cmp(ir_node *proj)
+{
+ if (get_opt_reassociation()) {
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *left = get_Cmp_left(n);
+ ir_node *right = get_Cmp_right(n);
+ ir_node *c = NULL;
+ tarval *tv = NULL;
+ int changed = 0;
+ ir_mode *mode = NULL;
+ long proj_nr = get_Proj_proj(proj);
+
+ /*
+ * First step: normalize the compare op
+ * by placing the constant on the right side
+ * or moving the lower address node to the left.
+ * We ignore the case where both operands are constants;
+ * that case should already be optimized away.
+ */
+ if (get_irn_op(right) == op_Const)
+ c = right;
+ else if (get_irn_op(left) == op_Const) {
+ c = left;
+ left = right;
+ right = c;
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 1;
+ }
+ else if (left > right) {
+ ir_node *t = left;
+
+ left = right;
+ right = t;
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 1;
+ }
+
+ /*
+ * Second step: Try to reduce the magnitude
+ * of a constant. This may help to generate better code
+ * later and may help to normalize more compares.
+ * Of course this is only possible for integer values.
+ */
+ if (c) {
+ mode = get_irn_mode(c);
+ tv = get_Const_tarval(c);
+
+ if (tv != tarval_bad) {
+ /* The following optimization is possible on modes without overflow
+ * on unary Minus, or on == and != for any mode:
+ * -a CMP c ==> a swap(CMP) -c
+ *
+ * Beware: for two's complement, overflow may occur, so there only
+ * == and != can be optimized; see this:
+ * -MININT < 0 =/=> MININT > 0 !!!
+ */
+ if (get_opt_constant_folding() && get_irn_op(left) == op_Minus &&
+ (!mode_overflow_on_unary_Minus(mode) ||
+ (mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
+ left = get_Minus_op(left);
+ tv = tarval_sub(get_mode_null(mode), tv);
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 2;
+ }
+
+ /* for integer modes, we have more */
+ if (mode_is_int(mode)) {
+ /* Ne includes Unordered, which is not possible on integers.
+ * However, frontends often get this wrong, so fix it here */
+ if (proj_nr & pn_Cmp_Uo) {
+ proj_nr &= ~pn_Cmp_Uo;
+ set_Proj_proj(proj, proj_nr);
+ }
+
+ /* c > 0 : a < c ==> a <= (c-1) a >= c ==> a > (c-1) */
+ if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Ge) &&
+ tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Gt) {
+ tv = tarval_sub(tv, get_mode_one(mode));
+
+ proj_nr ^= pn_Cmp_Eq;
+ changed |= 2;
+ }
+ /* c < 0 : a > c ==> a >= (c+1) a <= c ==> a < (c+1) */
+ else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Le) &&
+ tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Lt) {
+ tv = tarval_add(tv, get_mode_one(mode));
+
+ proj_nr ^= pn_Cmp_Eq;
+ changed |= 2;
+ }
+
+ /* the following reassociations work only for == and != */
+ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+
+ /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
+ if (classify_tarval(tv) == TV_CLASSIFY_NULL && get_irn_op(left) == op_Sub) {
+ right = get_Sub_right(left);
+ left = get_Sub_left(left);
+
+ tv = value_of(right);
+ changed = 1;
+ }
+
+ if (tv != tarval_bad) {
+ ir_op *op = get_irn_op(left);
+
+ /* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */
+ if (op == op_Sub) {
+ ir_node *c1 = get_Sub_right(left);
+ tarval *tv2 = value_of(c1);
+
+ if (tv2 != tarval_bad) {
+ tv2 = tarval_add(tv, tv2);
+
+ if (tv2 != tarval_bad) {
+ left = get_Sub_left(left);
+ tv = tv2;
+ changed |= 2;
+ }
+ }
+ }
+ /* a+c1 == c2 ==> a == c2-c1, a+c1 != c2 ==> a != c2-c1 */
+ else if (op == op_Add) {
+ ir_node *a_l = get_Add_left(left);
+ ir_node *a_r = get_Add_right(left);
+ ir_node *a;
+ tarval *tv2;
+
+ if (get_irn_op(a_l) == op_Const) {
+ a = a_r;
+ tv2 = value_of(a_l);
+ }
+ else {
+ a = a_l;
+ tv2 = value_of(a_r);
+ }
+
+ if (tv2 != tarval_bad) {
+ tv2 = tarval_sub(tv, tv2);
+
+ if (tv2 != tarval_bad) {
+ left = a;
+ tv = tv2;
+ changed |= 2;
+ }
+ }
+ }
+ /* -a == c ==> a == -c, -a != c ==> a != -c */
+ else if (op == op_Minus) {
+ tarval *tv2 = tarval_sub(get_mode_null(mode), tv);
+
+ if (tv2 != tarval_bad) {
+ left = get_Minus_op(left);
+ tv = tv2;
+ changed |= 2;
+ }
+ }
+ }
+ } /* == or != */
+ /* the following reassociations work only for <= and < */
+ else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+ if (tv != tarval_bad) {
+ ir_op *op = get_irn_op(left);
+
+ /* c >= 0 : Abs(a) <= c ==> (unsigned)(a + c) <= 2*c */
+ if (op == op_Abs) {
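+ /* TODO: the Abs(a) <= c rewrite sketched above is not implemented yet */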
+ }
+ }
+ }
+ } /* mode_is_int */
+ }
+ }
+
+ if (changed) {
+ ir_node *block = get_irn_n(n, -1); /* Beware of get_nodes_Block() */
+
+ if (changed & 2) /* need a new Const */
+ right = new_Const(mode, tv);
+
+ /* create a new compare */
+ n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block,
+ left, right);
+
+ set_Proj_pred(proj, n);
+ set_Proj_proj(proj, proj_nr);
+ }
+ }
+ return proj;
+}
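+
+/*
+ * Example (illustrative): 3 < a is first normalized to a > 3 by the
+ * constant-to-the-right rule; independently, a < 4 is reduced to
+ * a <= 3, so both compares end up with the same constant and can be
+ * shared.
+ */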
+
+/**
+ * Does all optimizations on nodes that must be done on their Projs
+ * because they create new nodes.
+ */
+static ir_node *transform_node_Proj(ir_node *proj)
+{
+ ir_node *n = get_Proj_pred(proj);
+
+ switch (get_irn_opcode(n)) {
+ case iro_Div:
+ return transform_node_Proj_Div(proj);
+
+ case iro_Mod:
+ return transform_node_Proj_Mod(proj);
+
+ case iro_DivMod:
+ return transform_node_Proj_DivMod(proj);
+
+ case iro_Cond:
+ return transform_node_Proj_Cond(proj);
+
+ case iro_Cmp:
+ return transform_node_Proj_Cmp(proj);
+
+ case iro_Tuple:
+ /* should not happen, but if it does, it will be optimized away */
+ return equivalent_node_Proj(proj);
+
+ default:
+ /* do nothing */
+ return proj;
+ }
+}
+
+/**
+ * Returns the operands of a commutative bin-op. If one operand is
+ * a Const, it is returned as the second one.
+ */
+static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
+{
+ ir_node *op_a = get_binop_left(binop);
+ ir_node *op_b = get_binop_right(binop);
+
+ assert(is_op_commutative(get_irn_op(binop)));
+
+ if (get_irn_op(op_a) == op_Const) {
+ *a = op_b;
+ *c = op_a;
+ }
+ else {
+ *a = op_a;
+ *c = op_b;
+ }
+}
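+
+/*
+ * Usage (a sketch): for n = Add(Const 3, x),
+ *
+ * ir_node *a, *c;
+ * get_comm_Binop_Ops(n, &a, &c);
+ *
+ * leaves a == x and c == Const 3, so callers only need to test c.
+ */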
+
+/**
+ * Optimize an Or(And(Or(And(v,c4),c3),c2),c1) pattern if possible.
+ * Such patterns may arise in bitfield stores.
+ *
+ * value c4 value c4 & c2
+ * AND c3 AND c1 | c3
+ * OR c2 ===> OR
+ * AND c1
+ * OR
+ */
+static ir_node *transform_node_Or_bf_store(ir_node *or)
+{
+ ir_node *and, *c1;
+ ir_node *or_l, *c2;
+ ir_node *and_l, *c3;
+ ir_node *value, *c4;
+ ir_node *new_and, *new_const, *block;
+ ir_mode *mode = get_irn_mode(or);
+
+ tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;
+
+ get_comm_Binop_Ops(or, &and, &c1);
+ if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
+ return or;
+
+ get_comm_Binop_Ops(and, &or_l, &c2);
+ if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
+ return or;
+
+ get_comm_Binop_Ops(or_l, &and_l, &c3);
+ if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
+ return or;
+
+ get_comm_Binop_Ops(and_l, &value, &c4);
+ if (get_irn_op(c4) != op_Const)
+ return or;
+
+ /* ok, found the pattern, check for conditions */
+ assert(mode == get_irn_mode(and));
+ assert(mode == get_irn_mode(or_l));
+ assert(mode == get_irn_mode(and_l));
+
+ tv1 = get_Const_tarval(c1);
+ tv2 = get_Const_tarval(c2);
+ tv3 = get_Const_tarval(c3);
+ tv4 = get_Const_tarval(c4);
+
+ tv = tarval_or(tv4, tv2);
+ if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
+ /* both And masks have a 0 at the same bit position */
+ return or;
+ }
+
+ n_tv4 = tarval_not(tv4);
+ if (tv3 != tarval_and(tv3, n_tv4)) {
+ /* bit in the or_mask is outside the and_mask */
+ return or;
+ }
+
+ n_tv2 = tarval_not(tv2);
+ if (tv1 != tarval_and(tv1, n_tv2)) {
+ /* bit in the or_mask is outside the and_mask */
+ return or;
+ }
+
+ /* ok, all conditions met */
+ block = get_irn_n(or, -1);
+
+ new_and = new_r_And(current_ir_graph, block,
+ value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
+
+ new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
+
+ set_Or_left(or, new_and);
+ set_Or_right(or, new_const);
+
+ /* check for more */
+ return transform_node_Or_bf_store(or);
+}
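+
+/*
+ * Example (illustrative): two adjacent bitfield assignments such as
+ * s.f1 = v1; s.f2 = v2; typically lower to exactly this masked
+ * And/Or chain; when the masks are compatible, the rewrite above
+ * merges both insertions into a single And plus Or, and the recursive
+ * call picks up further chained insertions.
+ */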
+
+/**
+ * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot
+ */
+static ir_node *transform_node_Or_Rot(ir_node *or)
+{
+ ir_mode *mode = get_irn_mode(or);
+ ir_node *shl, *shr, *block;
+ ir_node *irn, *x, *c1, *c2, *v, *sub, *n;
+ tarval *tv1, *tv2;
+
+ if (! mode_is_int(mode))
+ return or;
+
+ shl = get_binop_left(or);
+ shr = get_binop_right(or);
+
+ if (get_irn_op(shl) == op_Shr) {
+ if (get_irn_op(shr) != op_Shl)
+ return or;
+
+ irn = shl;
+ shl = shr;
+ shr = irn;
+ }
+ else if (get_irn_op(shl) != op_Shl)
+ return or;
+ else if (get_irn_op(shr) != op_Shr)
+ return or;
+
+ x = get_Shl_left(shl);
+ if (x != get_Shr_left(shr))
+ return or;
+
+ c1 = get_Shl_right(shl);
+ c2 = get_Shr_right(shr);
+ if (get_irn_op(c1) == op_Const && get_irn_op(c2) == op_Const) {
+ tv1 = get_Const_tarval(c1);
+ if (! tarval_is_long(tv1))
+ return or;
+
+ tv2 = get_Const_tarval(c2);
+ if (! tarval_is_long(tv2))
+ return or;
+
+ if (get_tarval_long(tv1) + get_tarval_long(tv2)
+ != get_mode_size_bits(mode))
+ return or;
+
+ /* ok, conditions met */
+ block = get_irn_n(or, -1);
+
+ n = new_r_Rot(current_ir_graph, block, x, c1, mode);
+
+ DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROT);
+ return n;
+ }
+ else if (get_irn_op(c1) == op_Sub) {
+ v = c2;
+ sub = c1;
+
+ if (get_Sub_right(sub) != v)
+ return or;
+
+ c1 = get_Sub_left(sub);
+ if (get_irn_op(c1) != op_Const)
+ return or;
+
+ tv1 = get_Const_tarval(c1);
+ if (! tarval_is_long(tv1))
+ return or;