+ * Transform a Proj(DivMod) with a non-zero divisor.
+ * Removes the exceptions and routes the memory to the NoMem node.
+ */
+static ir_node *transform_node_Proj_DivMod(ir_node *proj) {
+ ir_node *divmod = get_Proj_pred(proj);
+ ir_node *b = get_DivMod_right(divmod);
+ ir_node *confirm, *res, *new_mem;
+ long proj_nr;
+
+ if (value_not_zero(b, &confirm)) {
+ /* DivMod(x, y) && y != 0 */
+ proj_nr = get_Proj_proj(proj);
+
+ switch (proj_nr) {
+
+ case pn_DivMod_X_regular:
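+ /* the exception can no longer occur: the regular control flow Proj
+  * becomes an unconditional Jmp in the DivMod's block (index -1) */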
+ return new_r_Jmp(current_ir_graph, get_irn_n(divmod, -1));
+
+ case pn_DivMod_X_except:
+ /* we found an exception handler, remove it */
+ DBG_OPT_EXC_REM(proj);
+ return new_Bad();
+
+ case pn_DivMod_M:
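+ /* users of the memory Proj are rerouted to the memory that entered the
+  * DivMod; the DivMod itself continues on NoMem and may float */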
+ res = get_DivMod_mem(divmod);
+ new_mem = get_irg_no_mem(current_ir_graph);
+
+ if (confirm) {
+ /* This node can only float up to the Confirm block */
+ new_mem = new_r_Pin(current_ir_graph, get_nodes_block(confirm), new_mem);
+ }
+ set_irn_pinned(divmod, op_pin_state_floats);
+ /* this is a DivMod without exception, we can remove the memory edge */
+ set_DivMod_mem(divmod, new_mem);
+ return res;
+
+ case pn_DivMod_res_mod:
+ if (get_DivMod_left(divmod) == b) {
+ /* a % a = 0 if a != 0 */
+ ir_mode *mode = get_irn_mode(proj);
+ ir_node *res = new_Const(mode, get_mode_null(mode));
+
+ DBG_OPT_CSTEVAL(divmod, res);
+ return res;
+ }
+ }
+ }
+ return proj;
+} /* transform_node_Proj_DivMod */
+
+/**
+ * Optimizes jump tables (CondIs or CondIu) by removing all impossible cases.
+ */
+static ir_node *transform_node_Proj_Cond(ir_node *proj) {
+ if (get_opt_unreachable_code()) {
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *b = get_Cond_selector(n);
+
+ if (mode_is_int(get_irn_mode(b))) {
+ tarval *tb = value_of(b);
+
+ if (tb != tarval_bad) {
+ /* we have a constant switch */
+ long num = get_Proj_proj(proj);
+
+ if (num != get_Cond_defaultProj(n)) { /* we cannot optimize the default Proj yet */
+ if (get_tarval_long(tb) == num) {
+ /* Do NOT create a jump here, or we will have 2 control flow ops
+ * in a block. This case is optimized away in optimize_cf(). */
+ return proj;
+ } else {
+ /* this case will NEVER be taken, kill it */
+ return new_Bad();
+ }
+ }
+ }
+ }
+ }
+ return proj;
+} /* transform_node_Proj_Cond */
+
+/**
+ * Normalizes and optimizes Cmp nodes.
+ */
+static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
+ ir_node *n = get_Proj_pred(proj);
+ ir_node *left = get_Cmp_left(n);
+ ir_node *right = get_Cmp_right(n);
+ ir_node *c = NULL;
+ tarval *tv = NULL;
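+ /* "changed" flags: 1 = operands replaced, 2 = tv changed, a new Const is needed */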
+ int changed = 0;
+ ir_mode *mode = NULL;
+ long proj_nr = get_Proj_proj(proj);
+
+ /* we can evaluate this directly */
+ switch (proj_nr) {
+ case pn_Cmp_False:
+ return new_Const(mode_b, get_tarval_b_false());
+ case pn_Cmp_True:
+ return new_Const(mode_b, get_tarval_b_true());
+ case pn_Cmp_Leg:
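+ /* Leg (ordered) is always true unless the mode has NaNs */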
+ if (!mode_is_float(get_irn_mode(left)))
+ return new_Const(mode_b, get_tarval_b_true());
+ break;
+ default:
+ break;
+ }
+
+ /* Remove unnecessary conversions */
+ /* TODO handle constants */
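+ /* Cmp(Conv(a), Conv(b)): if both Convs are value-preserving widenings,
+  * compare the original operands instead, converting the narrower one to
+  * the wider mode if the modes differ */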
+ if (is_Conv(left) && is_Conv(right)) {
+ ir_mode *mode = get_irn_mode(left);
+ ir_node *op_left = get_Conv_op(left);
+ ir_node *op_right = get_Conv_op(right);
+ ir_mode *mode_left = get_irn_mode(op_left);
+ ir_mode *mode_right = get_irn_mode(op_right);
+
+ if (smaller_mode(mode_left, mode) && smaller_mode(mode_right, mode)) {
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+
+ if (mode_left == mode_right) {
+ left = op_left;
+ right = op_right;
+ changed |= 1;
+ } else if (smaller_mode(mode_left, mode_right)) {
+ left = new_r_Conv(irg, block, op_left, mode_right);
+ right = op_right;
+ changed |= 1;
+ } else if (smaller_mode(mode_right, mode_left)) {
+ left = op_left;
+ right = new_r_Conv(irg, block, op_right, mode_left);
+ changed |= 1;
+ }
+ }
+ }
+
+ /* TODO extend to arbitrary constants */
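+ /* Cmp(Conv(a), 0): a sign-safe widening Conv preserves the comparison
+  * with zero, so compare the unconverted operand against a zero of its
+  * own mode */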
+ if (is_Conv(left) && is_Const(right) && is_Const_null(right)) {
+ ir_mode* mode = get_irn_mode(left);
+ ir_node* op = get_Conv_op(left);
+ ir_mode* op_mode = get_irn_mode(op);
+
+ if (get_mode_size_bits(mode) > get_mode_size_bits(op_mode) &&
+ (mode_is_signed(mode) || !mode_is_signed(op_mode))) {
+ ir_node *null = new_Const(op_mode, get_mode_null(op_mode));
+ set_Cmp_left( n, op);
+ set_Cmp_right(n, null);
+ return proj;
+ }
+ }
+
+ /* remove Casts */
+ if (is_Cast(left))
+ left = get_Cast_op(left);
+ if (is_Cast(right))
+ right = get_Cast_op(right);
+
+ /* remove operation of both sides if possible */
+ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+ ir_opcode lop = get_irn_opcode(left);
+
+ if (lop == get_irn_opcode(right)) {
+ ir_node *ll, *lr, *rl, *rr;
+
+ /* same operation on both sides, try to remove */
+ switch (lop) {
+ case iro_Not:
+ case iro_Minus:
+ /* ~a CMP ~b ==> a CMP b, -a CMP -b ==> a CMP b */
+ left = get_unop_op(left);
+ right = get_unop_op(right);
+ changed |= 1;
+ break;
+ case iro_Add:
+ ll = get_Add_left(left);
+ lr = get_Add_right(left);
+ rl = get_Add_left(right);
+ rr = get_Add_right(right);
+
+ if (ll == rl) {
+ /* X + a CMP X + b ==> a CMP b */
+ left = lr;
+ right = rr;
+ changed |= 1;
+ } else if (ll == rr) {
+ /* X + a CMP b + X ==> a CMP b */
+ left = lr;
+ right = rl;
+ changed |= 1;
+ } else if (lr == rl) {
+ /* a + X CMP X + b ==> a CMP b */
+ left = ll;
+ right = rr;
+ changed |= 1;
+ } else if (lr == rr) {
+ /* a + X CMP b + X ==> a CMP b */
+ left = ll;
+ right = rl;
+ changed |= 1;
+ }
+ break;
+ case iro_Sub:
+ ll = get_Sub_left(left);
+ lr = get_Sub_right(left);
+ rl = get_Sub_left(right);
+ rr = get_Sub_right(right);
+
+ if (ll == rl) {
+ /* X - a CMP X - b ==> a CMP b */
+ left = lr;
+ right = rr;
+ changed |= 1;
+ } else if (lr == rr) {
+ /* a - X CMP b - X ==> a CMP b */
+ left = ll;
+ right = rl;
+ changed |= 1;
+ }
+ break;
+ case iro_Rot:
+ if (get_Rot_right(left) == get_Rot_right(right)) {
+ /* a ROT X CMP b ROT X ==> a CMP b */
+ left = get_Rot_left(left);
+ right = get_Rot_left(right);
+ changed |= 1;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (get_irn_mode(left) == mode_b) {
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+
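+ /* boolean compares: with false < true, e.g. "a <= b" becomes "!a || b" */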
+ switch (proj_nr) {
+ case pn_Cmp_Le: return new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
+ case pn_Cmp_Lt: return new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
+ case pn_Cmp_Ge: return new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b);
+ case pn_Cmp_Gt: return new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b);
+ case pn_Cmp_Lg: return new_r_Eor(irg, block, left, right, mode_b);
+ case pn_Cmp_Eq: return new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b);
+ }
+ }
+
+ if (!get_opt_reassociation())
+ return proj;
+
+ /*
+  * First step: normalize the compare
+  * by placing the constant on the right side
+  * or moving the node with the smaller index to the left.
+  * We ignore the case where both sides are constants;
+  * that case should already be optimized away.
+  */
+ if (is_Const(right)) {
+ c = right;
+ } else if (is_Const(left)) {
+ c = left;
+ left = right;
+ right = c;
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 1;
+ } else if (get_irn_idx(left) > get_irn_idx(right)) {
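+ /* neither side is a constant: order the operands by node index so that
+  * equivalent compares are normalized identically */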
+ ir_node *t = left;
+
+ left = right;
+ right = t;
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 1;
+ }
+
+ /*
+ * Second step: Try to reduce the magnitude
+ * of a constant. This may help to generate better code
+ * later and may help to normalize more compares.
+ * Of course this is only possible for integer values.
+ */
+ if (c) {
+ mode = get_irn_mode(c);
+ tv = get_Const_tarval(c);
+
+ if (tv != tarval_bad) {
+ /* The following optimization is possible on modes without overflow
+  * on unary Minus, or on == and != only:
+  *   -a CMP c  ==>  a swap(CMP) -c
+  *
+  * Beware: in two's complement, unary Minus may overflow, so in general
+  * only == and != can be optimized, e.g.:
+  *   -MININT < 0  =/=>  MININT > 0  !!!
+  */
+ if (is_Minus(left) &&
+ (!mode_overflow_on_unary_Minus(mode) ||
+ (mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
+ tv = tarval_neg(tv);
+
+ if (tv != tarval_bad) {
+ left = get_Minus_op(left);
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 2;
+ }
+ } else if (is_Not(left) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)) {
+ tv = tarval_not(tv);
+
+ if (tv != tarval_bad) {
+ left = get_Not_op(left);
+ changed |= 2;
+ }
+ }
+
+ /* for integer modes, we have more */
+ if (mode_is_int(mode)) {
+ /* Ne includes Unordered, which cannot occur on integers.
+  * However, frontends often get this wrong, so fix it here */
+ if (proj_nr & pn_Cmp_Uo) {
+ proj_nr &= ~pn_Cmp_Uo;
+ set_Proj_proj(proj, proj_nr);
+ }
+
+ /* c > 0 : a < c ==> a <= (c-1); a >= c ==> a > (c-1) */
+ if ((proj_nr == pn_Cmp_Lt || proj_nr == pn_Cmp_Ge) &&
+ tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Gt) {
+ tv = tarval_sub(tv, get_mode_one(mode));
+
+ if (tv != tarval_bad) {
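+ /* toggling the Eq bit turns Lt into Le and Ge into Gt */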
+ proj_nr ^= pn_Cmp_Eq;
+ changed |= 2;
+ }
+ }
+ /* c < 0 : a > c ==> a >= (c+1); a <= c ==> a < (c+1) */
+ else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Le) &&
+ tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Lt) {
+ tv = tarval_add(tv, get_mode_one(mode));
+
+ if (tv != tarval_bad) {
+ proj_nr ^= pn_Cmp_Eq;
+ changed |= 2;
+ }
+ }
+
+ /* the following reassociations work only for == and != */
+ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+
+ /* a-b == 0 ==> a == b, a-b != 0 ==> a != b */
+ if (tarval_is_null(tv) && is_Sub(left)) {
+ right = get_Sub_right(left);
+ left = get_Sub_left(left);
+
+ tv = value_of(right);
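+ /* plain "=" on purpose: bit 2 must be cleared, since right is now a
+  * real node and tv may even be tarval_bad */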
+ changed = 1;
+ }
+
+ if (tv != tarval_bad) {
+ /* a-c1 == c2 ==> a == c2+c1, a-c1 != c2 ==> a != c2+c1 */
+ if (is_Sub(left)) {
+ ir_node *c1 = get_Sub_right(left);
+ tarval *tv2 = value_of(c1);
+
+ if (tv2 != tarval_bad) {
+ tv2 = tarval_add(tv, tv2);
+
+ if (tv2 != tarval_bad) {
+ left = get_Sub_left(left);
+ tv = tv2;
+ changed |= 2;
+ }
+ }
+ }
+ /* a+c1 == c2 ==> a == c2-c1, a+c1 != c2 ==> a != c2-c1 */
+ else if (is_Add(left)) {
+ ir_node *a_l = get_Add_left(left);
+ ir_node *a_r = get_Add_right(left);
+ ir_node *a;
+ tarval *tv2;
+
+ if (is_Const(a_l)) {
+ a = a_r;
+ tv2 = value_of(a_l);
+ } else {
+ a = a_l;
+ tv2 = value_of(a_r);
+ }
+
+ if (tv2 != tarval_bad) {
+ tv2 = tarval_sub(tv, tv2);
+
+ if (tv2 != tarval_bad) {
+ left = a;
+ tv = tv2;
+ changed |= 2;
+ }
+ }
+ }
+ /* -a == c ==> a == -c, -a != c ==> a != -c */
+ else if (is_Minus(left)) {
+ tarval *tv2 = tarval_sub(get_mode_null(mode), tv);
+
+ if (tv2 != tarval_bad) {
+ left = get_Minus_op(left);
+ tv = tv2;
+ changed |= 2;
+ }
+ }
+ }
+ } /* == or != */
+ /* the following reassociations work only for <= and < */
+ else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+ if (tv != tarval_bad) {
+ /* c >= 0 : Abs(a) <= c ==> (unsigned)(a + c) <= 2*c */
+ if (get_irn_op(left) == op_Abs) { // TODO something is missing here
+ }
+ }
+ }
+ } /* mode_is_int */
+
+ /*
+  * optimization for And:
+  *   And(x, C) == C  ==>  And(x, C) != 0
+  *   And(x, C) != C  ==>  And(x, C) == 0
+  *
+  * if C is a single-bit constant.
+  */
+ if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) && is_And(left)) {
+ if (tarval_is_single_bit(tv)) {
+ /* check for a constant match. We must compare the tarvals here,
+  * because our constant might already have been changed */
+ ir_node *la = get_And_left(left);
+ ir_node *ra = get_And_right(left);
+ if ((is_Const(la) && get_Const_tarval(la) == tv) ||
+ (is_Const(ra) && get_Const_tarval(ra) == tv)) {
+ /* fine: do the transformation */
+ tv = get_mode_null(get_tarval_mode(tv));
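+ /* XOR with Leg swaps Eq and Lg: == becomes != and vice versa */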
+ proj_nr ^= pn_Cmp_Leg;
+ changed |= 2;
+ }
+ }
+ }
+ } /* tarval != bad */
+ }
+
+ if (changed) {
+ ir_node *block = get_irn_n(n, -1); /* Beware of get_nodes_Block() */
+
+ if (changed & 2) /* need a new Const */
+ right = new_Const(mode, tv);
+
+ /* create a new compare */
+ n = new_rd_Cmp(get_irn_dbg_info(n), current_ir_graph, block, left, right);
+
+ set_Proj_pred(proj, n);
+ set_Proj_proj(proj, proj_nr);
+ }
+
+ return proj;
+} /* transform_node_Proj_Cmp */
+
+/**
+ * Does all optimizations on nodes that must be done on their Projs
+ * because new nodes are created.
+ */
+static ir_node *transform_node_Proj(ir_node *proj) {
+ ir_node *n = get_Proj_pred(proj);
+
+ switch (get_irn_opcode(n)) {
+ case iro_Div:
+ return transform_node_Proj_Div(proj);
+
+ case iro_Mod:
+ return transform_node_Proj_Mod(proj);
+
+ case iro_DivMod:
+ return transform_node_Proj_DivMod(proj);
+
+ case iro_Cond:
+ return transform_node_Proj_Cond(proj);
+
+ case iro_Cmp:
+ return transform_node_Proj_Cmp(proj);
+
+ case iro_Tuple:
+ /* should not happen, but if it does it will be optimized away */
+ return equivalent_node_Proj(proj);
+
+ default:
+ /* do nothing */
+ return proj;
+ }
+} /* transform_node_Proj */
+
+/**
+ * Move Confirms down through Phi nodes.
+ */
+static ir_node *transform_node_Phi(ir_node *phi) {
+ int i, n;
+ ir_mode *mode = get_irn_mode(phi);
+
+ if (mode_is_reference(mode)) {
+ n = get_irn_arity(phi);
+
+ /* Beware of Phi0 */
+ if (n > 0) {
+ ir_node *pred = get_irn_n(phi, 0);
+ ir_node *bound, *new_Phi, *block, **in;
+ pn_Cmp pnc;
+
+ if (! is_Confirm(pred))
+ return phi;
+
+ bound = get_Confirm_bound(pred);
+ pnc = get_Confirm_cmp(pred);
+
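+ /* all predecessors must be Confirms with the same bound and relation;
+  * collect the unconfirmed values */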
+ NEW_ARR_A(ir_node *, in, n);
+ in[0] = get_Confirm_value(pred);
+
+ for (i = 1; i < n; ++i) {
+ pred = get_irn_n(phi, i);
+
+ if (! is_Confirm(pred) ||
+ get_Confirm_bound(pred) != bound ||
+ get_Confirm_cmp(pred) != pnc)
+ return phi;
+ in[i] = get_Confirm_value(pred);
+ }
+ /* move the Confirm nodes "behind" the Phi */
+ block = get_irn_n(phi, -1);
+ new_Phi = new_r_Phi(current_ir_graph, block, n, in, get_irn_mode(phi));
+ return new_r_Confirm(current_ir_graph, block, new_Phi, bound, pnc);
+ }
+ }
+ return phi;
+} /* transform_node_Phi */
+
+/**
+ * Returns the operands of a commutative bin-op, if one operand is