+ /* remove Casts */
+ if (is_Cast(left))
+ left = get_Cast_op(left);
+ if (is_Cast(right))
+ right = get_Cast_op(right);
+
+	/* remove the same operation from both sides if possible */
+ if (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) {
+ ir_opcode lop = get_irn_opcode(left);
+
+ if (lop == get_irn_opcode(right)) {
+ ir_node *ll, *lr, *rl, *rr;
+
+ /* same operation on both sides, try to remove */
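+			/* note: the Eq/Lg guard above is essential: Not, Minus, Add,
+			 * Sub and Rot are all injective in modular arithmetic, so
+			 * stripping them preserves ==/!=, but wrap-around breaks the
+			 * ordered relations (<, <=, >, >=) */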
+ switch (lop) {
+ case iro_Not:
+ case iro_Minus:
+				/* ~a CMP ~b ==> a CMP b, -a CMP -b ==> a CMP b */
+ left = get_unop_op(left);
+ right = get_unop_op(right);
+ changed |= 1;
+ break;
+ case iro_Add:
+ ll = get_Add_left(left);
+ lr = get_Add_right(left);
+ rl = get_Add_left(right);
+ rr = get_Add_right(right);
+
+ if (ll == rl) {
+ /* X + a CMP X + b ==> a CMP b */
+ left = lr;
+ right = rr;
+ changed |= 1;
+ } else if (ll == rr) {
+ /* X + a CMP b + X ==> a CMP b */
+ left = lr;
+ right = rl;
+ changed |= 1;
+ } else if (lr == rl) {
+ /* a + X CMP X + b ==> a CMP b */
+ left = ll;
+ right = rr;
+ changed |= 1;
+ } else if (lr == rr) {
+ /* a + X CMP b + X ==> a CMP b */
+ left = ll;
+ right = rl;
+ changed |= 1;
+ }
+ break;
+ case iro_Sub:
+ ll = get_Sub_left(left);
+ lr = get_Sub_right(left);
+ rl = get_Sub_left(right);
+ rr = get_Sub_right(right);
+
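+				/* Sub is not commutative, so unlike Add only the
+				 * same-position operand pairs can be cancelled */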
+ if (ll == rl) {
+ /* X - a CMP X - b ==> a CMP b */
+ left = lr;
+ right = rr;
+ changed |= 1;
+ } else if (lr == rr) {
+ /* a - X CMP b - X ==> a CMP b */
+ left = ll;
+ right = rl;
+ changed |= 1;
+ }
+ break;
+ case iro_Rot:
+ if (get_Rot_right(left) == get_Rot_right(right)) {
+					/* a ROT X CMP b ROT X ==> a CMP b */
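+					/* rotation by the same amount is a bijection,
+					 * so it preserves ==/!= */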
+ left = get_Rot_left(left);
+ right = get_Rot_left(right);
+ changed |= 1;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (get_irn_mode(left) == mode_b) {
+ ir_graph *irg = current_ir_graph;
+ ir_node *block = get_nodes_block(n);
+
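+		/* on mode_b (false < true) the relations reduce to boolean
+		 * formulas: a < b is !a && b, a <= b is the implication !a || b,
+		 * != is Xor and == its negation */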
+ switch (proj_nr) {
+ case pn_Cmp_Le: return new_r_Or( irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
+ case pn_Cmp_Lt: return new_r_And(irg, block, new_r_Not(irg, block, left, mode_b), right, mode_b);
+ case pn_Cmp_Ge: return new_r_Or( irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b);
+ case pn_Cmp_Gt: return new_r_And(irg, block, left, new_r_Not(irg, block, right, mode_b), mode_b);
+ case pn_Cmp_Lg: return new_r_Eor(irg, block, left, right, mode_b);
+ case pn_Cmp_Eq: return new_r_Not(irg, block, new_r_Eor(irg, block, left, right, mode_b), mode_b);
+ }
+ }
+
+ if (!get_opt_reassociation())
+ return proj;
+
+	/*
+	 * First step: normalize the compare op
+	 * by placing the constant on the right side
+	 * or moving the node with the smaller index to the left.
+	 * We ignore the case where both sides are constants,
+	 * as that should already have been optimized away.
+	 */
+ if (is_Const(right)) {
+ c = right;
+ } else if (is_Const(left)) {
+ c = left;
+ left = right;
+ right = c;
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 1;
+ } else if (get_irn_idx(left) > get_irn_idx(right)) {
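+		/* neither side is a constant: order the operands by node index
+		 * so every Cmp gets a canonical form */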
+ ir_node *t = left;
+
+ left = right;
+ right = t;
+
+ proj_nr = get_inversed_pnc(proj_nr);
+ changed |= 1;
+ }
+
+ /*
+ * Second step: Try to reduce the magnitude
+ * of a constant. This may help to generate better code
+ * later and may help to normalize more compares.
+ * Of course this is only possible for integer values.
+ */
+ if (c) {
+ mode = get_irn_mode(c);
+ tv = get_Const_tarval(c);
+
+ if (tv != tarval_bad) {
+			/* The following optimization is possible on modes whose unary
+			 * Minus cannot overflow, or for == and != on any mode:
+			 * -a CMP c  ==>  a swap(CMP) -c
+			 *
+			 * Beware: in two's complement the unary Minus can overflow, so
+			 * for ordered relations only == and != may be transformed:
+			 * -MININT < 0 does NOT imply MININT > 0 (-MININT == MININT) !!!
+			 */
+ if (is_Minus(left) &&
+ (!mode_overflow_on_unary_Minus(mode) ||
+ (mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
+ tv = tarval_neg(tv);
+
+ if (tv != tarval_bad) {