+/**
+ * Try to reassociate a commutative binop of the shape c1 .OP. (c2 .OP. x).
+ *
+ * Implements rules R7, R8, R9, R10: rewrites
+ *     c1 .OP. (c2 .OP. x)  =>  x .OP. (c1 .OP. c2)
+ * so that the purely constant sub-expression (c1 .OP. c2) can be folded
+ * by optimize_node().
+ *
+ * @param node  in/out: points to the node to transform; on success it is
+ *              updated to the replacement node
+ * @return non-zero if the graph was changed, 0 otherwise
+ */
+static int reassoc_commutative(ir_node **node)
+{
+ ir_node *n = *node;
+ ir_op *op = get_irn_op(n);
+ ir_node *block = get_nodes_block(n);
+ ir_node *t1, *c1;
+
+ /* split n into its "tree" operand t1 and its "constant" operand c1 */
+ get_comm_Binop_ops(n, &t1, &c1);
+
+ /* only interesting if the tree operand is the same operation again */
+ if (get_irn_op(t1) == op) {
+ ir_node *t2, *c2;
+ const_class_t c_c1, c_c2, c_t2;
+
+ get_comm_Binop_ops(t1, &t2, &c2);
+
+ /* do not optimize Bad nodes, will fail later */
+ if (is_Bad(t2))
+ return 0;
+
+ /* classify all three operands relative to the current block
+ * (NO_CONSTANT / REGION_CONST / REAL_CONSTANT -- see get_const_class) */
+ c_c1 = get_const_class(c1, block);
+ c_c2 = get_const_class(c2, block);
+ c_t2 = get_const_class(t2, block);
+
+ /* NOTE: the bitwise '&' and the '^' trick below operate on the
+ * const_class_t enumeration values, not on booleans; presumably the
+ * encoding of the enum makes this test equivalent to the prose
+ * description in the comment -- verify against const_class_t. */
+ if ( ((c_c1 > NO_CONSTANT) & (c_t2 > NO_CONSTANT)) &&
+ ((((c_c1 ^ c_c2 ^ c_t2) & REGION_CONST) == 0) || ((c_c1 & c_c2 & c_t2) == REGION_CONST)) ) {
+ /* All three are constant and either all are constant expressions
+ * or two of them are:
+ * then applying this rule would lead into a cycle
+ *
+ * Note that if t2 is a constant so is c2 hence we save one test.
+ */
+ return 0;
+ }
+
+ if ((c_c1 != NO_CONSTANT) /* & (c_c2 != NO_CONSTANT) */) {
+ /* handles rules R7, R8, R9, R10:
+ * convert c1 .OP. (c2 .OP. x) => x .OP. (c1 .OP. c2)
+ */
+ ir_node *irn, *in[2];
+ ir_mode *mode, *mode_c1 = get_irn_mode(c1), *mode_c2 = get_irn_mode(c2);
+
+ /* It might happen, that c1 and c2 have different modes, for
+ * instance Is and Iu.
+ * Handle this here.
+ */
+ if (mode_c1 != mode_c2) {
+ if (mode_is_int(mode_c1) && mode_is_int(mode_c2)) {
+ /* get the bigger one */
+ if (get_mode_size_bits(mode_c1) > get_mode_size_bits(mode_c2))
+ c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
+ else if (get_mode_size_bits(mode_c1) < get_mode_size_bits(mode_c2))
+ c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
+ else {
+ /* Try to cast the real const */
+ if (c_c1 == REAL_CONSTANT)
+ c1 = new_r_Conv(current_ir_graph, block, c1, mode_c2);
+ else
+ c2 = new_r_Conv(current_ir_graph, block, c2, mode_c1);
+ }
+ }
+ }
+
+ /* build the inner constant expression (c1 .OP. c2) first, letting
+ * optimize_node() fold it ... */
+ in[0] = c1;
+ in[1] = c2;
+
+ mode = get_mode_from_ops(in[0], in[1]);
+ in[1] = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
+ in[0] = t2;
+
+ /* ... then build the outer node t2 .OP. (c1 .OP. c2) */
+ mode = get_mode_from_ops(in[0], in[1]);
+ irn = optimize_node(new_ir_node(NULL, current_ir_graph, block, op, mode, 2, in));
+
+ DBG((dbg, LEVEL_5, "Applied: %n .%s. (%n .%s. %n) => %n .%s. (%n .%s. %n)\n",
+ c1, get_irn_opname(n), c2, get_irn_opname(n), t2,
+ t2, get_irn_opname(n), c1, get_irn_opname(n), c2));
+ /*
+ * In some rare cases it can really happen that we get the same
+ * node back. This might be happen in dead loops, were the Phi
+ * nodes are already gone away. So check this.
+ */
+ if (n != irn) {
+ exchange(n, irn);
+ *node = irn;
+ return 1;
+ }
+ }
+ }
+ return 0;
+} /* reassoc_commutative */
+
+#else
+
+/* File-scope state shared by the alternative reassociation
+ * implementation below (collect_args() and friends). */
+static ir_op *commutative_op; /* opcode of the operand tree currently being flattened */
+static ir_node *commutative_block; /* NOTE(review): not referenced in this chunk -- presumably the block of the tree's root; confirm against later code */
+static struct obstack commutative_args; /* leaf operands collected by collect_args() */
+
+/**
+ * Recursively collect the leaf operands of a tree of binops that all
+ * share the operation 'commutative_op', pushing each leaf (in
+ * left-to-right order) onto the 'commutative_args' obstack.
+ *
+ * A child is descended into only if it has the same opcode AND is not
+ * used anywhere else (out-edges not computed yet, or exactly one user);
+ * otherwise it is treated as a leaf. This keeps shared sub-expressions
+ * intact when the flattened tree is later rebuilt.
+ */
+static void collect_args(ir_node *node)
+{
+ ir_node *left = get_binop_left(node);
+ ir_node *right = get_binop_right(node);
+
+ if (get_irn_op(left) == commutative_op
+ && (!get_irn_outs_computed(left) || get_irn_n_outs(left) == 1)) {
+ collect_args(left);
+ } else {
+ obstack_ptr_grow(&commutative_args, left);
+ }
+
+ if (get_irn_op(right) == commutative_op
+ && (!get_irn_outs_computed(right) || get_irn_n_outs(right) == 1)) {
+ collect_args(right);
+ } else {
+ obstack_ptr_grow(&commutative_args, right);
+ }
+
+#ifndef NDEBUG
+ {
+ /* Sanity check: all nodes in the tree share one mode, except a
+ * reference-mode Add (pointer arithmetic), where only one operand
+ * needs to carry the reference mode. */
+ ir_mode *mode = get_irn_mode(node);
+ if (is_Add(node) && mode_is_reference(mode)) {
+ assert(get_irn_mode(left) == mode || get_irn_mode(right) == mode);
+ } else {
+ assert(get_irn_mode(left) == mode);
+ assert(get_irn_mode(right) == mode);
+ }
+ }
+#endif
+}
+
+static int compare_nodes(const ir_node *node1, const ir_node *node2)