+}
+
+/**
+ * returns the operands of a commutative bin-op, if one operand is
+ * a const, it is returned as the second one.
+ */
+static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c)
+{
+ ir_node *op_a = get_binop_left(binop);
+ ir_node *op_b = get_binop_right(binop);
+
+ assert(is_op_commutative(get_irn_op(binop)));
+
+ if (get_irn_op(op_a) == op_Const) {
+ *a = op_b;
+ *c = op_a;
+ }
+ else {
+ *a = op_a;
+ *c = op_b;
+ }
+}
+
/**
 * Optimize a Or(And(Or(And(v,c4),c3),c2),c1) pattern if possible.
 * Such pattern may arise in bitfield stores.
 *
 * value  c4                  value      c4 & c2
 *        AND     c3                    AND           c1 | c3
 *              OR     c2      ===>               OR
 *                   AND    c1
 *                       OR
 *
 * @param or  the root Or node of the candidate pattern
 * @return the Or node (updated in place when the rewrite fired),
 *         unchanged when the pattern or its side conditions fail
 */
static ir_node *transform_node_Or_bf_store(ir_node *or)
{
  ir_node *and, *c1;
  ir_node *or_l, *c2;
  ir_node *and_l, *c3;
  ir_node *value, *c4;
  ir_node *new_and, *new_const, *block;
  ir_mode *mode = get_irn_mode(or);

  tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;

  /* walk down the tree; get_comm_Binop_Ops always delivers the
     Const operand (if any) as the second output, so we only have
     to test one operand per level */
  get_comm_Binop_Ops(or, &and, &c1);
  if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
    return or;

  get_comm_Binop_Ops(and, &or_l, &c2);
  if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
    return or;

  get_comm_Binop_Ops(or_l, &and_l, &c3);
  if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
    return or;

  get_comm_Binop_Ops(and_l, &value, &c4);
  if (get_irn_op(c4) != op_Const)
    return or;

  /* ok, found the pattern, check for conditions */
  assert(mode == get_irn_mode(and));
  assert(mode == get_irn_mode(or_l));
  assert(mode == get_irn_mode(and_l));

  tv1 = get_Const_tarval(c1);
  tv2 = get_Const_tarval(c2);
  tv3 = get_Const_tarval(c3);
  tv4 = get_Const_tarval(c4);

  /* the two And masks must together cover every bit, otherwise
     some bit of the stored value would be lost by the rewrite */
  tv = tarval_or(tv4, tv2);
  if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
    /* have at least one 0 at the same bit position */
    return or;
  }

  /* the inner Or constant must lie entirely outside the inner And mask */
  n_tv4 = tarval_not(tv4);
  if (tv3 != tarval_and(tv3, n_tv4)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  /* same for the outer Or constant vs. the outer And mask */
  n_tv2 = tarval_not(tv2);
  if (tv1 != tarval_and(tv1, n_tv2)) {
    /* bit in the or_mask is outside the and_mask */
    return or;
  }

  /* ok, all conditions met */
  block = get_irn_n(or, -1);

  /* build And(value, c4 & c2) and rewrite the top Or in place */
  new_and = new_r_And(current_ir_graph, block,
      value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);

  new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));

  set_Or_left(or, new_and);
  set_Or_right(or, new_const);

  /* check for more: deeper bitfield-store chains may now match */
  return transform_node_Or_bf_store(or);
}
+
+/**
+ * Optimize an Or(shl(x, c), shr(x, bits - c)) into a Rot
+ */
+static ir_node *transform_node_Or_Rot(ir_node *or)
+{
+ ir_mode *mode = get_irn_mode(or);
+ ir_node *shl, *shr, *block;
+ ir_node *irn, *x, *c1, *c2, *v, *sub, *n;
+ tarval *tv1, *tv2;
+
+ if (! mode_is_int(mode))
+ return or;
+
+ shl = get_binop_left(or);
+ shr = get_binop_right(or);
+
+ if (get_irn_op(shl) == op_Shr) {
+ if (get_irn_op(shr) != op_Shl)
+ return or;
+
+ irn = shl;
+ shl = shr;
+ shr = irn;
+ }
+ else if (get_irn_op(shl) != op_Shl)
+ return or;
+ else if (get_irn_op(shr) != op_Shr)
+ return or;
+
+ x = get_Shl_left(shl);
+ if (x != get_Shr_left(shr))
+ return or;
+
+ c1 = get_Shl_right(shl);
+ c2 = get_Shr_right(shr);
+ if (get_irn_op(c1) == op_Const && get_irn_op(c2) == op_Const) {
+ tv1 = get_Const_tarval(c1);
+ if (! tarval_is_long(tv1))
+ return or;
+
+ tv2 = get_Const_tarval(c2);
+ if (! tarval_is_long(tv2))
+ return or;
+
+ if (get_tarval_long(tv1) + get_tarval_long(tv2)
+ != get_mode_size_bits(mode))
+ return or;
+
+ /* yet, condition met */
+ block = get_irn_n(or, -1);
+
+ n = new_r_Rot(current_ir_graph, block, x, c1, mode);
+
+ DBG_OPT_ALGSIM1(or, shl, shr, n, FS_OPT_OR_SHFT_TO_ROT);
+ return n;
+ }
+ else if (get_irn_op(c1) == op_Sub) {
+ v = c2;
+ sub = c1;
+
+ if (get_Sub_right(sub) != v)
+ return or;
+
+ c1 = get_Sub_left(sub);
+ if (get_irn_op(c1) != op_Const)
+ return or;
+
+ tv1 = get_Const_tarval(c1);
+ if (! tarval_is_long(tv1))
+ return or;
+
+ if (get_tarval_long(tv1) != get_mode_size_bits(mode))
+ return or;
+
+ /* yet, condition met */
+ block = get_nodes_block(or);
+
+ /* a Rot right is not supported, so use a rot left */
+ n = new_r_Rot(current_ir_graph, block, x, sub, mode);
+
+ DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
+ return n;
+ }
+ else if (get_irn_op(c2) == op_Sub) {
+ v = c1;
+ sub = c2;
+
+ c1 = get_Sub_left(sub);
+ if (get_irn_op(c1) != op_Const)
+ return or;
+
+ tv1 = get_Const_tarval(c1);
+ if (! tarval_is_long(tv1))
+ return or;
+
+ if (get_tarval_long(tv1) != get_mode_size_bits(mode))
+ return or;
+
+ /* yet, condition met */
+ block = get_irn_n(or, -1);
+
+ /* a Rot Left */
+ n = new_r_Rot(current_ir_graph, block, x, v, mode);
+
+ DBG_OPT_ALGSIM0(or, n, FS_OPT_OR_SHFT_TO_ROT);
+ return n;
+ }
+
+ return or;
+}
+
+/**
+ * Optimize an Or
+ */
+static ir_node *transform_node_Or(ir_node *or)
+{
+ or = transform_node_Or_bf_store(or);
+ or = transform_node_Or_Rot(or);
+
+ return or;
+}
+
+/* forward */
+static ir_node *transform_node(ir_node *n);
+
/**
 * Optimize (a >> c1) >> c2), works for Shr, Shrs, Shl.
 *
 * Combines two stacked shifts of the same kind with constant counts
 * into one shift by the summed count, when that sum is still below
 * the mode's modulo-shift bound.
 *
 * Should be moved to reassociation?
 *
 * @param n  the outer shift node
 * @return the combined shift (further transformed) or the unchanged node
 */
static ir_node *transform_node_shift(ir_node *n)
{
  ir_node *left, *right;
  tarval *tv1, *tv2, *res;
  ir_mode *mode;
  int modulo_shf, flag;

  left = get_binop_left(n);

  /* different operations: e.g. Shr over Shl must not be merged */
  if (get_irn_op(left) != get_irn_op(n))
    return n;

  /* both shift counts must be computable constants */
  right = get_binop_right(n);
  tv1 = value_of(right);
  if (tv1 == tarval_bad)
    return n;

  tv2 = value_of(get_binop_right(left));
  if (tv2 == tarval_bad)
    return n;

  /* NOTE(review): assumes both counts share a tarval mode so the sum
     is well-defined — confirm against how shift counts are built */
  res = tarval_add(tv1, tv2);

  /* beware: a simple replacement works only, if res < modulo shift */
  mode = get_irn_mode(n);

  flag = 0;

  /* a hardware modulo shift (e.g. x86 masks counts to 5 bits) would
     make the merged count wrap, so only merge below that bound;
     modulo_shf == 0 means no wrapping, merging is always safe */
  modulo_shf = get_mode_modulo_shift(mode);
  if (modulo_shf > 0) {
    tarval *modulo = new_tarval_from_long(modulo_shf, get_tarval_mode(res));

    if (tarval_cmp(res, modulo) & pn_Cmp_Lt)
      flag = 1;
  }
  else
    flag = 1;

  if (flag) {
    /* ok, we can replace it: build a node of the same opcode as n,
       shifting the innermost value by the summed count */
    ir_node *in[2], *irn, *block = get_irn_n(n, -1);

    in[0] = get_binop_left(left);
    in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);

    irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);

    DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);

    /* the new node may itself sit on another same-kind shift */
    return transform_node(irn);
  }
  return n;
}

/* all three shift kinds share the same merge rule above */
#define transform_node_Shr  transform_node_shift
#define transform_node_Shrs transform_node_shift
#define transform_node_Shl  transform_node_shift
+
+/**
+ * Remove dead blocks and nodes in dead blocks
+ * in keep alive list. We do not generate a new End node.
+ */
+static ir_node *transform_node_End(ir_node *n) {
+ int i, n_keepalives = get_End_n_keepalives(n);
+
+ for (i = 0; i < n_keepalives; ++i) {
+ ir_node *ka = get_End_keepalive(n, i);
+ if (is_Block(ka)) {
+ if (is_Block_dead(ka)) {
+ set_End_keepalive(n, i, new_Bad());
+ }
+ }
+ else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka)))
+ set_End_keepalive(n, i, new_Bad());
+ }
+ return n;
+}
+
+/**
+ * Optimize a Mux into some simpler cases.
+ */
+static ir_node *transform_node_Mux(ir_node *n)
+{
+ ir_node *oldn = n, *sel = get_Mux_sel(n);
+ ir_mode *mode = get_irn_mode(n);
+
+ if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(mode)) {
+ ir_node *cmp = get_Proj_pred(sel);
+ long proj_nr = get_Proj_proj(sel);
+ ir_node *f = get_Mux_false(n);
+ ir_node *t = get_Mux_true(n);
+
+ if (get_irn_op(cmp) == op_Cmp && classify_Const(get_Cmp_right(cmp)) == CNST_NULL) {
+ ir_node *block = get_irn_n(n, -1);
+
+ /*
+ * Note: normalization puts the constant on the right site,
+ * so we check only one case.
+ *
+ * Note further that these optimization work even for floating point
+ * with NaN's because -NaN == NaN.
+ * However, if +0 and -0 is handled differently, we cannot use the first one.
+ */
+ if (get_irn_op(f) == op_Minus &&
+ get_Minus_op(f) == t &&
+ get_Cmp_left(cmp) == t) {
+
+ if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
+ /* Mux(a >=/> 0, -a, a) ==> Abs(a) */
+ n = new_rd_Abs(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ t, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+ return n;
+ }
+ else if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+ /* Mux(a <=/< 0, -a, a) ==> Minus(Abs(a)) */
+ n = new_rd_Abs(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ t, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ n, mode);
+
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+ return n;
+ }
+ }
+ else if (get_irn_op(t) == op_Minus &&
+ get_Minus_op(t) == f &&
+ get_Cmp_left(cmp) == f) {
+
+ if (proj_nr == pn_Cmp_Le || proj_nr == pn_Cmp_Lt) {
+ /* Mux(a <=/< 0, a, -a) ==> Abs(a) */
+ n = new_rd_Abs(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ f, mode);
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+ return n;
+ }
+ else if (proj_nr == pn_Cmp_Ge || proj_nr == pn_Cmp_Gt) {
+ /* Mux(a >=/> 0, a, -a) ==> Minus(Abs(a)) */
+ n = new_rd_Abs(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ f, mode);
+ n = new_rd_Minus(get_irn_dbg_info(n),
+ current_ir_graph,
+ block,
+ n, mode);
+
+ DBG_OPT_ALGSIM1(oldn, cmp, sel, n, FS_OPT_MUX_TO_ABS);
+ return n;
+ }
+ }