+ n = transform_bitwise_distributive(n, transform_node_Or);
+
+ return n;
+} /* transform_node_Or */
+
+
+/* forward */
+static ir_node *transform_node(ir_node *n);
+
+/**
+ * Optimize ((a >> c1) >> c2), works for Shr, Shrs, Shl, Rotl.
+ *
+ * Combines two nested shifts of the same opcode with constant shift
+ * amounts into a single shift by the sum of the amounts.
+ *
+ * Should be moved to reassociation?
+ */
+static ir_node *transform_node_shift(ir_node *n) {
+ ir_node *left, *right;
+ ir_mode *mode;
+ tarval *tv1, *tv2, *res;
+ ir_node *in[2], *irn, *block;
+
+ left = get_binop_left(n);
+
+ /* different operations: only fold when inner and outer shift are the same op */
+ if (get_irn_op(left) != get_irn_op(n))
+ return n;
+
+ right = get_binop_right(n);
+ tv1 = value_of(right);
+ /* the outer shift amount must be a compile-time constant */
+ if (tv1 == tarval_bad)
+ return n;
+
+ tv2 = value_of(get_binop_right(left));
+ /* the inner shift amount must be a compile-time constant, too */
+ if (tv2 == tarval_bad)
+ return n;
+
+ /* combined shift amount */
+ res = tarval_add(tv1, tv2);
+ mode = get_irn_mode(n);
+
+ /* beware: a simple replacement works only, if res < modulo shift */
+ if (!is_Rotl(n)) {
+ int modulo_shf = get_mode_modulo_shift(mode);
+ assert(modulo_shf >= (int) get_mode_size_bits(mode));
+ if (modulo_shf > 0) {
+ tarval *modulo = new_tarval_from_long(modulo_shf,
+ get_tarval_mode(res));
+
+ /* shifting too much */
+ if (!(tarval_cmp(res, modulo) & pn_Cmp_Lt)) {
+ if (is_Shrs(n)) {
+ /* arithmetic right shift saturates: shifting by >= width
+ * has the same result as shifting by (width - 1) */
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *block = get_nodes_block(n);
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *cnst = new_Const(mode_Iu, new_tarval_from_long(get_mode_size_bits(mode)-1, mode_Iu));
+ return new_rd_Shrs(dbgi, irg, block, get_binop_left(left),
+ cnst, mode);
+ }
+
+ /* Shl/Shr by >= the mode width always produces 0 */
+ return new_Const(mode, get_mode_null(mode));
+ }
+ }
+ } else {
+ /* rotation is cyclic: reduce the combined count modulo the bit width */
+ res = tarval_mod(res, new_tarval_from_long(get_mode_size_bits(mode), get_tarval_mode(res)));
+ }
+
+ /* ok, we can replace it */
+ block = get_nodes_block(n);
+
+ in[0] = get_binop_left(left);
+ in[1] = new_r_Const(current_ir_graph, block, get_tarval_mode(res), res);
+
+ /* build a new shift of the same opcode with the combined constant amount */
+ irn = new_ir_node(NULL, current_ir_graph, block, get_irn_op(n), mode, 2, in);
+
+ DBG_OPT_ALGSIM0(n, irn, FS_OPT_REASSOC_SHIFT);
+
+ /* the folded node may enable further transformations */
+ return transform_node(irn);
+} /* transform_node_shift */
+
+/**
+ * normalisation: (x & c1) >> c2 to (x >> c2) & (c1 >> c2)
+ * (we can use:
+ * - and, or, xor instead of &
+ * - Shl, Shr, Shrs, Rotl instead of >>
+ * with a special case for Or/Xor + Shrs)
+ */
+static ir_node *transform_node_bitop_shift(ir_node *n) {
+ ir_node *left;
+ ir_node *right = get_binop_right(n);
+ ir_mode *mode = get_irn_mode(n);
+ ir_node *bitop_left;
+ ir_node *bitop_right;
+ ir_op *op_left;
+ ir_graph *irg;
+ ir_node *block;
+ dbg_info *dbgi;
+ ir_node *new_shift;
+ ir_node *new_bitop;
+ ir_node *new_const;
+ tarval *tv1;
+ tarval *tv2;
+ tarval *tv_shift;
+
+ assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n));
+
+ if (!is_Const(right))
+ return n;
+
+ left = get_binop_left(n);
+ op_left = get_irn_op(left);
+ if (op_left != op_And && op_left != op_Or && op_left != op_Eor)
+ return n;
+
+ /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
+ if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) {
+ /* TODO: test if the sign bit is affected */
+ return n;
+ }
+
+ bitop_right = get_binop_right(left);
+ if (!is_Const(bitop_right))
+ return n;
+
+ bitop_left = get_binop_left(left);