- if (is_Cmp(a) && is_Cmp(b)) {
- ir_node *a_left = get_Cmp_left(a);
- ir_node *a_right = get_Cmp_right(a);
- ir_node *b_left = get_Cmp_left(b);
- ir_node *b_right = get_Cmp_right(b);
- ir_relation a_relation = get_Cmp_relation(a);
- ir_relation b_relation = get_Cmp_relation(b);
- /* we can combine the relations of two compares with the same
- * operands */
- if (a_left == b_left && b_left == b_right) {
- dbg_info *dbgi = get_irn_dbg_info(n);
- ir_node *block = get_nodes_block(n);
- ir_relation new_relation = a_relation & b_relation;
- return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
- }
- /* Cmp(a==0) and Cmp(b==0) can be optimized to Cmp(a|b==0) */
- if (is_Const(a_right) && is_Const_null(a_right)
- && is_Const(b_right) && is_Const_null(b_right)
- && a_relation == b_relation && a_relation == ir_relation_equal
- && !mode_is_float(get_irn_mode(a_left))
- && !mode_is_float(get_irn_mode(b_left))) {
- dbg_info *dbgi = get_irn_dbg_info(n);
- ir_node *block = get_nodes_block(n);
- ir_mode *mode = get_irn_mode(a_left);
- ir_node *n_b_left = get_irn_mode(b_left) != mode ?
- new_rd_Conv(dbgi, block, b_left, mode) : b_left;
- ir_node *or = new_rd_Or(dbgi, block, a_left, n_b_left, mode);
- ir_graph *irg = get_irn_irg(n);
- ir_node *zero = create_zero_const(irg, mode);
- return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal);
- }
+ if (is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
+ return n;
+
+ assert(is_Shrs(n) || is_Shr(n) || is_Shl(n) || is_Rotl(n));
+
+ if (!is_Const(right))
+ return n;
+
+ left = get_binop_left(n);
+ op_left = get_irn_op(left);
+ if (op_left != op_And && op_left != op_Or && op_left != op_Eor)
+ return n;
+
+ /* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
+ if (is_Shrs(n) && (op_left == op_Or || op_left == op_Eor)) {
+	/* TODO: test if sign bit is affected */
+ return n;
+ }
+
+ bitop_right = get_binop_right(left);
+ if (!is_Const(bitop_right))
+ return n;
+
+ bitop_left = get_binop_left(left);
+
+ block = get_nodes_block(n);
+ dbgi = get_irn_dbg_info(n);
+ tv1 = get_Const_tarval(bitop_right);
+ tv2 = get_Const_tarval(right);
+
+ assert(get_tarval_mode(tv1) == mode);
+
+ if (is_Shl(n)) {
+ new_shift = new_rd_Shl(dbgi, block, bitop_left, right, mode);
+ tv_shift = tarval_shl(tv1, tv2);
+ } else if (is_Shr(n)) {
+ new_shift = new_rd_Shr(dbgi, block, bitop_left, right, mode);
+ tv_shift = tarval_shr(tv1, tv2);
+ } else if (is_Shrs(n)) {
+ new_shift = new_rd_Shrs(dbgi, block, bitop_left, right, mode);
+ tv_shift = tarval_shrs(tv1, tv2);
+ } else {
+ assert(is_Rotl(n));
+ new_shift = new_rd_Rotl(dbgi, block, bitop_left, right, mode);
+ tv_shift = tarval_rotl(tv1, tv2);
+ }
+
+ assert(get_tarval_mode(tv_shift) == mode);
+ irg = get_irn_irg(n);
+ new_const = new_r_Const(irg, tv_shift);
+
+ if (op_left == op_And) {
+ new_bitop = new_rd_And(dbgi, block, new_shift, new_const, mode);
+ } else if (op_left == op_Or) {
+ new_bitop = new_rd_Or(dbgi, block, new_shift, new_const, mode);
+ } else {
+ assert(op_left == op_Eor);
+ new_bitop = new_rd_Eor(dbgi, block, new_shift, new_const, mode);
+ }
+
+ return new_bitop;
+}
+
+/**
+ * normalisation: (x >> c1) & c2 to (x & (c2<<c1)) >> c1
+ * (also applies with And/Or/Eor as the bitop and Shl/Shr/Rotl as the shift;
+ * Shrs is excluded because the bitop could affect the sign bit)
+ *
+ * This normalisation is usually good for the backend since << C can often be
+ * matched as address-mode.
+ */
+static ir_node *transform_node_bitop_shift(ir_node *n)
+{
+	ir_graph  *irg   = get_irn_irg(n);
+	ir_node   *left  = get_binop_left(n);
+	ir_node   *right = get_binop_right(n);
+	ir_mode   *mode  = get_irn_mode(n);
+	ir_node   *shift_left;
+	ir_node   *shift_right;
+	ir_node   *block;
+	dbg_info  *dbg_bitop;
+	dbg_info  *dbg_shift;
+	ir_node   *new_bitop;
+	ir_node   *new_shift;
+	ir_node   *new_const;
+	ir_tarval *tv1;
+	ir_tarval *tv2;
+	ir_tarval *tv_bitop;
+
+	if (!is_irg_state(irg, IR_GRAPH_STATE_NORMALISATION2))
+		return n;
+
+	assert(is_And(n) || is_Or(n) || is_Eor(n));
+	if (!is_Const(right) || !is_shiftop(left))
+		return n;
+
+	shift_left  = get_binop_left(left);
+	shift_right = get_binop_right(left);
+	if (!is_Const(shift_right))
+		return n;
+
+	/* doing it with Shrs is not legal if the Or/Eor affects the topmost bit */
+	if (is_Shrs(left)) {
+		/* TODO this could be improved */
+		return n;
+	}
+
+	block     = get_nodes_block(n);
+	dbg_bitop = get_irn_dbg_info(n);
+	dbg_shift = get_irn_dbg_info(left);
+	tv1       = get_Const_tarval(shift_right);
+	tv2       = get_Const_tarval(right);
+	assert(get_tarval_mode(tv2) == mode);
+
+	/* shift the bitop constant in the opposite direction so that applying
+	 * the bitop before the shift yields the same result */
+	if (is_Shl(left)) {
+		tv_bitop = tarval_shr(tv2, tv1);
+	} else if (is_Shr(left)) {
+		tv_bitop = tarval_shl(tv2, tv1);
+	} else {
+		assert(is_Rotl(left));
+		tv_bitop = tarval_rotl(tv2, tarval_neg(tv1));
+	}
+	new_const = new_r_Const(irg, tv_bitop);
+
+	if (is_And(n)) {
+		new_bitop = new_rd_And(dbg_bitop, block, shift_left, new_const, mode);
+	} else if (is_Or(n)) {
+		new_bitop = new_rd_Or(dbg_bitop, block, shift_left, new_const, mode);
+	} else {
+		assert(is_Eor(n));
+		new_bitop = new_rd_Eor(dbg_bitop, block, shift_left, new_const, mode);
+	}
+
+	/* Shrs returned early above, so only Shl/Shr/Rotl can reach this point;
+	 * a plain else (not 'else if') guarantees new_shift is initialised */
+	if (is_Shl(left)) {
+		new_shift = new_rd_Shl(dbg_shift, block, new_bitop, shift_right, mode);
+	} else if (is_Shr(left)) {
+		new_shift = new_rd_Shr(dbg_shift, block, new_bitop, shift_right, mode);
+	} else {
+		assert(is_Rotl(left));
+		new_shift = new_rd_Rotl(dbg_shift, block, new_bitop, shift_right, mode);
+	}
+
+	return new_shift;
+}
+
+/**
+ * Transform an And.
+ */
+static ir_node *transform_node_And(ir_node *n)
+{
+ ir_node *c, *oldn = n;
+ ir_node *a = get_And_left(n);
+ ir_node *b = get_And_right(n);
+ ir_mode *mode;
+ vrp_attr *a_vrp, *b_vrp;
+
+ if (is_Cmp(a) && is_Cmp(b)) {
+ ir_node *a_left = get_Cmp_left(a);
+ ir_node *a_right = get_Cmp_right(a);
+ ir_node *b_left = get_Cmp_left(b);
+ ir_node *b_right = get_Cmp_right(b);
+ ir_relation a_relation = get_Cmp_relation(a);
+ ir_relation b_relation = get_Cmp_relation(b);
+ /* we can combine the relations of two compares with the same
+ * operands */
+ if (a_left == b_left && b_left == b_right) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_relation new_relation = a_relation & b_relation;
+ return new_rd_Cmp(dbgi, block, a_left, a_right, new_relation);
+ }
+ /* Cmp(a==b) and Cmp(c==d) can be optimized to Cmp((a^b)|(c^d)==0) */
+ if (a_relation == b_relation && a_relation == ir_relation_equal
+ && !mode_is_float(get_irn_mode(a_left))
+ && !mode_is_float(get_irn_mode(b_left))
+ && values_in_mode(get_irn_mode(a_left), get_irn_mode(b_left))) {
+ dbg_info *dbgi = get_irn_dbg_info(n);
+ ir_node *block = get_nodes_block(n);
+ ir_mode *a_mode = get_irn_mode(a_left);
+ ir_mode *b_mode = get_irn_mode(b_left);
+ ir_node *xora = new_rd_Eor(dbgi, block, a_left, a_right, a_mode);
+ ir_node *xorb = new_rd_Eor(dbgi, block, b_left, b_right, b_mode);
+ ir_node *conv = new_rd_Conv(dbgi, block, xorb, a_mode);
+ ir_node *or = new_rd_Or(dbgi, block, xora, conv, a_mode);
+ ir_graph *irg = get_irn_irg(n);
+ ir_node *zero = create_zero_const(irg, a_mode);
+ return new_rd_Cmp(dbgi, block, or, zero, ir_relation_equal);
+ }