handle ia32_l_vfist node
diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index c47d72a..698ba03 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -998,7 +998,7 @@ static ir_node *equivalent_node_idempotent_unop(ir_node *n) {
 /** Optimize Not(Not(x)) == x. */
 #define equivalent_node_Not    equivalent_node_idempotent_unop
 
-/** --x == x       ??? Is this possible or can --x raise an
+/** -(-x) == x       ??? Is this possible or can -(-x) raise an
                        out of bounds exception if -min != max? */
 #define equivalent_node_Minus  equivalent_node_idempotent_unop
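
On the question in this comment: for two's-complement modes the identity is safe even at
the range boundary, because negating the minimum value wraps back onto itself. A minimal
standalone sketch, not part of the patch (unsigned arithmetic avoids C's signed-overflow UB):

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        /* Negation in two's complement is ~x + 1; applying it twice to the
           minimum value wraps back onto itself, so -(-x) == x holds for
           every bit pattern and no extra range check is needed. */
        int32_t x = INT32_MIN;
        uint32_t neg  = ~(uint32_t)x + 1u;  /* -x, without signed-overflow UB */
        uint32_t neg2 = ~neg + 1u;          /* -(-x) */
        printf("%d -> %d\n", x, (int32_t)neg2);  /* prints the original value */
        return 0;
    }
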
 
@@ -1144,9 +1144,20 @@ static ir_node *equivalent_node_Conv(ir_node *n) {
        ir_mode *a_mode = get_irn_mode(a);
 
        if (n_mode == a_mode) { /* No Conv necessary */
-               /* leave strict floating point Conv's */
-               if (get_Conv_strict(n))
-                       return n;
+               if (get_Conv_strict(n)) {
+                       /* special case: the predecessor might also be a Conv */
+                       if (is_Conv(a)) {
+                               if (! get_Conv_strict(a)) {
+                                       /* first one is not strict, kick it */
+                                       set_Conv_op(n, get_Conv_op(a));
+                                       return n;
+                               }
+                               /* else both are strict Convs, the second is superfluous */
+                       } else {
+                               /* leave strict floating point Conv's */
+                               return n;
+                       }
+               }
                n = a;
                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
        } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
@@ -1160,7 +1171,7 @@ static ir_node *equivalent_node_Conv(ir_node *n) {
                        if (n_mode == mode_b) {
                                n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
                                DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
-                       } else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
+                       } else if (mode_is_int(n_mode)) {
                                if (smaller_mode(b_mode, a_mode)){
                                        n = b;        /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
                                        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
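
Why the strict Conv must stay: on x87-style FPUs a value can be held with more precision
than its mode declares, and a strict Conv models the forced rounding back to the declared
mode. A rough standalone illustration of the effect, not part of the patch (whether the
mismatch is visible depends on the FPU and compiler flags):

    #include <stdio.h>

    int main(void) {
        /* With x87 code generation, a / b may be computed in 80-bit extended
           precision; storing it to memory (like a strict Conv) rounds it to a
           true 64-bit double, which can change the value. */
        double a = 1.0, b = 3.0;
        double q = a / b;
        volatile double rounded = q;   /* forces rounding to the declared mode */
        printf("%d\n", q == rounded);  /* can print 0 on x87, 1 on SSE2 */
        return 0;
    }
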
@@ -1566,8 +1577,8 @@ static ir_node *equivalent_node_Bound(ir_node *n) {
                                /*
                                 * One could expect that we simply return the previous
                                 * Bound here. However, this would be wrong, as we could
-                                * add an exception Proj to a new location than.
-                                * So, we must turn in into a tuple
+                                * add an exception Proj to a new location then.
+                                * So, we must turn it into a tuple.
                                 */
                                ret_tuple = 1;
                        }
@@ -1901,7 +1912,7 @@ static ir_node *transform_node_Add(ir_node *n) {
                return n;
 
        if (mode_is_num(mode)) {
-               if (a == b) {
+               if (a == b && mode_is_int(mode)) {
                        ir_node *block = get_irn_n(n, -1);
 
                        n = new_rd_Mul(
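
Restricting a + a -> 2 * a to integer modes presumably also keeps it from ping-ponging
with the new floating-point rule in transform_node_Mul() below (2.0 * x -> x + x) once
transform_node() iterates to a fixpoint. For integers the rewrite is unconditionally
safe, including on wrap-around; a standalone check, not part of the patch:

    #include <stdio.h>
    #include <limits.h>

    int main(void) {
        /* x + x and 2 * x are the same operation modulo 2^N, so they agree
           for every integer, even where the sum wraps. */
        unsigned tests[] = { 0u, 1u, 42u, UINT_MAX / 2u, UINT_MAX };
        for (unsigned i = 0; i < sizeof tests / sizeof *tests; i++)
            printf("%u: %d\n", tests[i], tests[i] + tests[i] == 2u * tests[i]);
        return 0;
    }
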
@@ -1993,6 +2004,15 @@ static ir_node *transform_node_Add(ir_node *n) {
                                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
                        }
                }
+               /* Here we rely on constants being on the RIGHT side */
+               else if (get_mode_arithmetic(mode) == irma_twos_complement &&
+                        is_Not(a) && classify_Const(b) == CNST_ONE) {
+                       /* ~x + 1 = -x */
+                       ir_node *op = get_Not_op(a);
+                       ir_node *blk = get_irn_n(n, -1);
+                       n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode);
+                       DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
+               }
        }
        return n;
 }  /* transform_node_Add */
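
The rewrite above is just the definition of two's-complement negation (-x = ~x + 1), so
it holds for every value, including 0 and the minimum. A standalone check, not part of
the patch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        /* In two's complement, -x is defined as ~x + 1, so the Add collapses
           to a single Minus for all inputs, including 0 and INT_MIN. */
        uint32_t tests[] = { 0u, 1u, 42u, 0x80000000u, 0xffffffffu };
        for (unsigned i = 0; i < sizeof tests / sizeof *tests; i++)
            printf("%08x: %d\n", tests[i], ~tests[i] + 1u == -tests[i]);
        return 0;
    }
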
@@ -2172,6 +2192,24 @@ static ir_node *transform_node_Mul(ir_node *n) {
                        return n;
                }
        }
+       if (get_mode_arithmetic(mode) == irma_ieee754) {
+               if (is_Const(a)) {
+                       tarval *tv = get_Const_tarval(a);
+                       if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
+                               n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), b, b, mode);
+                               DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
+                               return n;
+                       }
+               }
+               else if (is_Const(b)) {
+                       tarval *tv = get_Const_tarval(b);
+                       if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
+                               n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, a, mode);
+                               DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
+                               return n;
+                       }
+               }
+       }
        return arch_dep_replace_mul_with_shifts(n);
 }  /* transform_node_Mul */
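
An unbiased exponent of 1 with a zero mantissa means the constant has magnitude exactly
2.0 (assuming tarval_ieee754_get_exponent() returns the unbiased exponent), and for the
factor 2.0 the rewrite is exact: multiplying by 2.0 only bumps the exponent, so x + x
rounds identically, including on overflow to infinity. A standalone check, not part of
the patch:

    #include <stdio.h>
    #include <math.h>

    int main(void) {
        /* For the factor 2.0, Mul and the cheaper Add give bit-identical
           IEEE 754 results, even on overflow to infinity. */
        double tests[] = { 0.1, 1.0 / 3.0, 1e308, -0.0, INFINITY };
        for (unsigned i = 0; i < sizeof tests / sizeof *tests; i++)
            printf("%g: %d\n", tests[i], 2.0 * tests[i] == tests[i] + tests[i]);
        return 0;
    }
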
 
@@ -2180,22 +2218,51 @@ static ir_node *transform_node_Mul(ir_node *n) {
  */
 static ir_node *transform_node_Div(ir_node *n) {
        tarval *tv = value_of(n);
+       ir_mode *mode = get_Div_resmode(n);
        ir_node *value = n;
 
-       /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause a exception */
-
        if (tv != tarval_bad) {
                value = new_Const(get_tarval_mode(tv), tv);
 
                DBG_OPT_CSTEVAL(n, value);
-       } else /* Try architecture dependent optimization */
-               value = arch_dep_replace_div_by_const(n);
+               goto make_tuple;
+       } else {
+               ir_node *a = get_Div_left(n);
+               ir_node *b = get_Div_right(n);
+               ir_node *dummy;
+
+               if (a == b && value_not_zero(a, &dummy)) {
+                       /* BEWARE: we can optimize a/a to 1 only if this cannot cause an exception */
+                       value = new_Const(mode, get_mode_one(mode));
+                       DBG_OPT_CSTEVAL(n, value);
+                       goto make_tuple;
+               } else {
+                       if (mode_is_signed(mode) && is_Const(b)) {
+                               tarval *tv = get_Const_tarval(b);
+
+                               if (tv == get_mode_minus_one(mode)) {
+                                       /* a / -1 */
+                                       value = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+                                       DBG_OPT_CSTEVAL(n, value);
+                                       goto make_tuple;
+                               }
+                       }
+                       /* Try architecture dependent optimization */
+                       value = arch_dep_replace_div_by_const(n);
+               }
+       }
 
        if (value != n) {
+               ir_node *mem, *blk;
+
+make_tuple:
                /* Turn Div into a tuple (mem, jmp, bad, value) */
-               ir_node *mem = get_Div_mem(n);
-               ir_node *blk = get_irn_n(n, -1);
+               mem = get_Div_mem(n);
+               blk = get_irn_n(n, -1);
 
+               /* skip a potential Pin */
+               if (is_Pin(mem))
+                       mem = get_Pin_op(mem);
                turn_into_tuple(n, pn_Div_max);
                set_Tuple_pred(n, pn_Div_M,         mem);
                set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
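
Two notes on the new Div cases: a / a may only fold to 1 when a is provably nonzero
(0 / 0 must still be able to trap), and a / -1 becomes a plain Minus, which also avoids
the hardware trap that INT_MIN / -1 raises on e.g. x86. A standalone sketch of the second
identity, not part of the patch (the loop avoids INT_MIN, where the C-level expression
itself would be undefined):

    #include <stdio.h>

    int main(void) {
        /* Signed division by -1 is negation, so the expensive Div node can
           be replaced by a Minus. */
        for (int i = -5; i <= 5; i++)
            printf("%3d / -1 = %3d, -(%3d) = %3d\n", i, i / -1, i, -i);
        return 0;
    }
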
@@ -2210,22 +2277,51 @@ static ir_node *transform_node_Div(ir_node *n) {
  */
 static ir_node *transform_node_Mod(ir_node *n) {
        tarval *tv = value_of(n);
+       ir_mode *mode = get_Mod_resmode(n);
        ir_node *value = n;
 
-       /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause a exception */
-
        if (tv != tarval_bad) {
                value = new_Const(get_tarval_mode(tv), tv);
 
                DBG_OPT_CSTEVAL(n, value);
-       } else /* Try architecture dependent optimization */
-               value = arch_dep_replace_mod_by_const(n);
+               goto make_tuple;
+       } else {
+               ir_node *a = get_Mod_left(n);
+               ir_node *b = get_Mod_right(n);
+               ir_node *dummy;
+
+               if (a == b && value_not_zero(a, &dummy)) {
+                       /* BEWARE: we can optimize a%a to 0 only if this cannot cause an exception */
+                       value = new_Const(mode, get_mode_null(mode));
+                       DBG_OPT_CSTEVAL(n, value);
+                       goto make_tuple;
+               } else {
+                       if (mode_is_signed(mode) && is_Const(b)) {
+                               tarval *tv = get_Const_tarval(b);
+
+                               if (tv == get_mode_minus_one(mode)) {
+                                       /* a % -1 = 0 */
+                                       value = new_Const(mode, get_mode_null(mode));
+                                       DBG_OPT_CSTEVAL(n, value);
+                                       goto make_tuple;
+                               }
+                       }
+                       /* Try architecture dependent optimization */
+                       value = arch_dep_replace_mod_by_const(n);
+               }
+       }
 
        if (value != n) {
+               ir_node *mem, *blk;
+
+make_tuple:
                /* Turn Mod into a tuple (mem, jmp, bad, value) */
-               ir_node *mem = get_Mod_mem(n);
-               ir_node *blk = get_irn_n(n, -1);
+               mem = get_Mod_mem(n);
+               blk = get_irn_n(n, -1);
 
+               /* skip a potential Pin */
+               if (is_Pin(mem))
+                       mem = get_Pin_op(mem);
                turn_into_tuple(n, pn_Mod_max);
                set_Tuple_pred(n, pn_Mod_M,         mem);
                set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
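
Analogously for Mod: a % a folds to 0 only when a is provably nonzero, and a % -1 is
always 0, since every integer is a multiple of -1. A standalone check, not part of the
patch:

    #include <stdio.h>

    int main(void) {
        /* Every a is divisible by -1, so the remainder is always 0. */
        for (int i = -5; i <= 5; i++)
            printf("%3d %% -1 = %d\n", i, i % -1);
        return 0;
    }
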
@@ -2239,51 +2335,69 @@ static ir_node *transform_node_Mod(ir_node *n) {
  * Transform a DivMod node.
  */
 static ir_node *transform_node_DivMod(ir_node *n) {
-       int evaluated = 0;
-
+       ir_node *dummy;
        ir_node *a = get_DivMod_left(n);
        ir_node *b = get_DivMod_right(n);
-       ir_mode *mode = get_irn_mode(a);
+       ir_mode *mode = get_DivMod_resmode(n);
        tarval *ta = value_of(a);
        tarval *tb = value_of(b);
-
-       if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
-               return n;
-
-       /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause a exception */
+       int evaluated = 0;
 
        if (tb != tarval_bad) {
                if (tb == get_mode_one(get_tarval_mode(tb))) {
-                       b = new_Const (mode, get_mode_null(mode));
-                       evaluated = 1;
-
+                       b = new_Const(mode, get_mode_null(mode));
                        DBG_OPT_CSTEVAL(n, b);
+                       goto make_tuple;
                } else if (ta != tarval_bad) {
                        tarval *resa, *resb;
-                       resa = tarval_div (ta, tb);
+                       resa = tarval_div(ta, tb);
                        if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through
                                                             Jmp for X result!? */
-                       resb = tarval_mod (ta, tb);
+                       resb = tarval_mod(ta, tb);
                        if (resb == tarval_bad) return n; /* Causes exception! */
-                       a = new_Const (mode, resa);
-                       b = new_Const (mode, resb);
-                       evaluated = 1;
-
+                       a = new_Const(mode, resa);
+                       b = new_Const(mode, resb);
+                       DBG_OPT_CSTEVAL(n, a);
+                       DBG_OPT_CSTEVAL(n, b);
+                       goto make_tuple;
+               } else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
+                       a = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+                       b = new_Const(mode, get_mode_null(mode));
                        DBG_OPT_CSTEVAL(n, a);
                        DBG_OPT_CSTEVAL(n, b);
+                       goto make_tuple;
                } else { /* Try architecture dependent optimization */
                        arch_dep_replace_divmod_by_const(&a, &b, n);
                        evaluated = a != NULL;
                }
-       } else if (ta == get_mode_null(mode)) {
+       } else if (a == b) {
+               if (value_not_zero(a, &dummy)) {
+                       /* a/a && a != 0 */
+                       a = new_Const(mode, get_mode_one(mode));
+                       b = new_Const(mode, get_mode_null(mode));
+                       DBG_OPT_CSTEVAL(n, a);
+                       DBG_OPT_CSTEVAL(n, b);
+                       goto make_tuple;
+               } else {
+                       /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */
+                       return n;
+               }
+       } else if (ta == get_mode_null(mode) && value_not_zero(b, &dummy)) {
                /* 0 / non-Const = 0 */
                b = a;
-               evaluated = 1;
+               goto make_tuple;
        }
 
        if (evaluated) { /* replace by tuple */
-               ir_node *mem = get_DivMod_mem(n);
-               ir_node *blk = get_irn_n(n, -1);
+               ir_node *mem, *blk;
+
+make_tuple:
+               mem = get_DivMod_mem(n);
+               /* skip a potential Pin */
+               if (is_Pin(mem))
+                       mem = get_Pin_op(mem);
+
+               blk = get_irn_n(n, -1);
                turn_into_tuple(n, pn_DivMod_max);
                set_Tuple_pred(n, pn_DivMod_M,         mem);
                set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
@@ -2295,6 +2409,46 @@ static ir_node *transform_node_DivMod(ir_node *n) {
        return n;
 }  /* transform_node_DivMod */
 
+/**
+ * Optimize x / c to x * (1/c)
+ */
+static ir_node *transform_node_Quot(ir_node *n) {
+       ir_mode *mode = get_Quot_resmode(n);
+       ir_node *oldn = n;
+
+       if (get_mode_arithmetic(mode) == irma_ieee754) {
+               ir_node *b = get_Quot_right(n);
+
+               if (is_Const(b)) {
+                       tarval *tv = get_Const_tarval(b);
+
+                       tv = tarval_quo(get_mode_one(mode), tv);
+
+                       /* Do the transformation if the result is either exact or we are not
+                          using strict rules. */
+                       if (tv != tarval_bad &&
+                           (tarval_ieee754_get_exact() || (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic) == 0)) {
+                               ir_node *blk = get_irn_n(n, -1);
+                               ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
+                               ir_node *a = get_Quot_left(n);
+                               ir_node *m = new_rd_Mul(get_irn_dbg_info(n), current_ir_graph, blk, a, c, mode);
+                               ir_node *mem = get_Quot_mem(n);
+
+                               /* skip a potential Pin */
+                               if (is_Pin(mem))
+                                       mem = get_Pin_op(mem);
+                               turn_into_tuple(n, pn_Quot_max);
+                               set_Tuple_pred(n, pn_Quot_M, mem);
+                               set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
+                               set_Tuple_pred(n, pn_Quot_X_except,  new_r_Bad(current_ir_graph));
+                               set_Tuple_pred(n, pn_Quot_res, m);
+                               DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
+                       }
+               }
+       }
+       return n;
+}  /* transform_node_Quot */
+
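
The guard on the new Quot rule matters: 1/c is only exactly representable for special
divisors (e.g. powers of two); otherwise x * (1/c) can round differently from x / c,
hence the tarval_ieee754_get_exact() / fp_strict_algebraic check. A standalone
illustration, not part of the patch:

    #include <stdio.h>

    int main(void) {
        /* 1.0/4.0 is exact, so the strength-reduced multiply always matches;
           1.0/10.0 is not, so the multiply can differ in the last bit. */
        double xs[] = { 7.3, 1e-3, 123456.789 };
        for (unsigned i = 0; i < sizeof xs / sizeof *xs; i++) {
            double x = xs[i];
            printf("%d %d\n", x / 4.0  == x * (1.0 / 4.0),
                              x / 10.0 == x * (1.0 / 10.0));
        }
        return 0;
    }
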
 /**
  * Optimize Abs(x) into  x if x is Confirmed >= 0
  * Optimize Abs(x) into -x if x is Confirmed <= 0
@@ -2563,13 +2717,14 @@ static ir_node *transform_node_Not(ir_node *n) {
 static ir_node *transform_node_Minus(ir_node *n) {
        ir_node *c, *oldn = n;
        ir_node *a = get_Minus_op(n);
+       ir_mode *mode;
 
        HANDLE_UNOP_PHI(tarval_neg,a,c);
 
-       if (is_Not(a)) {
+       mode = get_irn_mode(a);
+       if (get_mode_arithmetic(mode) == irma_twos_complement && is_Not(a)) {
                /* -(~x) = x + 1 */
                ir_node *op   = get_Not_op(a);
-               ir_mode *mode = get_irn_mode(op);
                tarval *tv    = get_mode_one(mode);
                ir_node *blk  = get_irn_n(n, -1);
                ir_node *c    = new_r_Const(current_ir_graph, blk, mode, tv);
@@ -2964,7 +3119,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
                                 */
                                if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) &&
                                    (get_irn_op(left) == op_And)) {
-                                       if (is_single_bit_tarval(tv)) {
+                                       if (tarval_is_single_bit(tv)) {
                                               /* check for Constant's match. We have to check the tarvals
                                                  here, because our Const might be changed */
                                                ir_node *la = get_And_left(left);
@@ -3102,6 +3257,11 @@ static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c) {
  *        OR     c2      ===>               OR
  *           AND    c1
  *               OR
+ *
+ *
+ * value  c2                 value  c1
+ *     AND   c1    ===>           OR     if (c1 | c2) == 0x111..11
+ *        OR
  */
 static ir_node *transform_node_Or_bf_store(ir_node *or) {
        ir_node *and, *c1;
@@ -3113,63 +3273,78 @@ static ir_node *transform_node_Or_bf_store(ir_node *or) {
 
        tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;
 
-       get_comm_Binop_Ops(or, &and, &c1);
-       if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
-               return or;
+       while (1) {
+               get_comm_Binop_Ops(or, &and, &c1);
+               if (!is_Const(c1) || !is_And(and))
+                       return or;
 
-       get_comm_Binop_Ops(and, &or_l, &c2);
-       if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
-               return or;
+               get_comm_Binop_Ops(and, &or_l, &c2);
+               if (!is_Const(c2))
+                       return or;
 
-       get_comm_Binop_Ops(or_l, &and_l, &c3);
-       if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
-               return or;
+               tv1 = get_Const_tarval(c1);
+               tv2 = get_Const_tarval(c2);
 
-       get_comm_Binop_Ops(and_l, &value, &c4);
-       if (get_irn_op(c4) != op_Const)
-               return or;
+               tv = tarval_or(tv1, tv2);
+               if (classify_tarval(tv) == TV_CLASSIFY_ALL_ONE) {
+                       /* the AND does NOT clear a bit which isn't set by the OR */
+                       set_Or_left(or, or_l);
+                       set_Or_right(or, c1);
 
-       /* ok, found the pattern, check for conditions */
-       assert(mode == get_irn_mode(and));
-       assert(mode == get_irn_mode(or_l));
-       assert(mode == get_irn_mode(and_l));
+                       /* check for more */
+                       continue;
+               }
 
-       tv1 = get_Const_tarval(c1);
-       tv2 = get_Const_tarval(c2);
-       tv3 = get_Const_tarval(c3);
-       tv4 = get_Const_tarval(c4);
+               if (!is_Or(or_l))
+                       return or;
 
-       tv = tarval_or(tv4, tv2);
-       if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
-               /* have at least one 0 at the same bit position */
-               return or;
-       }
+               get_comm_Binop_Ops(or_l, &and_l, &c3);
+               if (!is_Const(c3) || !is_And(and_l))
+                       return or;
 
-       n_tv4 = tarval_not(tv4);
-       if (tv3 != tarval_and(tv3, n_tv4)) {
-               /* bit in the or_mask is outside the and_mask */
-               return or;
-       }
+               get_comm_Binop_Ops(and_l, &value, &c4);
+               if (!is_Const(c4))
+                       return or;
 
-       n_tv2 = tarval_not(tv2);
-       if (tv1 != tarval_and(tv1, n_tv2)) {
-               /* bit in the or_mask is outside the and_mask */
-               return or;
-       }
+               /* ok, found the pattern, check for conditions */
+               assert(mode == get_irn_mode(and));
+               assert(mode == get_irn_mode(or_l));
+               assert(mode == get_irn_mode(and_l));
 
-       /* ok, all conditions met */
-       block = get_irn_n(or, -1);
+               tv3 = get_Const_tarval(c3);
+               tv4 = get_Const_tarval(c4);
 
-       new_and = new_r_And(current_ir_graph, block,
-               value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
+               tv = tarval_or(tv4, tv2);
+               if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
+                       /* have at least one 0 at the same bit position */
+                       return or;
+               }
+
+               n_tv4 = tarval_not(tv4);
+               if (tv3 != tarval_and(tv3, n_tv4)) {
+                       /* bit in the or_mask is outside the and_mask */
+                       return or;
+               }
 
-       new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
+               n_tv2 = tarval_not(tv2);
+               if (tv1 != tarval_and(tv1, n_tv2)) {
+                       /* bit in the or_mask is outside the and_mask */
+                       return or;
+               }
+
+               /* ok, all conditions met */
+               block = get_irn_n(or, -1);
 
-       set_Or_left(or, new_and);
-       set_Or_right(or, new_const);
+               new_and = new_r_And(current_ir_graph, block,
+                       value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
 
-       /* check for more */
-       return transform_node_Or_bf_store(or);
+               new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
+
+               set_Or_left(or, new_and);
+               set_Or_right(or, new_const);
+
+               /* check for more */
+       }
 }  /* transform_node_Or_bf_store */
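
The new early-out corresponds to the second diagram added above: when (c1 | c2) is all
ones, every bit the AND clears is set again by the outer OR, so the AND can be dropped
entirely. An exhaustive standalone check for 8-bit values, not part of the patch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
        /* If (c1 | c2) == 0xff, then ((x & c2) | c1) == (x | c1) for all x. */
        uint8_t c2 = 0xf0, c1 = 0x0f;
        unsigned mismatches = 0;
        for (unsigned x = 0; x < 256; x++)
            mismatches += ((x & c2) | c1) != (x | c1);
        printf("%u mismatches\n", mismatches);  /* prints 0 */
        return 0;
    }
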
 
 /**
@@ -3433,6 +3608,44 @@ static ir_node *transform_node_Mux(ir_node *n) {
        ir_node *oldn = n, *sel = get_Mux_sel(n);
        ir_mode *mode = get_irn_mode(n);
 
+       if (mode == mode_b) {
+               ir_node  *t     = get_Mux_true(n);
+               ir_node  *f     = get_Mux_false(n);
+               dbg_info *dbg   = get_irn_dbg_info(n);
+               ir_node  *block = get_irn_n(n, -1);
+               ir_graph *irg   = current_ir_graph;
+
+               if (is_Const(t)) {
+                       tarval *tv_t = get_Const_tarval(t);
+                       if (tv_t == tarval_b_true) {
+                               if (is_Const(f)) {
+                                       assert(get_Const_tarval(f) == tarval_b_false);
+                                       return sel;
+                               } else {
+                                       return new_rd_Or(dbg, irg, block, sel, f, mode_b);
+                               }
+                       } else {
+                               ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+                               assert(tv_t == tarval_b_false);
+                               if (is_Const(f)) {
+                                       assert(get_Const_tarval(f) == tarval_b_true);
+                                       return not_sel;
+                               } else {
+                                       return new_rd_And(dbg, irg, block, not_sel, f, mode_b);
+                               }
+                       }
+               } else if (is_Const(f)) {
+                       tarval *tv_f = get_Const_tarval(f);
+                       if (tv_f == tarval_b_true) {
+                               ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+                               return new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+                       } else {
+                               assert(tv_f == tarval_b_false);
+                               return new_rd_And(dbg, irg, block, sel, t, mode_b);
+                       }
+               }
+       }
+
        if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(mode)) {
                ir_node *cmp = get_Proj_pred(sel);
                long proj_nr = get_Proj_proj(sel);
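
The new mode_b block maps the four boolean Mux shapes (result = sel ? t : f) onto Or/And:
sel ? true : f == sel | f, sel ? false : f == ~sel & f, sel ? t : true == ~sel | t, and
sel ? t : false == sel & t. An exhaustive standalone check, not part of the patch:

    #include <stdio.h>

    int main(void) {
        /* All four boolean Mux shapes handled above, checked exhaustively. */
        for (int sel = 0; sel <= 1; sel++)
            for (int v = 0; v <= 1; v++) {
                if ((sel ? 1 : v) != (sel | v))  printf("or case broken\n");
                if ((sel ? 0 : v) != (!sel & v)) printf("andnot case broken\n");
                if ((sel ? v : 1) != (!sel | v)) printf("ornot case broken\n");
                if ((sel ? v : 0) != (sel & v))  printf("and case broken\n");
            }
        printf("done\n");   /* nothing else is printed */
        return 0;
    }
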
@@ -3572,8 +3785,19 @@ static ir_node *transform_node_Psi(ir_node *n) {
  * not be freed even if the equivalent node isn't the old one.
  */
 static ir_node *transform_node(ir_node *n) {
-       if (n->op->ops.transform_node)
-               n = n->op->ops.transform_node(n);
+       ir_node *oldn;
+
+       /*
+        * Transform_node is the only "optimizing transformation" that might
+        * return a node with a different opcode. We iterate HERE until fixpoint
+        * to get the final result.
+        */
+       do {
+               oldn = n;
+               if (n->op->ops.transform_node)
+                       n = n->op->ops.transform_node(n);
+       } while (oldn != n);
+
        return n;
 }  /* transform_node */
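
A concrete chain that needs this loop: transform_node_Minus() rewrites -(~x) into the
Add x + 1, and only a renewed dispatch can then apply the Add rules to that result. The
driver shape in isolation (a minimal sketch; rewrite_once() is a hypothetical stand-in
for n->op->ops.transform_node):

    #include <stdio.h>

    /* Hypothetical one-step rewriter: halves even numbers, standing in for
       a "reducible" node; returns its argument unchanged at a fixpoint. */
    static int rewrite_once(int n) { return n % 2 == 0 ? n / 2 : n; }

    static int transform_fixpoint(int n) {
        int oldn;
        do {        /* same do/while pattern as the patched transform_node() */
            oldn = n;
            n = rewrite_once(n);
        } while (oldn != n);
        return n;
    }

    int main(void) {
        printf("%d\n", transform_fixpoint(40));  /* 40 -> 20 -> 10 -> 5 */
        return 0;
    }
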
 
@@ -3600,6 +3824,7 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops
        CASE(Div);
        CASE(Mod);
        CASE(DivMod);
+       CASE(Quot);
        CASE(Abs);
        CASE(Cond);
        CASE(And);
@@ -3715,12 +3940,19 @@ static int node_cmp_attr_Load(ir_node *a, ir_node *b) {
            get_Load_volatility(b) == volatility_is_volatile)
                /* NEVER do CSE on volatile Loads */
                return 1;
+       /* do not CSE Loads with different alignment. Be conservative. */
+       if (get_Load_align(a) != get_Load_align(b))
+               return 1;
 
        return get_Load_mode(a) != get_Load_mode(b);
 }  /* node_cmp_attr_Load */
 
 /** Compares the attributes of two Store nodes. */
 static int node_cmp_attr_Store(ir_node *a, ir_node *b) {
+       /* do not CSE Stores with different alignment. Be conservative. */
+       if (get_Store_align(a) != get_Store_align(b))
+               return 1;
+
        /* NEVER do CSE on volatile Stores */
        return (get_Store_volatility(a) == volatility_is_volatile ||
                get_Store_volatility(b) == volatility_is_volatile);