handle ia32_l_vfist node
[libfirm] / ir / ir / iropt.c
index 3bd393c..698ba03 100644 (file)
@@ -953,62 +953,23 @@ static ir_node *equivalent_node_left_zero(ir_node *n) {
  */
 static ir_node *equivalent_node_Sub(ir_node *n) {
        ir_node *oldn = n;
-       ir_node *a, *b;
+       ir_node *b;
        ir_mode *mode = get_irn_mode(n);
 
        /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
        if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
                return n;
 
-       a = get_Sub_left(n);
        b = get_Sub_right(n);
-restart:
 
        /* Beware: modes might be different */
        if (classify_tarval(value_of(b)) == TV_CLASSIFY_NULL) {
+               ir_node *a = get_Sub_left(n);
                if (mode == get_irn_mode(a)) {
                        n = a;
 
                        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_NEUTRAL_0);
                }
-       } else if (is_Add(a)) {
-               if (mode_wrap_around(mode)) {
-                       ir_node *left  = get_Add_left(a);
-                       ir_node *right = get_Add_right(a);
-
-                       if (left == b) {
-                               if (mode == get_irn_mode(right)) {
-                                       n = right;
-                                       DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
-                               }
-                       } else if (right == b) {
-                               if (mode == get_irn_mode(left)) {
-                                       n = left;
-                                       DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
-                               }
-                       }
-               }
-       } else if (mode_is_int(mode) && is_Conv(a) && is_Conv(b)) {
-               ir_mode *mode = get_irn_mode(a);
-
-               if (mode == get_irn_mode(b)) {
-                       ir_mode *ma, *mb;
-
-                       a = get_Conv_op(a);
-                       b = get_Conv_op(b);
-
-                       /* check if it's allowed to skip the conv */
-                       ma = get_irn_mode(a);
-                       mb = get_irn_mode(b);
-
-                       if (mode_is_reference(ma) && mode_is_reference(mb)) {
-                               /* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
-                               set_Sub_left(n, a);
-                               set_Sub_right(n, b);
-
-                               goto restart;
-                       }
-               }
        }
        return n;
 }  /* equivalent_node_Sub */
@@ -1029,7 +990,7 @@ static ir_node *equivalent_node_idempotent_unop(ir_node *n) {
        /* optimize symmetric unop */
        if (get_irn_op(pred) == get_irn_op(n)) {
                n = get_unop_op(pred);
-               DBG_OPT_ALGSIM2(oldn, pred, n);
+               DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_IDEM_UNARY);
        }
        return n;
 }  /* equivalent_node_idempotent_unop */
@@ -1037,7 +998,7 @@ static ir_node *equivalent_node_idempotent_unop(ir_node *n) {
 /** Optimize Not(Not(x)) == x. */
 #define equivalent_node_Not    equivalent_node_idempotent_unop
 
-/** --x == x       ??? Is this possible or can --x raise an
+/** -(-x) == x       ??? Is this possible or can --x raise an
                        out of bounds exception if min =! max? */
 #define equivalent_node_Minus  equivalent_node_idempotent_unop
 
@@ -1183,9 +1144,20 @@ static ir_node *equivalent_node_Conv(ir_node *n) {
        ir_mode *a_mode = get_irn_mode(a);
 
        if (n_mode == a_mode) { /* No Conv necessary */
-               /* leave strict floating point Conv's */
-               if (get_Conv_strict(n))
-                       return n;
+               if (get_Conv_strict(n)) {
+                       /* special case: the predecessor might also be a Conv */
+                       if (is_Conv(a)) {
+                               if (! get_Conv_strict(a)) {
+                                       /* first one is not strict, kick it */
+                                       set_Conv_op(n, get_Conv_op(a));
+                                       return n;
+                               }
+                               /* else both are strict conv, second is superfluous */
+                       } else {
+                               /* leave strict floating point Conv's */
+                               return n;
+                       }
+               }
                n = a;
                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_CONV);
        } else if (get_irn_op(a) == op_Conv) { /* Conv(Conv(b)) */
@@ -1199,7 +1171,7 @@ static ir_node *equivalent_node_Conv(ir_node *n) {
                        if (n_mode == mode_b) {
                                n = b; /* Convb(Conv*(xxxb(...))) == xxxb(...) */
                                DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
-                       } else if (mode_is_int(n_mode) || mode_is_character(n_mode)) {
+                       } else if (mode_is_int(n_mode)) {
                                if (smaller_mode(b_mode, a_mode)){
                                        n = b;        /* ConvS(ConvL(xxxS(...))) == xxxS(...) */
                                        DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_CONV);
@@ -1605,8 +1577,8 @@ static ir_node *equivalent_node_Bound(ir_node *n) {
                                /*
                                 * One could expect that we simply return the previous
                                 * Bound here. However, this would be wrong, as we could
-                                * add an exception Proj to a new location than.
-                                * So, we must turn in into a tuple
+                                * add an exception Proj to a new location then.
+                                * So, we must turn it into a tuple.
                                 */
                                ret_tuple = 1;
                        }
@@ -1940,7 +1912,7 @@ static ir_node *transform_node_Add(ir_node *n) {
                return n;
 
        if (mode_is_num(mode)) {
-               if (a == b) {
+               if (a == b && mode_is_int(mode)) {
                        ir_node *block = get_irn_n(n, -1);
 
                        n = new_rd_Mul(
@@ -2032,6 +2004,15 @@ static ir_node *transform_node_Add(ir_node *n) {
                                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_ADD_MUL_A_X_A);
                        }
                }
+               /* Here we rely on constants being on the RIGHT side */
+               else if (get_mode_arithmetic(mode) == irma_twos_complement &&
+                        is_Not(a) && classify_Const(b) == CNST_ONE) {
+                       /* ~x + 1 = -x */
+                       ir_node *op = get_Not_op(a);
+                       ir_node *blk = get_irn_n(n, -1);
+                       n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, mode);
+                       DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_PLUS_1);
+               }
        }
        return n;
 }  /* transform_node_Add */
@@ -2053,16 +2034,61 @@ static ir_node *transform_node_Sub(ir_node *n) {
        a = get_Sub_left(n);
        b = get_Sub_right(n);
 
-       HANDLE_BINOP_PHI(tarval_sub, a,b,c);
-
        mode = get_irn_mode(n);
 
+restart:
+       HANDLE_BINOP_PHI(tarval_sub, a,b,c);
+
        /* for FP these optimizations are only allowed if fp_strict_algebraic is disabled */
        if (mode_is_float(mode) && (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic))
                return n;
 
+       if (is_Add(a)) {
+               if (mode_wrap_around(mode)) {
+                       ir_node *left  = get_Add_left(a);
+                       ir_node *right = get_Add_right(a);
+
+                       /* FIXME: Do the Convs work only for two's complement or generally? */
+                       if (left == b) {
+                               if (mode != get_irn_mode(right)) {
+                                       /* This Sub is an effective Cast */
+                                       right = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), right, mode);
+                               }
+                               n = right;
+                               DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+                       } else if (right == b) {
+                               if (mode != get_irn_mode(left)) {
+                                       /* This Sub is an effective Cast */
+                                       left = new_r_Conv(get_irn_irg(n), get_irn_n(n, -1), left, mode);
+                               }
+                               n = left;
+                               DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_SUB);
+                       }
+               }
+       } else if (mode_is_int(mode) && is_Conv(a) && is_Conv(b)) {
+               ir_mode *mode = get_irn_mode(a);
+
+               if (mode == get_irn_mode(b)) {
+                       ir_mode *ma, *mb;
+
+                       a = get_Conv_op(a);
+                       b = get_Conv_op(b);
+
+                       /* check if it's allowed to skip the conv */
+                       ma = get_irn_mode(a);
+                       mb = get_irn_mode(b);
+
+                       if (mode_is_reference(ma) && mode_is_reference(mb)) {
+                               /* SubInt(ConvInt(aP), ConvInt(bP)) -> SubInt(aP,bP) */
+                               set_Sub_left(n, a);
+                               set_Sub_right(n, b);
+
+                               goto restart;
+                       }
+               }
+       }
        /* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
-       if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
+       else if (mode_is_num(mode) && mode == get_irn_mode(a) && (classify_Const(a) == CNST_NULL)) {
                n = new_rd_Minus(
                                get_irn_dbg_info(n),
                                current_ir_graph,
@@ -2136,7 +2162,6 @@ static ir_node *transform_node_Sub(ir_node *n) {
                set_Sub_right(n, add);
                DBG_OPT_ALGSIM0(n, n, FS_OPT_SUB_SUB_X_Y_Z);
        }
-
        return n;
 }  /* transform_node_Sub */
 
@@ -2167,6 +2192,24 @@ static ir_node *transform_node_Mul(ir_node *n) {
                        return n;
                }
        }
+       if (get_mode_arithmetic(mode) == irma_ieee754) {
+               if (is_Const(a)) {
+                       tarval *tv = get_Const_tarval(a);
+                       if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
+                               n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), b, b, mode);
+                               DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
+                               return n;
+                       }
+               }
+               else if (is_Const(b)) {
+                       tarval *tv = get_Const_tarval(b);
+                       if (tarval_ieee754_get_exponent(tv) == 1 && tarval_ieee754_zero_mantissa(tv)) {
+                               n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, a, mode);
+                               DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_ADD_A_A);
+                               return n;
+                       }
+               }
+       }
        return arch_dep_replace_mul_with_shifts(n);
 }  /* transform_node_Mul */
 
@@ -2175,22 +2218,51 @@ static ir_node *transform_node_Mul(ir_node *n) {
  */
 static ir_node *transform_node_Div(ir_node *n) {
        tarval *tv = value_of(n);
+       ir_mode *mode = get_Div_resmode(n);
        ir_node *value = n;
 
-       /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause a exception */
-
        if (tv != tarval_bad) {
                value = new_Const(get_tarval_mode(tv), tv);
 
                DBG_OPT_CSTEVAL(n, value);
-       } else /* Try architecture dependent optimization */
-               value = arch_dep_replace_div_by_const(n);
+               goto make_tuple;
+       } else {
+               ir_node *a = get_Div_left(n);
+               ir_node *b = get_Div_right(n);
+               ir_node *dummy;
+
+               if (a == b && value_not_zero(a, &dummy)) {
+                       /* BEWARE: we can optimize a/a to 1 only if this cannot cause an exception */
+                       value = new_Const(mode, get_mode_one(mode));
+                       DBG_OPT_CSTEVAL(n, value);
+                       goto make_tuple;
+               } else {
+                       if (mode_is_signed(mode) && is_Const(b)) {
+                               tarval *tv = get_Const_tarval(b);
+
+                               if (tv == get_mode_minus_one(mode)) {
+                                       /* a / -1 */
+                                       value = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+                                       DBG_OPT_CSTEVAL(n, value);
+                                       goto make_tuple;
+                               }
+                       }
+                       /* Try architecture dependent optimization */
+                       value = arch_dep_replace_div_by_const(n);
+               }
+       }
 
        if (value != n) {
+               ir_node *mem, *blk;
+
+make_tuple:
                /* Turn Div into a tuple (mem, jmp, bad, value) */
-               ir_node *mem = get_Div_mem(n);
-               ir_node *blk = get_irn_n(n, -1);
+               mem = get_Div_mem(n);
+               blk = get_irn_n(n, -1);
 
+               /* skip a potential Pin */
+               if (is_Pin(mem))
+                       mem = get_Pin_op(mem);
                turn_into_tuple(n, pn_Div_max);
                set_Tuple_pred(n, pn_Div_M,         mem);
                set_Tuple_pred(n, pn_Div_X_regular, new_r_Jmp(current_ir_graph, blk));
@@ -2205,22 +2277,51 @@ static ir_node *transform_node_Div(ir_node *n) {
  */
 static ir_node *transform_node_Mod(ir_node *n) {
        tarval *tv = value_of(n);
+       ir_mode *mode = get_Mod_resmode(n);
        ir_node *value = n;
 
-       /* BEWARE: it is NOT possible to optimize a%a to 0, as this may cause a exception */
-
        if (tv != tarval_bad) {
                value = new_Const(get_tarval_mode(tv), tv);
 
                DBG_OPT_CSTEVAL(n, value);
-       } else /* Try architecture dependent optimization */
-               value = arch_dep_replace_mod_by_const(n);
+               goto make_tuple;
+       } else {
+               ir_node *a = get_Mod_left(n);
+               ir_node *b = get_Mod_right(n);
+               ir_node *dummy;
+
+               if (a == b && value_not_zero(a, &dummy)) {
+                       /* BEWARE: we can optimize a%a to 0 only if this cannot cause an exception */
+                       value = new_Const(mode, get_mode_null(mode));
+                       DBG_OPT_CSTEVAL(n, value);
+                       goto make_tuple;
+               } else {
+                       if (mode_is_signed(mode) && is_Const(b)) {
+                               tarval *tv = get_Const_tarval(b);
+
+                               if (tv == get_mode_minus_one(mode)) {
+                                       /* a % -1 = 0 */
+                                       value = new_Const(mode, get_mode_null(mode));
+                                       DBG_OPT_CSTEVAL(n, value);
+                                       goto make_tuple;
+                               }
+                       }
+                       /* Try architecture dependent optimization */
+                       value = arch_dep_replace_mod_by_const(n);
+               }
+       }
 
        if (value != n) {
+               ir_node *mem, *blk;
+
+make_tuple:
                /* Turn Mod into a tuple (mem, jmp, bad, value) */
-               ir_node *mem = get_Mod_mem(n);
-               ir_node *blk = get_irn_n(n, -1);
+               mem = get_Mod_mem(n);
+               blk = get_irn_n(n, -1);
 
+               /* skip a potential Pin */
+               if (is_Pin(mem))
+                       mem = get_Pin_op(mem);
                turn_into_tuple(n, pn_Mod_max);
                set_Tuple_pred(n, pn_Mod_M,         mem);
                set_Tuple_pred(n, pn_Mod_X_regular, new_r_Jmp(current_ir_graph, blk));
@@ -2234,51 +2335,69 @@ static ir_node *transform_node_Mod(ir_node *n) {
  * Transform a DivMod node.
  */
 static ir_node *transform_node_DivMod(ir_node *n) {
-       int evaluated = 0;
-
+       ir_node *dummy;
        ir_node *a = get_DivMod_left(n);
        ir_node *b = get_DivMod_right(n);
-       ir_mode *mode = get_irn_mode(a);
+       ir_mode *mode = get_DivMod_resmode(n);
        tarval *ta = value_of(a);
        tarval *tb = value_of(b);
-
-       if (!(mode_is_int(mode) && mode_is_int(get_irn_mode(b))))
-               return n;
-
-       /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause a exception */
+       int evaluated = 0;
 
        if (tb != tarval_bad) {
                if (tb == get_mode_one(get_tarval_mode(tb))) {
-                       b = new_Const (mode, get_mode_null(mode));
-                       evaluated = 1;
-
+                       b = new_Const(mode, get_mode_null(mode));
                        DBG_OPT_CSTEVAL(n, b);
+                       goto make_tuple;
                } else if (ta != tarval_bad) {
                        tarval *resa, *resb;
-                       resa = tarval_div (ta, tb);
+                       resa = tarval_div(ta, tb);
                        if (resa == tarval_bad) return n; /* Causes exception!!! Model by replacing through
                                                             Jmp for X result!? */
-                       resb = tarval_mod (ta, tb);
+                       resb = tarval_mod(ta, tb);
                        if (resb == tarval_bad) return n; /* Causes exception! */
-                       a = new_Const (mode, resa);
-                       b = new_Const (mode, resb);
-                       evaluated = 1;
-
+                       a = new_Const(mode, resa);
+                       b = new_Const(mode, resb);
+                       DBG_OPT_CSTEVAL(n, a);
+                       DBG_OPT_CSTEVAL(n, b);
+                       goto make_tuple;
+               } else if (mode_is_signed(mode) && tb == get_mode_minus_one(mode)) {
+                       a = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, get_irn_n(n, -1), a, mode);
+                       b = new_Const(mode, get_mode_null(mode));
                        DBG_OPT_CSTEVAL(n, a);
                        DBG_OPT_CSTEVAL(n, b);
+                       goto make_tuple;
                } else { /* Try architecture dependent optimization */
                        arch_dep_replace_divmod_by_const(&a, &b, n);
                        evaluated = a != NULL;
                }
-       } else if (ta == get_mode_null(mode)) {
+       } else if (a == b) {
+               if (value_not_zero(a, &dummy)) {
+                       /* a/a && a != 0 */
+                       a = new_Const(mode, get_mode_one(mode));
+                       b = new_Const(mode, get_mode_null(mode));
+                       DBG_OPT_CSTEVAL(n, a);
+                       DBG_OPT_CSTEVAL(n, b);
+                       goto make_tuple;
+               } else {
+                       /* BEWARE: it is NOT possible to optimize a/a to 1, as this may cause an exception */
+                       return n;
+               }
+       } else if (ta == get_mode_null(mode) && value_not_zero(b, &dummy)) {
                /* 0 / non-Const = 0 */
                b = a;
-               evaluated = 1;
+               goto make_tuple;
        }
 
        if (evaluated) { /* replace by tuple */
-               ir_node *mem = get_DivMod_mem(n);
-               ir_node *blk = get_irn_n(n, -1);
+               ir_node *mem, *blk;
+
+make_tuple:
+               mem = get_DivMod_mem(n);
+               /* skip a potential Pin */
+               if (is_Pin(mem))
+                       mem = get_Pin_op(mem);
+
+               blk = get_irn_n(n, -1);
                turn_into_tuple(n, pn_DivMod_max);
                set_Tuple_pred(n, pn_DivMod_M,         mem);
                set_Tuple_pred(n, pn_DivMod_X_regular, new_r_Jmp(current_ir_graph, blk));
@@ -2290,6 +2409,46 @@ static ir_node *transform_node_DivMod(ir_node *n) {
        return n;
 }  /* transform_node_DivMod */
 
+/**
+ * Optimize x / c to x * (1/c)
+ */
+static ir_node *transform_node_Quot(ir_node *n) {
+       ir_mode *mode = get_Quot_resmode(n);
+       ir_node *oldn = n;
+
+       if (get_mode_arithmetic(mode) == irma_ieee754) {
+               ir_node *b = get_Quot_right(n);
+
+               if (is_Const(b)) {
+                       tarval *tv = get_Const_tarval(b);
+
+                       tv = tarval_quo(get_mode_one(mode), tv);
+
+                       /* Do the transformation if the result is either exact or we are not
+                          using strict rules. */
+                       if (tv != tarval_bad &&
+                           (tarval_ieee754_get_exact() || (get_irg_fp_model(current_ir_graph) & fp_strict_algebraic) == 0)) {
+                               ir_node *blk = get_irn_n(n, -1);
+                               ir_node *c = new_r_Const(current_ir_graph, blk, mode, tv);
+                               ir_node *a = get_Quot_left(n);
+                               ir_node *m = new_rd_Mul(get_irn_dbg_info(n), current_ir_graph, blk, a, c, mode);
+                               ir_node *mem = get_Quot_mem(n);
+
+                               /* skip a potential Pin */
+                               if (is_Pin(mem))
+                                       mem = get_Pin_op(mem);
+                               turn_into_tuple(n, pn_Quot_max);
+                               set_Tuple_pred(n, pn_Quot_M, mem);
+                               set_Tuple_pred(n, pn_Quot_X_regular, new_r_Jmp(current_ir_graph, blk));
+                               set_Tuple_pred(n, pn_Quot_X_except,  new_r_Bad(current_ir_graph));
+                               set_Tuple_pred(n, pn_Quot_res, m);
+                               DBG_OPT_ALGSIM1(oldn, a, b, m, FS_OPT_FP_INV_MUL);
+                       }
+               }
+       }
+       return n;
+}  /* transform_node_Quot */
+
 /**
  * Optimize Abs(x) into  x if x is Confirmed >= 0
  * Optimize Abs(x) into -x if x is Confirmed <= 0
@@ -2359,15 +2518,123 @@ static ir_node *transform_node_Cond(ir_node *n) {
        return n;
 }  /* transform_node_Cond */
 
+typedef ir_node* (*recursive_transform) (ir_node *n);
+
+/**
+ * makes use of distributive laws for and, or, eor
+ *     and(a OP c, b OP c) -> and(a, b) OP c
+ */
+static ir_node *transform_bitwise_distributive(ir_node *n,
+                                               recursive_transform trans_func)
+{
+       ir_node *oldn    = n;
+       ir_node *a       = get_binop_left(n);
+       ir_node *b       = get_binop_right(n);
+       ir_op   *op      = get_irn_op(a);
+       ir_op   *op_root = get_irn_op(n);
+
+       if(op != get_irn_op(b))
+               return n;
+
+       if (op == op_Conv) {
+               ir_node *a_op   = get_Conv_op(a);
+               ir_node *b_op   = get_Conv_op(b);
+               ir_mode *a_mode = get_irn_mode(a_op);
+               ir_mode *b_mode = get_irn_mode(b_op);
+               if(a_mode == b_mode && (mode_is_int(a_mode) || a_mode == mode_b)) {
+                       ir_node *blk = get_irn_n(n, -1);
+
+                       n = exact_copy(n);
+                       set_binop_left(n, a_op);
+                       set_binop_right(n, b_op);
+                       set_irn_mode(n, a_mode);
+                       n = trans_func(n);
+                       n = new_r_Conv(current_ir_graph, blk, n, get_irn_mode(oldn));
+
+                       DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_SHIFT_AND);
+                       return n;
+               }
+       }
+
+       if (op == op_Eor) {
+               /* nothing to gain here */
+               return n;
+       }
+
+       if (op == op_Shrs || op == op_Shr || op == op_Shl
+                       || op == op_And || op == op_Or || op == op_Eor) {
+               ir_node *a_left  = get_binop_left(a);
+               ir_node *a_right = get_binop_right(a);
+               ir_node *b_left  = get_binop_left(b);
+               ir_node *b_right = get_binop_right(b);
+               ir_node *c       = NULL;
+               ir_node *op1, *op2;
+
+               if (is_op_commutative(op)) {
+                       if (a_left == b_left) {
+                               c   = a_left;
+                               op1 = a_right;
+                               op2 = b_right;
+                       } else if(a_left == b_right) {
+                               c   = a_left;
+                               op1 = a_right;
+                               op2 = b_left;
+                       } else if(a_right == b_left) {
+                               c   = a_right;
+                               op1 = a_left;
+                               op2 = b_right;
+                       }
+               }
+               if(a_right == b_right) {
+                       c   = a_right;
+                       op1 = a_left;
+                       op2 = b_left;
+               }
+
+               if (c != NULL) {
+                       /* (a sop c) & (b sop c) => (a & b) sop c */
+                       ir_node *blk = get_irn_n(n, -1);
+
+                       ir_node *new_n = exact_copy(n);
+                       set_binop_left(new_n, op1);
+                       set_binop_right(new_n, op2);
+                       new_n = trans_func(new_n);
+
+                       if(op_root == op_Eor && op == op_Or) {
+                               dbg_info  *dbgi = get_irn_dbg_info(n);
+                               ir_graph  *irg  = current_ir_graph;
+                               ir_mode   *mode = get_irn_mode(c);
+
+                               c = new_rd_Not(dbgi, irg, blk, c, mode);
+                               n = new_rd_And(dbgi, irg, blk, new_n, c, mode);
+                       } else {
+                               n = exact_copy(a);
+                               set_irn_n(n, -1, blk);
+                               set_binop_left(n, new_n);
+                               set_binop_right(n, c);
+                       }
+
+
+                       DBG_OPT_ALGSIM1(oldn, a, b, n, FS_OPT_SHIFT_AND);
+                       return n;
+               }
+       }
+
+       return n;
+}
+
 /**
  * Transform an And.
  */
 static ir_node *transform_node_And(ir_node *n) {
-       ir_node *c, *oldn = n;
+       ir_node *c, *oldn;
        ir_node *a = get_And_left(n);
        ir_node *b = get_And_right(n);
 
        HANDLE_BINOP_PHI(tarval_and, a,b,c);
+
+       n = transform_bitwise_distributive(n, transform_node_And);
+
        return n;
 }  /* transform_node_And */
 
@@ -2404,6 +2671,8 @@ static ir_node *transform_node_Eor(ir_node *n) {
                n = new_r_Not(current_ir_graph, get_irn_n(n, -1), a, mode_b);
 
                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_EOR_TO_NOT);
+       } else {
+               n = transform_bitwise_distributive(n, transform_node_Eor);
        }
 
        return n;
@@ -2415,30 +2684,54 @@ static ir_node *transform_node_Eor(ir_node *n) {
 static ir_node *transform_node_Not(ir_node *n) {
        ir_node *c, *oldn = n;
        ir_node *a = get_Not_op(n);
+       ir_op *op_a = get_irn_op(a);
 
        HANDLE_UNOP_PHI(tarval_not,a,c);
 
        /* check for a boolean Not */
        if (   (get_irn_mode(n) == mode_b)
-           && (get_irn_op(a) == op_Proj)
+           && (op_a == op_Proj)
            && (get_irn_mode(a) == mode_b)
            && (get_irn_op(get_Proj_pred(a)) == op_Cmp)) {
                /* We negate a Cmp. The Cmp has the negated result anyways! */
                n = new_r_Proj(current_ir_graph, get_irn_n(n, -1), get_Proj_pred(a),
                                mode_b, get_negated_pnc(get_Proj_proj(a), mode_b));
                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_CMP);
+                return n;
+       }
+       if (op_a == op_Sub && classify_Const(get_Sub_right(a)) == CNST_ONE) {
+               /* ~(x-1) = -x */
+               ir_node *op = get_Sub_left(a);
+               ir_node *blk = get_irn_n(n, -1);
+               n = new_rd_Minus(get_irn_dbg_info(n), current_ir_graph, blk, op, get_irn_mode(n));
+               DBG_OPT_ALGSIM0(oldn, n, FS_OPT_NOT_MINUS_1);
        }
        return n;
 }  /* transform_node_Not */
 
 /**
  * Transform a Minus.
+ * Optimize:
+ *   -(~x) = x + 1
  */
 static ir_node *transform_node_Minus(ir_node *n) {
        ir_node *c, *oldn = n;
        ir_node *a = get_Minus_op(n);
+       ir_mode *mode;
 
        HANDLE_UNOP_PHI(tarval_neg,a,c);
+
+       mode = get_irn_mode(a);
+       if (get_mode_arithmetic(mode) == irma_twos_complement && is_Not(a)) {
+               /* -(~x) = x + 1 */
+               ir_node *op   = get_Not_op(a);
+               tarval *tv    = get_mode_one(mode);
+               ir_node *blk  = get_irn_n(n, -1);
+               ir_node *c    = new_r_Const(current_ir_graph, blk, mode, tv);
+               n = new_rd_Add(get_irn_dbg_info(n), current_ir_graph, blk, op, c, mode);
+               DBG_OPT_ALGSIM2(oldn, a, n, FS_OPT_MINUS_NOT);
+       }
+
        return n;
 }  /* transform_node_Minus */
 
@@ -2697,10 +2990,12 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
                                    (!mode_overflow_on_unary_Minus(mode) ||
                                    (mode_is_int(mode) && (proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg)))) {
                                        left = get_Minus_op(left);
-                                       tv = tarval_sub(get_mode_null(mode), tv);
+                                       tv = tarval_neg(tv);
 
-                                       proj_nr = get_inversed_pnc(proj_nr);
-                                       changed |= 2;
+                                       if (tv != tarval_bad) {
+                                               proj_nr = get_inversed_pnc(proj_nr);
+                                               changed |= 2;
+                                       }
                                }
 
                                /* for integer modes, we have more */
@@ -2717,16 +3012,20 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
                                                tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Gt) {
                                                tv = tarval_sub(tv, get_mode_one(mode));
 
-                                               proj_nr ^= pn_Cmp_Eq;
-                                               changed |= 2;
+                                               if (tv != tarval_bad) {
+                                                       proj_nr ^= pn_Cmp_Eq;
+                                                       changed |= 2;
+                                               }
                                        }
                                        /* c < 0 : a > c  ==>  a >= (c+1)    a <= c  ==>  a < (c+1) */
                                        else if ((proj_nr == pn_Cmp_Gt || proj_nr == pn_Cmp_Le) &&
                                                tarval_cmp(tv, get_mode_null(mode)) == pn_Cmp_Lt) {
                                                tv = tarval_add(tv, get_mode_one(mode));
 
-                                               proj_nr ^= pn_Cmp_Eq;
-                                               changed |= 2;
+                                               if (tv != tarval_bad) {
+                                                       proj_nr ^= pn_Cmp_Eq;
+                                                       changed |= 2;
+                                               }
                                        }
 
                                        /* the following reassociations work only for == and != */
@@ -2738,7 +3037,9 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
                                                        left  = get_Sub_left(left);
 
                                                        tv = value_of(right);
-                                                       changed = 1;
+                                                       if (tv != tarval_bad) {
+                                                               changed = 1;
+                                                       }
                                                }
 
                                                if (tv != tarval_bad) {
@@ -2818,7 +3119,7 @@ static ir_node *transform_node_Proj_Cmp(ir_node *proj) {
                                 */
                                if ((proj_nr == pn_Cmp_Eq || proj_nr == pn_Cmp_Lg) &&
                                    (get_irn_op(left) == op_And)) {
-                                       if (is_single_bit_tarval(tv)) {
+                                       if (tarval_is_single_bit(tv)) {
                                                /* check for Constant's match. We have check hare the tarvals,
                                                   because our const might be changed */
                                                ir_node *la = get_And_left(left);
@@ -2956,6 +3257,11 @@ static void get_comm_Binop_Ops(ir_node *binop, ir_node **a, ir_node **c) {
  *        OR     c2      ===>               OR
  *           AND    c1
  *               OR
+ *
+ *
+ * value  c2                 value  c1
+ *     AND   c1    ===>           OR     if (c1 | c2) == 0x111..11
+ *        OR
  */
 static ir_node *transform_node_Or_bf_store(ir_node *or) {
        ir_node *and, *c1;
@@ -2967,63 +3273,78 @@ static ir_node *transform_node_Or_bf_store(ir_node *or) {
 
        tarval *tv1, *tv2, *tv3, *tv4, *tv, *n_tv4, *n_tv2;
 
-       get_comm_Binop_Ops(or, &and, &c1);
-       if ((get_irn_op(c1) != op_Const) || (get_irn_op(and) != op_And))
-               return or;
+       while (1) {
+               get_comm_Binop_Ops(or, &and, &c1);
+               if (!is_Const(c1) || !is_And(and))
+                       return or;
 
-       get_comm_Binop_Ops(and, &or_l, &c2);
-       if ((get_irn_op(c2) != op_Const) || (get_irn_op(or_l) != op_Or))
-               return or;
+               get_comm_Binop_Ops(and, &or_l, &c2);
+               if (!is_Const(c2))
+                       return or;
 
-       get_comm_Binop_Ops(or_l, &and_l, &c3);
-       if ((get_irn_op(c3) != op_Const) || (get_irn_op(and_l) != op_And))
-               return or;
+               tv1 = get_Const_tarval(c1);
+               tv2 = get_Const_tarval(c2);
 
-       get_comm_Binop_Ops(and_l, &value, &c4);
-       if (get_irn_op(c4) != op_Const)
-               return or;
+               tv = tarval_or(tv1, tv2);
+               if (classify_tarval(tv) == TV_CLASSIFY_ALL_ONE) {
+                       /* the AND does NOT clear a bit which isn't set by the OR */
+                       set_Or_left(or, or_l);
+                       set_Or_right(or, c1);
 
-       /* ok, found the pattern, check for conditions */
-       assert(mode == get_irn_mode(and));
-       assert(mode == get_irn_mode(or_l));
-       assert(mode == get_irn_mode(and_l));
+                       /* check for more */
+                       continue;
+               }
 
-       tv1 = get_Const_tarval(c1);
-       tv2 = get_Const_tarval(c2);
-       tv3 = get_Const_tarval(c3);
-       tv4 = get_Const_tarval(c4);
+               if (!is_Or(or_l))
+                       return or;
 
-       tv = tarval_or(tv4, tv2);
-       if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
-               /* have at least one 0 at the same bit position */
-               return or;
-       }
+               get_comm_Binop_Ops(or_l, &and_l, &c3);
+               if (!is_Const(c3) || !is_And(and_l))
+                       return or;
 
-       n_tv4 = tarval_not(tv4);
-       if (tv3 != tarval_and(tv3, n_tv4)) {
-               /* bit in the or_mask is outside the and_mask */
-               return or;
-       }
+               get_comm_Binop_Ops(and_l, &value, &c4);
+               if (!is_Const(c4))
+                       return or;
 
-       n_tv2 = tarval_not(tv2);
-       if (tv1 != tarval_and(tv1, n_tv2)) {
-               /* bit in the or_mask is outside the and_mask */
-               return or;
-       }
+               /* ok, found the pattern, check for conditions */
+               assert(mode == get_irn_mode(and));
+               assert(mode == get_irn_mode(or_l));
+               assert(mode == get_irn_mode(and_l));
+
+               tv3 = get_Const_tarval(c3);
+               tv4 = get_Const_tarval(c4);
 
-       /* ok, all conditions met */
-       block = get_irn_n(or, -1);
+               tv = tarval_or(tv4, tv2);
+               if (classify_tarval(tv) != TV_CLASSIFY_ALL_ONE) {
+                       /* have at least one 0 at the same bit position */
+                       return or;
+               }
+
+               n_tv4 = tarval_not(tv4);
+               if (tv3 != tarval_and(tv3, n_tv4)) {
+                       /* bit in the or_mask is outside the and_mask */
+                       return or;
+               }
 
-       new_and = new_r_And(current_ir_graph, block,
-               value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
+               n_tv2 = tarval_not(tv2);
+               if (tv1 != tarval_and(tv1, n_tv2)) {
+                       /* bit in the or_mask is outside the and_mask */
+                       return or;
+               }
 
-       new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
+               /* ok, all conditions met */
+               block = get_irn_n(or, -1);
 
-       set_Or_left(or, new_and);
-       set_Or_right(or, new_const);
+               new_and = new_r_And(current_ir_graph, block,
+                       value, new_r_Const(current_ir_graph, block, mode, tarval_and(tv4, tv2)), mode);
 
-       /* check for more */
-       return transform_node_Or_bf_store(or);
+               new_const = new_r_Const(current_ir_graph, block, mode, tarval_or(tv3, tv1));
+
+               set_Or_left(or, new_and);
+               set_Or_right(or, new_const);
+
+               /* check for more */
+       }
 }  /* transform_node_Or_bf_store */
 
 /**
@@ -3145,6 +3466,10 @@ static ir_node *transform_node_Or(ir_node *n) {
 
        n = transform_node_Or_bf_store(n);
        n = transform_node_Or_Rot(n);
+       if (n != oldn)
+               return n;
+
+       n = transform_bitwise_distributive(n, transform_node_Or);
 
        return n;
 }  /* transform_node_Or */
@@ -3283,6 +3608,44 @@ static ir_node *transform_node_Mux(ir_node *n) {
        ir_node *oldn = n, *sel = get_Mux_sel(n);
        ir_mode *mode = get_irn_mode(n);
 
+       if (mode == mode_b) {
+               ir_node  *t     = get_Mux_true(n);
+               ir_node  *f     = get_Mux_false(n);
+               dbg_info *dbg   = get_irn_dbg_info(n);
+               ir_node  *block = get_irn_n(n, -1);
+               ir_graph *irg   = current_ir_graph;
+
+               if (is_Const(t)) {
+                       tarval *tv_t = get_Const_tarval(t);
+                       if (tv_t == tarval_b_true) {
+                               if (is_Const(f)) {
+                                       assert(get_Const_tarval(f) == tarval_b_false);
+                                       return sel;
+                               } else {
+                                       return new_rd_Or(dbg, irg, block, sel, f, mode_b);
+                               }
+                       } else {
+                               ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+                               assert(tv_t == tarval_b_false);
+                               if (is_Const(f)) {
+                                       assert(get_Const_tarval(f) == tarval_b_true);
+                                       return not_sel;
+                               } else {
+                                       return new_rd_And(dbg, irg, block, not_sel, f, mode_b);
+                               }
+                       }
+               } else if (is_Const(f)) {
+                       tarval *tv_f = get_Const_tarval(f);
+                       if (tv_f == tarval_b_true) {
+                               ir_node* not_sel = new_rd_Not(dbg, irg, block, sel, mode_b);
+                               return new_rd_Or(dbg, irg, block, not_sel, t, mode_b);
+                       } else {
+                               assert(tv_f == tarval_b_false);
+                               return new_rd_And(dbg, irg, block, sel, t, mode_b);
+                       }
+               }
+       }
+
        if (get_irn_op(sel) == op_Proj && !mode_honor_signed_zeros(mode)) {
                ir_node *cmp = get_Proj_pred(sel);
                long proj_nr = get_Proj_proj(sel);
@@ -3422,8 +3785,19 @@ static ir_node *transform_node_Psi(ir_node *n) {
  * not be freed even if the equivalent node isn't the old one.
  */
 static ir_node *transform_node(ir_node *n) {
-       if (n->op->ops.transform_node)
-               n = n->op->ops.transform_node(n);
+       ir_node *oldn;
+
+       /*
+        * Transform_node is the only "optimizing transformation" that might
+        * return a node with a different opcode. We iterate HERE until fixpoint
+        * to get the final result.
+        */
+       do {
+               oldn = n;
+               if (n->op->ops.transform_node)
+                       n = n->op->ops.transform_node(n);
+       } while (oldn != n);
+
        return n;
 }  /* transform_node */
 
@@ -3450,6 +3824,7 @@ static ir_op_ops *firm_set_default_transform_node(ir_opcode code, ir_op_ops *ops
        CASE(Div);
        CASE(Mod);
        CASE(DivMod);
+       CASE(Quot);
        CASE(Abs);
        CASE(Cond);
        CASE(And);
@@ -3490,7 +3865,7 @@ static int node_cmp_attr_Const(ir_node *a, ir_node *b) {
 
 /** Compares the attributes of two Proj nodes. */
 static int node_cmp_attr_Proj(ir_node *a, ir_node *b) {
-       return get_irn_proj_attr (a) != get_irn_proj_attr (b);
+       return get_irn_proj_attr(a) != get_irn_proj_attr(b);
 }  /* node_cmp_attr_Proj */
 
 /** Compares the attributes of two Filter nodes. */
@@ -3500,21 +3875,25 @@ static int node_cmp_attr_Filter(ir_node *a, ir_node *b) {
 
 /** Compares the attributes of two Alloc nodes. */
 static int node_cmp_attr_Alloc(ir_node *a, ir_node *b) {
-       return (get_irn_alloc_attr(a).where != get_irn_alloc_attr(b).where)
-           || (get_irn_alloc_attr(a).type != get_irn_alloc_attr(b).type);
+       const alloc_attr *pa = get_irn_alloc_attr(a);
+       const alloc_attr *pb = get_irn_alloc_attr(b);
+       return (pa->where != pb->where) || (pa->type != pb->type);
 }  /* node_cmp_attr_Alloc */
 
 /** Compares the attributes of two Free nodes. */
 static int node_cmp_attr_Free(ir_node *a, ir_node *b) {
-       return (get_irn_free_attr(a).where != get_irn_free_attr(b).where)
-           || (get_irn_free_attr(a).type != get_irn_free_attr(b).type);
+       const free_attr *pa = get_irn_free_attr(a);
+       const free_attr *pb = get_irn_free_attr(b);
+       return (pa->where != pb->where) || (pa->type != pb->type);
 }  /* node_cmp_attr_Free */
 
 /** Compares the attributes of two SymConst nodes. */
 static int node_cmp_attr_SymConst(ir_node *a, ir_node *b) {
-       return (get_irn_symconst_attr(a).num != get_irn_symconst_attr(b).num)
-           || (get_irn_symconst_attr(a).sym.type_p != get_irn_symconst_attr(b).sym.type_p)
-           || (get_irn_symconst_attr(a).tp != get_irn_symconst_attr(b).tp);
+       const symconst_attr *pa = get_irn_symconst_attr(a);
+       const symconst_attr *pb = get_irn_symconst_attr(b);
+       return (pa->num        != pb->num)
+           || (pa->sym.type_p != pb->sym.type_p)
+           || (pa->tp         != pb->tp);
 }  /* node_cmp_attr_SymConst */
 
 /** Compares the attributes of two Call nodes. */
@@ -3524,11 +3903,14 @@ static int node_cmp_attr_Call(ir_node *a, ir_node *b) {
 
 /** Compares the attributes of two Sel nodes. */
 static int node_cmp_attr_Sel(ir_node *a, ir_node *b) {
-       return (get_irn_sel_attr(a).ent->kind  != get_irn_sel_attr(b).ent->kind)
-           || (get_irn_sel_attr(a).ent->name    != get_irn_sel_attr(b).ent->name)
-           || (get_irn_sel_attr(a).ent->owner   != get_irn_sel_attr(b).ent->owner)
-           || (get_irn_sel_attr(a).ent->ld_name != get_irn_sel_attr(b).ent->ld_name)
-           || (get_irn_sel_attr(a).ent->type    != get_irn_sel_attr(b).ent->type);
+       const ir_entity *a_ent = get_Sel_entity(a);
+       const ir_entity *b_ent = get_Sel_entity(b);
+       return
+               (a_ent->kind    != b_ent->kind)    ||
+               (a_ent->name    != b_ent->name)    ||
+               (a_ent->owner   != b_ent->owner)   ||
+               (a_ent->ld_name != b_ent->ld_name) ||
+               (a_ent->type    != b_ent->type);
 }  /* node_cmp_attr_Sel */
 
 /** Compares the attributes of two Phi nodes. */
@@ -3558,12 +3940,19 @@ static int node_cmp_attr_Load(ir_node *a, ir_node *b) {
            get_Load_volatility(b) == volatility_is_volatile)
                /* NEVER do CSE on volatile Loads */
                return 1;
+       /* do not CSE Loads with different alignment. Be conservative. */
+       if (get_Load_align(a) != get_Load_align(b))
+               return 1;
 
        return get_Load_mode(a) != get_Load_mode(b);
 }  /* node_cmp_attr_Load */
 
 /** Compares the attributes of two Store nodes. */
 static int node_cmp_attr_Store(ir_node *a, ir_node *b) {
+       /* do not CSE Stores with different alignment. Be conservative. */
+       if (get_Store_align(a) != get_Store_align(b))
+               return 1;
+
        /* NEVER do CSE on volatile Stores */
        return (get_Store_volatility(a) == volatility_is_volatile ||
                get_Store_volatility(b) == volatility_is_volatile);
@@ -3581,7 +3970,7 @@ static int node_cmp_attr_ASM(ir_node *a, ir_node *b) {
        const ir_asm_constraint *cb;
        ident **cla, **clb;
 
-       if (get_ASM_text(a) != get_ASM_text(b));
+       if (get_ASM_text(a) != get_ASM_text(b))
                return 1;
 
        /* Should we really check the constraints here? Should be better, but is strange. */