valueset: Remove the unused link field.
diff --git a/ir/ir/iropt.c b/ir/ir/iropt.c
index f9958a1..03f53bd 100644
--- a/ir/ir/iropt.c
+++ b/ir/ir/iropt.c
@@ -51,6 +51,7 @@
 #include "bitfiddle.h"
 #include "be.h"
 #include "error.h"
+#include "firmstat_t.h"
 
 #include "entity_t.h"
 
@@ -181,48 +182,6 @@ static ir_tarval *computed_value_Sub(const ir_node *n)
        return tarval_bad;
 }
 
-/**
- * Return the value of a Carry.
- * Special : a op 0, 0 op b
- */
-static ir_tarval *computed_value_Carry(const ir_node *n)
-{
-       ir_node   *a  = get_binop_left(n);
-       ir_node   *b  = get_binop_right(n);
-       ir_mode   *m  = get_irn_mode(n);
-       ir_tarval *ta = value_of(a);
-       ir_tarval *tb = value_of(b);
-
-       if ((ta != tarval_bad) && (tb != tarval_bad)) {
-               tarval_add(ta, tb);
-               return tarval_carry() ? get_mode_one(m) : get_mode_null(m);
-       } else {
-               if (tarval_is_null(ta) || tarval_is_null(tb))
-                       return get_mode_null(m);
-       }
-       return tarval_bad;
-}
-
-/**
- * Return the value of a Borrow.
- * Special : a op 0
- */
-static ir_tarval *computed_value_Borrow(const ir_node *n)
-{
-       ir_node   *a  = get_binop_left(n);
-       ir_node   *b  = get_binop_right(n);
-       ir_mode   *m  = get_irn_mode(n);
-       ir_tarval *ta = value_of(a);
-       ir_tarval *tb = value_of(b);
-
-       if ((ta != tarval_bad) && (tb != tarval_bad)) {
-               return tarval_cmp(ta, tb) == ir_relation_less ? get_mode_one(m) : get_mode_null(m);
-       } else if (tarval_is_null(ta)) {
-               return get_mode_null(m);
-       }
-       return tarval_bad;
-}
-
 /**
  * Return the value of an unary Minus.
  */
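
The removed computed_value_Carry called tarval_add() purely for its side effect and read the carry back through tarval_carry(); computed_value_Borrow tested tarval_cmp(ta, tb) == ir_relation_less. Their registrations for op_Carry/op_Borrow are dropped near the end of this diff as well. The arithmetic facts the two helpers encoded need no flag side channel; a minimal sketch in plain C (illustrative, not libfirm API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Carry out of an unsigned addition: wrap-around implies a carry,
     * so the sum being smaller than an operand detects it without a
     * flags register. */
    static bool add_carries(uint32_t a, uint32_t b)
    {
        return a + b < a;
    }

    /* Borrow of an unsigned subtraction a - b: occurs exactly when
     * a < b, which is what the removed code tested via tarval_cmp. */
    static bool sub_borrows(uint32_t a, uint32_t b)
    {
        return a < b;
    }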
@@ -947,15 +906,13 @@ static ir_node *equivalent_node_Sub(ir_node *n)
  *   We handle it anyway here but the better way would be a
  *   flag. This would be needed for Pascal for instance.
  */
-static ir_node *equivalent_node_idempotent_unop(ir_node *n)
+static ir_node *equivalent_node_involution(ir_node *n)
 {
        ir_node *oldn = n;
        ir_node *pred = get_unop_op(n);
-
-       /* optimize symmetric unop */
        if (get_irn_op(pred) == get_irn_op(n)) {
                n = get_unop_op(pred);
-               DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_IDEM_UNARY);
+               DBG_OPT_ALGSIM2(oldn, pred, n, FS_OPT_INVOLUTION);
        }
        return n;
 }
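
The rename from equivalent_node_idempotent_unop to equivalent_node_involution (with FS_OPT_IDEM_UNARY becoming FS_OPT_INVOLUTION — presumably the reason for the new firmstat_t.h include above) fixes the terminology: an idempotent operation satisfies f(f(x)) = f(x), whereas Minus and Not satisfy f(f(x)) = x, which makes them involutions. Illustrative C, not libfirm code:

    /* Involutions: applying the operator twice yields the operand. */
    static int not_not(int x) { return ~~x;   /* folds to x */ }
    static int neg_neg(int x) { return -(-x); /* folds to x */ }

    /* An idempotent operation would instead satisfy f(f(x)) == f(x),
     * e.g. abs(abs(x)) == abs(x); that is not what is folded here. */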
@@ -1131,7 +1088,7 @@ static ir_node *equivalent_node_Phi(ir_node *n)
        ir_node *first_val = NULL; /* to shutup gcc */
 
        if (!get_opt_optimize() &&
-                       get_irg_phase_state(get_irn_irg(n)) != phase_building)
+           !irg_is_constrained(get_irn_irg(n), IR_GRAPH_CONSTRAINT_CONSTRUCTION))
                return n;
 
        n_preds = get_Phi_n_preds(n);
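
The phase-state query gives way to the newer graph-constraint API: get_irg_phase_state(irg) != phase_building is now expressed as !irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_CONSTRUCTION). The condition itself is unchanged — Phi simplification keeps running while the graph is under construction even with optimization switched off, presumably because SSA construction produces many trivially redundant Phis that must be folded away.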
@@ -1236,56 +1193,6 @@ static ir_node *equivalent_node_Proj_CopyB(ir_node *proj)
        return proj;
 }
 
-/**
- * Optimize Bounds(idx, idx, upper) into idx.
- */
-static ir_node *equivalent_node_Proj_Bound(ir_node *proj)
-{
-       ir_node *oldn  = proj;
-       ir_node *bound = get_Proj_pred(proj);
-       ir_node *idx   = get_Bound_index(bound);
-       ir_node *pred  = skip_Proj(idx);
-       int ret_tuple  = 0;
-
-       if (idx == get_Bound_lower(bound))
-               ret_tuple = 1;
-       else if (is_Bound(pred)) {
-               /*
-                * idx was Bounds checked previously, it is still valid if
-                * lower <= pred_lower && pred_upper <= upper.
-                */
-               ir_node *lower = get_Bound_lower(bound);
-               ir_node *upper = get_Bound_upper(bound);
-               if (get_Bound_lower(pred) == lower &&
-                       get_Bound_upper(pred) == upper) {
-                       /*
-                        * One could expect that we simply return the previous
-                        * Bound here. However, this would be wrong, as we could
-                        * add an exception Proj to a new location then.
-                        * So, we must turn it into a tuple.
-                        */
-                       ret_tuple = 1;
-               }
-       }
-       if (ret_tuple) {
-               /* Turn Bound into a tuple (mem, jmp, bad, idx) */
-               switch (get_Proj_proj(proj)) {
-               case pn_Bound_M:
-                       DBG_OPT_EXC_REM(proj);
-                       proj = get_Bound_mem(bound);
-                       break;
-               case pn_Bound_res:
-                       proj = idx;
-                       DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
-                       break;
-               default:
-                       /* cannot optimize pn_Bound_X_regular, handled in transform ... */
-                       break;
-               }
-       }
-       return proj;
-}
-
 /**
  * Does all optimizations on nodes that must be done on its Projs
  * because of creating new nodes.
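
equivalent_node_Proj_Bound above, its transform_node_Proj_Bound twin further down, and both registrations at the end of this diff are deleted wholesale, consistent with the Bound node being retired from the IR. For reference, the invariant the removed folding exploited, as a plain C sketch (roughly Bound's semantics, not libfirm API):

    #include <assert.h>

    /* A Bound node validated roughly lower <= idx < upper and yielded
     * idx. The removed code folded the check when idx was the lower
     * bound itself, or when an identical dominating Bound had already
     * validated the same idx against the same bounds. */
    static int bound_check(int idx, int lower, int upper)
    {
        assert(lower <= idx && idx < upper);
        return idx;
    }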
@@ -1511,44 +1418,40 @@ static ir_tarval *do_eval(eval_func eval, ir_tarval *a, ir_tarval *b, ir_mode *m
  */
 static ir_node *apply_binop_on_phi(ir_node *phi, ir_tarval *other, eval_func eval, ir_mode *mode, int left)
 {
-       ir_tarval *tv;
-       void      **res;
-       ir_node   *pred;
-       ir_graph  *irg;
-       int       i, n = get_irn_arity(phi);
-
-       NEW_ARR_A(void *, res, n);
+       int         n   = get_irn_arity(phi);
+       ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
        if (left) {
-               for (i = 0; i < n; ++i) {
-                       pred = get_irn_n(phi, i);
-                       tv   = get_Const_tarval(pred);
-                       tv   = do_eval(eval, other, tv, mode);
+               for (int i = 0; i < n; ++i) {
+                       ir_node   *pred = get_irn_n(phi, i);
+                       ir_tarval *tv   = get_Const_tarval(pred);
+                       tv = do_eval(eval, other, tv, mode);
 
                        if (tv == tarval_bad) {
                                /* folding failed, bad */
                                return NULL;
                        }
-                       res[i] = tv;
+                       tvs[i] = tv;
                }
        } else {
-               for (i = 0; i < n; ++i) {
-                       pred = get_irn_n(phi, i);
-                       tv   = get_Const_tarval(pred);
-                       tv   = do_eval(eval, tv, other, mode);
+               for (int i = 0; i < n; ++i) {
+                       ir_node   *pred = get_irn_n(phi, i);
+                       ir_tarval *tv   = get_Const_tarval(pred);
+                       tv = do_eval(eval, tv, other, mode);
 
                        if (tv == tarval_bad) {
                                /* folding failed, bad */
                                return 0;
                        }
-                       res[i] = tv;
+                       tvs[i] = tv;
                }
        }
-       irg = get_irn_irg(phi);
-       for (i = 0; i < n; ++i) {
-               pred = get_irn_n(phi, i);
-               res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
+       ir_graph *irg = get_irn_irg(phi);
+       ir_node **res = ALLOCAN(ir_node*, n);
+       for (int i = 0; i < n; ++i) {
+               res[i] = new_r_Const(irg, tvs[i]);
        }
-       return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
+       ir_node *block = get_nodes_block(phi);
+       return new_r_Phi(block, n, res, mode);
 }
 
 /**
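This and the following three apply_*_on_phi hunks are the same mechanical modernization: the untyped void **res array from NEW_ARR_A (with casts at every use) becomes two typed stack arrays obtained via ALLOCAN, declarations move to first use, and the loop counters get C99 for-scope. ALLOCAN is libfirm's typed alloca wrapper; roughly (see xmalloc.h for the real definition, shown here only as a sketch):

    #include <alloca.h>   /* non-standard, but what alloca-based
                           * allocation builds on with glibc */

    /* n elements of the given type on the stack; released automatically
     * when the enclosing function returns, so no explicit free. */
    #define ALLOCAN(type, n) ((type*)alloca(sizeof(type) * (n)))

    /* usage, as in the rewritten helper above:
     *   ir_tarval **tvs = ALLOCAN(ir_tarval*, n); */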
@@ -1563,37 +1466,31 @@ static ir_node *apply_binop_on_phi(ir_node *phi, ir_tarval *other, eval_func eva
  */
 static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir_mode *mode)
 {
-       ir_tarval *tv_l, *tv_r, *tv;
-       void     **res;
-       ir_node  *pred;
-       ir_graph *irg;
-       int      i, n;
-
        if (get_nodes_block(a) != get_nodes_block(b))
                return NULL;
 
-       n = get_irn_arity(a);
-       NEW_ARR_A(void *, res, n);
-
-       for (i = 0; i < n; ++i) {
-               pred = get_irn_n(a, i);
-               tv_l = get_Const_tarval(pred);
-               pred = get_irn_n(b, i);
-               tv_r = get_Const_tarval(pred);
-               tv   = do_eval(eval, tv_l, tv_r, mode);
+       int         n   = get_irn_arity(a);
+       ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
+       for (int i = 0; i < n; ++i) {
+               ir_node   *pred_a = get_irn_n(a, i);
+               ir_tarval *tv_l   = get_Const_tarval(pred_a);
+               ir_node   *pred_b = get_irn_n(b, i);
+               ir_tarval *tv_r   = get_Const_tarval(pred_b);
+               ir_tarval *tv     = do_eval(eval, tv_l, tv_r, mode);
 
                if (tv == tarval_bad) {
                        /* folding failed, bad */
                        return NULL;
                }
-               res[i] = tv;
+               tvs[i] = tv;
        }
-       irg = get_irn_irg(a);
-       for (i = 0; i < n; ++i) {
-               pred = get_irn_n(a, i);
-               res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
+       ir_graph *irg = get_irn_irg(a);
+       ir_node **res = ALLOCAN(ir_node*, n);
+       for (int i = 0; i < n; ++i) {
+               res[i] = new_r_Const(irg, tvs[i]);
        }
-       return new_r_Phi(get_nodes_block(a), n, (ir_node **)res, mode);
+       ir_node *block = get_nodes_block(a);
+       return new_r_Phi(block, n, res, mode);
 }
 
 /**
@@ -1606,32 +1503,27 @@ static ir_node *apply_binop_on_2_phis(ir_node *a, ir_node *b, eval_func eval, ir
  */
 static ir_node *apply_unop_on_phi(ir_node *phi, ir_tarval *(*eval)(ir_tarval *))
 {
-       ir_tarval *tv;
-       void     **res;
-       ir_node  *pred;
-       ir_mode  *mode;
-       ir_graph *irg;
-       int      i, n = get_irn_arity(phi);
-
-       NEW_ARR_A(void *, res, n);
-       for (i = 0; i < n; ++i) {
-               pred = get_irn_n(phi, i);
-               tv   = get_Const_tarval(pred);
-               tv   = eval(tv);
+       int         n   = get_irn_arity(phi);
+       ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
+       for (int i = 0; i < n; ++i) {
+               ir_node   *pred = get_irn_n(phi, i);
+               ir_tarval *tv   = get_Const_tarval(pred);
+               tv = eval(tv);
 
                if (tv == tarval_bad) {
                        /* folding failed, bad */
                        return 0;
                }
-               res[i] = tv;
+               tvs[i] = tv;
        }
-       mode = get_irn_mode(phi);
-       irg  = get_irn_irg(phi);
-       for (i = 0; i < n; ++i) {
-               pred = get_irn_n(phi, i);
-               res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
+       ir_graph *irg  = get_irn_irg(phi);
+       ir_node **res  = ALLOCAN(ir_node*, n);
+       for (int i = 0; i < n; ++i) {
+               res[i] = new_r_Const(irg, tvs[i]);
        }
-       return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
+       ir_node *block = get_nodes_block(phi);
+       ir_mode *mode  = get_irn_mode(phi);
+       return new_r_Phi(block, n, res, mode);
 }
 
 /**
@@ -1643,30 +1535,26 @@ static ir_node *apply_unop_on_phi(ir_node *phi, ir_tarval *(*eval)(ir_tarval *))
  */
 static ir_node *apply_conv_on_phi(ir_node *phi, ir_mode *mode)
 {
-       ir_tarval *tv;
-       void     **res;
-       ir_node  *pred;
-       ir_graph *irg;
-       int      i, n = get_irn_arity(phi);
-
-       NEW_ARR_A(void *, res, n);
-       for (i = 0; i < n; ++i) {
-               pred = get_irn_n(phi, i);
-               tv   = get_Const_tarval(pred);
-               tv   = tarval_convert_to(tv, mode);
+       int         n   = get_irn_arity(phi);
+       ir_tarval **tvs = ALLOCAN(ir_tarval*, n);
+       for (int i = 0; i < n; ++i) {
+               ir_node   *pred = get_irn_n(phi, i);
+               ir_tarval *tv   = get_Const_tarval(pred);
+               tv = tarval_convert_to(tv, mode);
 
                if (tv == tarval_bad) {
                        /* folding failed, bad */
                        return 0;
                }
-               res[i] = tv;
+               tvs[i] = tv;
        }
-       irg = get_irn_irg(phi);
-       for (i = 0; i < n; ++i) {
-               pred = get_irn_n(phi, i);
-               res[i] = new_r_Const(irg, (ir_tarval*)res[i]);
+       ir_graph *irg = get_irn_irg(phi);
+       ir_node **res = ALLOCAN(ir_node*, n);
+       for (int i = 0; i < n; ++i) {
+               res[i] = new_r_Const(irg, tvs[i]);
        }
-       return new_r_Phi(get_nodes_block(phi), n, (ir_node **)res, mode);
+       ir_node *block = get_nodes_block(phi);
+       return new_r_Phi(block, n, res, mode);
 }
 
 /**
@@ -2747,26 +2635,6 @@ restart:
                }
                DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
                return n;
-#if 0
-       } else if (is_Mul(b)) { /* a - (b * C) -> a + (b * -C) */
-               ir_node *m_right = get_Mul_right(b);
-               if (is_Const(m_right)) {
-                       ir_node *cnst2 = const_negate(m_right);
-                       if (cnst2 != NULL) {
-                               dbg_info *m_dbg   = get_irn_dbg_info(b);
-                               ir_node  *m_block = get_nodes_block(b);
-                               ir_node  *m_left  = get_Mul_left(b);
-                               ir_mode  *m_mode  = get_irn_mode(b);
-                               ir_node  *mul     = new_rd_Mul(m_dbg, m_block, m_left, cnst2, m_mode);
-                               dbg_info *a_dbg   = get_irn_dbg_info(n);
-                               ir_node  *a_block = get_nodes_block(n);
-
-                               n = new_rd_Add(a_dbg, a_block, a, mul, mode);
-                               DBG_OPT_ALGSIM0(oldn, n, FS_OPT_SUB_TO_ADD);
-                               return n;
-                       }
-               }
-#endif
        }
 
        /* Beware of Sub(P, P) which cannot be optimized into a simple Minus ... */
@@ -3185,34 +3053,22 @@ static ir_node *transform_node_Div(ir_node *n)
                assert(mode_is_float(mode));
 
                /* Optimize x/c to x*(1/c) */
-               if (get_mode_arithmetic(mode) == irma_ieee754) {
-                       ir_tarval *tv = value_of(b);
+               ir_tarval *tv = value_of(b);
 
-                       if (tv != tarval_bad) {
-                               int rem = tarval_fp_ops_enabled();
+               if (tv != tarval_bad) {
+                       tv = tarval_div(get_mode_one(mode), tv);
 
-                               /*
-                                * Floating point constant folding might be disabled here to
-                                * prevent rounding.
-                                * However, as we check for exact result, doing it is safe.
-                                * Switch it on.
-                                */
-                               tarval_enable_fp_ops(1);
-                               tv = tarval_div(get_mode_one(mode), tv);
-                               tarval_enable_fp_ops(rem);
-
-                               /* Do the transformation if the result is either exact or we are
-                                  not using strict rules. */
-                               if (tv != tarval_bad &&
-                                       (tarval_ieee754_get_exact() || (get_irg_fp_model(get_irn_irg(n)) & fp_strict_algebraic) == 0)) {
-                                       ir_node  *block = get_nodes_block(n);
-                                       ir_graph *irg   = get_irn_irg(block);
-                                       ir_node  *c     = new_r_Const(irg, tv);
-                                       dbg_info *dbgi  = get_irn_dbg_info(n);
-                                       value = new_rd_Mul(dbgi, block, a, c, mode);
+                       /* Do the transformation if the result is either exact or we are
+                          not using strict rules. */
+                       if (tv != tarval_bad &&
+                               (tarval_ieee754_get_exact() || (get_irg_fp_model(get_irn_irg(n)) & fp_strict_algebraic) == 0)) {
+                               ir_node  *block = get_nodes_block(n);
+                               ir_graph *irg   = get_irn_irg(block);
+                               ir_node  *c     = new_r_Const(irg, tv);
+                               dbg_info *dbgi  = get_irn_dbg_info(n);
+                               value = new_rd_Mul(dbgi, block, a, c, mode);
 
-                                       goto make_tuple;
-                               }
+                               goto make_tuple;
                        }
                }
        }
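
The Div rewrite drops the irma_ieee754 guard and the tarval_fp_ops_enabled()/tarval_enable_fp_ops() save-and-restore around the constant division; the remaining safety net is the exactness test. x/c only becomes x*(1/c) when 1/c is exact (or strict FP rules are off), and in binary IEEE-754 arithmetic an exact reciprocal essentially means c is a power of two. A hedged illustration in plain C, not libfirm API:

    #include <math.h>
    #include <stdbool.h>

    /* Ignoring overflow/underflow at the ends of the exponent range,
     * 1.0/c is exactly representable iff c is a power of two: frexp
     * decomposes c = m * 2^exp with 0.5 <= |m| < 1, and |m| == 0.5
     * singles out the powers of two. */
    static bool reciprocal_is_exact(double c)
    {
        int exp;
        return fabs(frexp(c, &exp)) == 0.5;
    }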
@@ -3354,8 +3210,8 @@ static ir_node *transform_node_Cond(ir_node *n)
                ta = compute_cmp_ext(a);
        }
 
-       if (ta != tarval_bad && get_irn_mode(a) == mode_b) {
-               /* It's a boolean Cond, branching on a boolean constant.
+       if (ta != tarval_bad) {
+               /* It's branching on a boolean constant.
                   Replace it by a tuple (Bad, Jmp) or (Jmp, Bad) */
                ir_node *blk = get_nodes_block(n);
                jmp = new_r_Jmp(blk);
@@ -3367,8 +3223,6 @@ static ir_node *transform_node_Cond(ir_node *n)
                        set_Tuple_pred(n, pn_Cond_false, jmp);
                        set_Tuple_pred(n, pn_Cond_true, new_r_Bad(irg, mode_X));
                }
-               /* We might generate an endless loop, so keep it alive. */
-               add_End_keepalive(get_irg_end(irg), blk);
                clear_irg_properties(irg, IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE);
        }
        return n;
@@ -4859,63 +4713,6 @@ static ir_node *transform_node_Proj_CopyB(ir_node *proj)
        return proj;
 }
 
-/**
- * Optimize Bounds(idx, idx, upper) into idx.
- */
-static ir_node *transform_node_Proj_Bound(ir_node *proj)
-{
-       ir_node *oldn  = proj;
-       ir_node *bound = get_Proj_pred(proj);
-       ir_node *idx   = get_Bound_index(bound);
-       ir_node *pred  = skip_Proj(idx);
-       int ret_tuple  = 0;
-
-       if (idx == get_Bound_lower(bound))
-               ret_tuple = 1;
-       else if (is_Bound(pred)) {
-               /*
-               * idx was Bounds checked previously, it is still valid if
-               * lower <= pred_lower && pred_upper <= upper.
-               */
-               ir_node *lower = get_Bound_lower(bound);
-               ir_node *upper = get_Bound_upper(bound);
-               if (get_Bound_lower(pred) == lower &&
-                       get_Bound_upper(pred) == upper) {
-                       /*
-                        * One could expect that we simply return the previous
-                        * Bound here. However, this would be wrong, as we could
-                        * add an exception Proj to a new location then.
-                        * So, we must turn it into a tuple.
-                        */
-                       ret_tuple = 1;
-               }
-       }
-       if (ret_tuple) {
-               /* Turn Bound into a tuple (mem, jmp, bad, idx) */
-               switch (get_Proj_proj(proj)) {
-               case pn_Bound_M:
-                       DBG_OPT_EXC_REM(proj);
-                       proj = get_Bound_mem(bound);
-                       break;
-               case pn_Bound_X_except:
-                       DBG_OPT_EXC_REM(proj);
-                       proj = new_r_Bad(get_irn_irg(proj), mode_X);
-                       break;
-               case pn_Bound_res:
-                       proj = idx;
-                       DBG_OPT_ALGSIM0(oldn, proj, FS_OPT_NOP);
-                       break;
-               case pn_Bound_X_regular:
-                       DBG_OPT_EXC_REM(proj);
-                       proj = new_r_Jmp(get_nodes_block(bound));
-                       break;
-               default:
-                       break;
-               }
-       }
-       return proj;
-}
-
 /**
  * Does all optimizations on nodes that must be done on its Projs
  * because of creating new nodes.
@@ -5511,16 +5308,8 @@ bool may_leave_out_middle_conv(ir_mode *m0, ir_mode *m1, ir_mode *m2)
 {
        int n_floats = mode_is_float(m0) + mode_is_float(m1) + mode_is_float(m2);
        if (n_floats == 1) {
-#if 0
-               int n_signed = mode_is_signed(m0) + mode_is_signed(m1)
-                            + mode_is_signed(m2);
-               /* we assume that float modes are always signed */
-               if ((n_signed & 1) != 1)
-                       return false;
-#else
                /* because overflow gives strange results we don't touch this case */
                return false;
-#endif
        } else if (n_floats == 2 && !mode_is_float(m1)) {
                return false;
        }
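
may_leave_out_middle_conv decides whether Conv(Conv(x: m0, m1), m2) may skip the inner conversion. The dead #if 0 branch experimented with a signedness-parity rule; the surviving code simply refuses whenever exactly one of the three modes is a float (and likewise for float-int-float chains), because the intermediate rounding or truncation is observable. A concrete C demonstration of the int-float-int case:

    #include <stdio.h>

    int main(void)
    {
        int   i = 16777217;  /* 2^24 + 1: not representable in float */
        float f = (float)i;  /* rounds to 16777216.0f */
        int   j = (int)f;    /* 16777216 != i, so int -> float -> int
                              * must keep the middle conversion */
        printf("%d -> %d\n", i, j);
        return 0;
    }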
@@ -5597,26 +5386,6 @@ static ir_node *transform_node_Conv(ir_node *n)
                }
        }
 
-       /* shrink mode of load if possible. */
-       if (is_Proj(a)) {
-               ir_node *pred = get_Proj_pred(a);
-               if (is_Load(pred)) {
-                       /* only do it if we are the only user (otherwise the risk is too
-                        * great that we end up with 2 loads instead of one). */
-                       ir_graph *irg = get_irn_irg(n);
-                       if (edges_activated(irg) && get_irn_n_edges(a) == 1) {
-                               ir_mode *load_mode = get_Load_mode(pred);
-                               if (!mode_is_float(load_mode) && !mode_is_float(mode) &&
-                                       get_mode_size_bits(mode) <= get_mode_size_bits(load_mode)
-                                       && !be_get_backend_param()->byte_order_big_endian) {
-                                       set_Load_mode(pred, mode);
-                                       set_irn_mode(a, mode);
-                                       return a;
-                               }
-                       }
-               }
-       }
-
        return n;
 }
 
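The deleted Conv block narrowed the mode of a single-user Load feeding the Conv, so the truncation happened in the load itself; the be_get_backend_param()->byte_order_big_endian bailout existed because re-reading fewer bytes at the same address yields the low-order part only on little-endian targets. At source level the removed transformation corresponds to (illustrative):

    /* What the removed narrowing did, roughly: */
    char narrow(const int *p)
    {
        return (char)*p;  /* the 32-bit Load shrinks to an 8-bit Load of
                           * the same address on little-endian targets;
                           * on big-endian the low byte lives at a
                           * different address, hence the bailout */
    }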
@@ -6117,66 +5886,87 @@ static ir_node *transform_node_Sync(ir_node *n)
        return n;
 }
 
+static ir_node *create_load_replacement_tuple(ir_node *n, ir_node *mem,
+                                              ir_node *res)
+{
+       ir_node  *block = get_nodes_block(n);
+       ir_graph *irg   = get_irn_irg(n);
+       ir_node  *in[pn_Load_max+1];
+       size_t    n_in  = 2;
+       in[pn_Load_M]   = mem;
+       in[pn_Load_res] = res;
+       if (ir_throws_exception(n)) {
+               in[pn_Load_X_regular] = new_r_Jmp(block);
+               in[pn_Load_X_except]  = new_r_Bad(irg, mode_X);
+               n_in                  = 4;
+               assert(pn_Load_max == 4);
+       }
+       ir_node  *tuple = new_r_Tuple(block, n_in, in);
+       return tuple;
+}
+
 static ir_node *transform_node_Load(ir_node *n)
 {
+       /* don't touch volatile loads */
+       if (get_Load_volatility(n) == volatility_is_volatile)
+               return n;
+
+       ir_node *ptr = get_Load_ptr(n);
+       const ir_node *confirm;
+       if (value_not_zero(ptr, &confirm) && confirm == NULL) {
+               set_irn_pinned(n, op_pin_state_floats);
+       }
+
        /* if our memory predecessor is a load from the same address, then reuse the
         * previous result */
        ir_node *mem = get_Load_mem(n);
-       ir_node *mem_pred;
-
        if (!is_Proj(mem))
                return n;
-       /* don't touch volatile loads */
-       if (get_Load_volatility(n) == volatility_is_volatile)
-               return n;
-       mem_pred = get_Proj_pred(mem);
+       ir_node *mem_pred = get_Proj_pred(mem);
        if (is_Load(mem_pred)) {
                ir_node *pred_load = mem_pred;
 
                /* conservatively compare the 2 loads. TODO: This could be less strict
                 * with fixup code in some situations (like smaller/bigger modes) */
-               if (get_Load_ptr(pred_load) != get_Load_ptr(n))
+               if (get_Load_ptr(pred_load) != ptr)
                        return n;
                if (get_Load_mode(pred_load) != get_Load_mode(n))
                        return n;
                /* all combinations of aligned/unaligned pred/n should be fine so we do
                 * not compare the unaligned attribute */
-               {
-                       ir_node  *block = get_nodes_block(n);
-                       ir_node  *jmp   = new_r_Jmp(block);
-                       ir_graph *irg   = get_irn_irg(n);
-                       ir_node  *bad   = new_r_Bad(irg, mode_X);
-                       ir_mode  *mode  = get_Load_mode(n);
-                       ir_node  *res   = new_r_Proj(pred_load, mode, pn_Load_res);
-                       ir_node  *in[]  = { mem, res, jmp, bad };
-                       ir_node  *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
-                       return tuple;
-               }
+               ir_mode  *mode  = get_Load_mode(n);
+               ir_node  *res   = new_r_Proj(pred_load, mode, pn_Load_res);
+               return create_load_replacement_tuple(n, mem, res);
        } else if (is_Store(mem_pred)) {
                ir_node *pred_store = mem_pred;
                ir_node *value      = get_Store_value(pred_store);
 
-               if (get_Store_ptr(pred_store) != get_Load_ptr(n))
+               if (get_Store_ptr(pred_store) != ptr)
                        return n;
                if (get_irn_mode(value) != get_Load_mode(n))
                        return n;
                /* all combinations of aligned/unaligned pred/n should be fine so we do
                 * not compare the unaligned attribute */
-               {
-                       ir_node  *block = get_nodes_block(n);
-                       ir_node  *jmp   = new_r_Jmp(block);
-                       ir_graph *irg   = get_irn_irg(n);
-                       ir_node  *bad   = new_r_Bad(irg, mode_X);
-                       ir_node  *res   = value;
-                       ir_node  *in[]  = { mem, res, jmp, bad };
-                       ir_node  *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
-                       return tuple;
-               }
+               return create_load_replacement_tuple(n, mem, value);
        }
 
        return n;
 }
 
+static ir_node *transform_node_Store(ir_node *n)
+{
+       /* don't touch volatile stores */
+       if (get_Store_volatility(n) == volatility_is_volatile)
+               return n;
+
+       ir_node *ptr = get_Store_ptr(n);
+       const ir_node *confirm;
+       if (value_not_zero(ptr, &confirm) && confirm == NULL) {
+               set_irn_pinned(n, op_pin_state_floats);
+       }
+       return n;
+}
+
 /**
  * optimize a trampoline Call into a direct Call
  */
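
The Load/Store hunk combines a refactoring with a new optimization: the duplicated inline Tuple construction moves into create_load_replacement_tuple, which now materializes the X_regular/X_except outputs only when ir_throws_exception(n) says the Load can actually throw, and a Load or Store whose pointer is provably non-null (value_not_zero() succeeding without a Confirm node) cannot trap, so it is unpinned via op_pin_state_floats and may float freely — the new transform_node_Store exists for exactly this. The load-after-store forwarding itself is unchanged; at source level it folds this pattern (illustrative):

    /* Pattern folded by the Store branch of transform_node_Load: */
    int forward(int *p)
    {
        *p = 42;     /* Store(p, 42) */
        return *p;   /* a Load directly behind the Store is replaced by
                      * a Tuple reusing the stored value, so no second
                      * memory access remains */
    }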
@@ -6350,8 +6140,6 @@ void ir_register_opt_node_ops(void)
 {
        register_computed_value_func(op_Add,      computed_value_Add);
        register_computed_value_func(op_And,      computed_value_And);
-       register_computed_value_func(op_Borrow,   computed_value_Borrow);
-       register_computed_value_func(op_Carry,    computed_value_Carry);
        register_computed_value_func(op_Cmp,      computed_value_Cmp);
        register_computed_value_func(op_Confirm,  computed_value_Confirm);
        register_computed_value_func(op_Const,    computed_value_Const);
@@ -6378,10 +6166,10 @@ void ir_register_opt_node_ops(void)
        register_equivalent_node_func(op_Conv,    equivalent_node_Conv);
        register_equivalent_node_func(op_Eor,     equivalent_node_Eor);
        register_equivalent_node_func(op_Id,      equivalent_node_Id);
-       register_equivalent_node_func(op_Minus,   equivalent_node_idempotent_unop);
+       register_equivalent_node_func(op_Minus,   equivalent_node_involution);
        register_equivalent_node_func(op_Mul,     equivalent_node_Mul);
        register_equivalent_node_func(op_Mux,     equivalent_node_Mux);
-       register_equivalent_node_func(op_Not,     equivalent_node_idempotent_unop);
+       register_equivalent_node_func(op_Not,     equivalent_node_involution);
        register_equivalent_node_func(op_Or,      equivalent_node_Or);
        register_equivalent_node_func(op_Phi,     equivalent_node_Phi);
        register_equivalent_node_func(op_Proj,    equivalent_node_Proj);
@@ -6390,7 +6178,6 @@ void ir_register_opt_node_ops(void)
        register_equivalent_node_func(op_Shr,     equivalent_node_left_zero);
        register_equivalent_node_func(op_Shrs,    equivalent_node_left_zero);
        register_equivalent_node_func(op_Sub,     equivalent_node_Sub);
-       register_equivalent_node_func_proj(op_Bound, equivalent_node_Proj_Bound);
        register_equivalent_node_func_proj(op_CopyB, equivalent_node_Proj_CopyB);
        register_equivalent_node_func_proj(op_Div,   equivalent_node_Proj_Div);
        register_equivalent_node_func_proj(op_Tuple, equivalent_node_Proj_Tuple);
@@ -6418,10 +6205,10 @@ void ir_register_opt_node_ops(void)
        register_transform_node_func(op_Shl,    transform_node_Shl);
        register_transform_node_func(op_Shrs,   transform_node_Shrs);
        register_transform_node_func(op_Shr,    transform_node_Shr);
+       register_transform_node_func(op_Store,  transform_node_Store);
        register_transform_node_func(op_Sub,    transform_node_Sub);
        register_transform_node_func(op_Switch, transform_node_Switch);
        register_transform_node_func(op_Sync,   transform_node_Sync);
-       register_transform_node_func_proj(op_Bound, transform_node_Proj_Bound);
        register_transform_node_func_proj(op_CopyB, transform_node_Proj_CopyB);
        register_transform_node_func_proj(op_Div,   transform_node_Proj_Div);
        register_transform_node_func_proj(op_Load,  transform_node_Proj_Load);
@@ -6475,6 +6262,10 @@ int identities_cmp(const void *elt, const void *key)
                        if (!block_dominates(block_a, block_b)
                            && !block_dominates(block_b, block_a))
                            return 1;
+                       /* respect the workaround rule: do not move nodes which are only
+                        * held by keepalive edges */
+                       if (only_used_by_keepalive(a) || only_used_by_keepalive(b))
+                               return 1;
                }
        }
 
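The new bail-out stops value numbering from unifying two otherwise identical nodes when either one is held alive only by keepalive edges: merging them would effectively move such a node, which the comment flags as forbidden. A hedged sketch of what a predicate like only_used_by_keepalive checks (not the real implementation; assumes libfirm's out-edge API with edges activated):

    #include <stdbool.h>
    #include "iredges.h"   /* foreach_out_edge, get_edge_src_irn */

    static bool only_by_keepalive_sketch(const ir_node *n)
    {
        /* every user is the End node, i.e. the node hangs off the
         * keep-alive list and nothing else */
        foreach_out_edge(n, edge) {
            if (!is_End(get_edge_src_irn(edge)))
                return false;
        }
        return true;
    }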
@@ -6767,8 +6558,6 @@ ir_node *optimize_in_place_2(ir_node *n)
 ir_node *optimize_in_place(ir_node *n)
 {
        ir_graph *irg = get_irn_irg(n);
-       /* Handle graph state */
-       assert(get_irg_phase_state(irg) != phase_building);
 
        if (get_opt_global_cse())
                set_irg_pinned(irg, op_pin_state_floats);