beifg: Simplify the implementation of be_ifg_foreach_node().
diff --git a/ir/opt/ldstopt.c b/ir/opt/ldstopt.c
index c77552e..e297788 100644
--- a/ir/opt/ldstopt.c
+++ b/ir/opt/ldstopt.c
@@ -1,31 +1,14 @@
 /*
- * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
- *
  * This file is part of libFirm.
- *
- * This file may be distributed and/or modified under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation and appearing in the file LICENSE.GPL included in the
- * packaging of this file.
- *
- * Licensees holding valid libFirm Professional Edition licenses may use
- * this file in accordance with the libFirm Commercial License.
- * Agreement provided with the Software.
- *
- * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
- * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE.
+ * Copyright (C) 2012 University of Karlsruhe.
  */
 
 /**
  * @file
  * @brief   Load/Store optimizations.
  * @author  Michael Beck
- * @version $Id$
  */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
 
 #include <string.h>
 
 #include "ircons_t.h"
 #include "irgmod.h"
 #include "irgwalk.h"
-#include "irvrfy.h"
+#include "irtools.h"
 #include "tv_t.h"
 #include "dbginfo_t.h"
 #include "iropt_dbg.h"
 #include "irflag_t.h"
-#include "array.h"
+#include "array_t.h"
 #include "irhooks.h"
 #include "iredges.h"
-#include "irtools.h"
-#include "opt_polymorphy.h"
+#include "irpass.h"
 #include "irmemory.h"
-#include "xmalloc.h"
-#include "irphase_t.h"
+#include "irnodehashmap.h"
 #include "irgopt.h"
+#include "set.h"
+#include "be.h"
 #include "debug.h"
 
 /** The debug handle. */
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
-#ifdef DO_CACHEOPT
-#include "cacheopt/cachesim.h"
-#endif
-
 #undef IMAX
-#define IMAX(a,b)      ((a) > (b) ? (a) : (b))
+#define IMAX(a,b)   ((a) > (b) ? (a) : (b))
 
-#define MAX_PROJ       IMAX(IMAX(pn_Load_max, pn_Store_max), pn_Call_max)
+#define MAX_PROJ    IMAX(IMAX((long)pn_Load_max, (long)pn_Store_max), (long)pn_Call_max)
 
 enum changes_t {
        DF_CHANGED = 1,       /**< data flow changed */
@@ -73,14 +52,14 @@ enum changes_t {
 /**
  * walker environment
  */
-typedef struct _walk_env_t {
+typedef struct walk_env_t {
        struct obstack obst;          /**< list of all stores */
        unsigned changes;             /**< a bitmask of graph changes */
 } walk_env_t;
 
 /** A Load/Store info. */
-typedef struct _ldst_info_t {
-       ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
+typedef struct ldst_info_t {
+       ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
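
Note: the projs array above is now sized MAX_PROJ+1 because pn_Load_max, pn_Store_max and
pn_Call_max are themselves valid Proj numbers, so an array of only MAX_PROJ entries would
drop the last slot. A minimal standalone sketch of the off-by-one (illustrative enumerator
values, not the real libfirm numbering):

#include <stdio.h>

enum pn_example { pn_res = 0, pn_M = 1, pn_X_except = 2, pn_example_max = 2 };

int main(void)
{
    /* pn_example_max is itself a legal index, so max+1 slots are needed */
    const char *projs[pn_example_max + 1] = { "res", "M", "X_except" };
    printf("%s\n", projs[pn_example_max]);
    return 0;
}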
@@ -97,7 +76,7 @@ enum block_flags_t {
 /**
  * a Block info.
  */
-typedef struct _block_info_t {
+typedef struct block_info_t {
        unsigned flags;               /**< flags for the block */
 } block_info_t;
 
@@ -111,30 +90,30 @@ static unsigned master_visited = 0;
 /**
  * get the Load/Store info of a node
  */
-static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst) {
-       ldst_info_t *info = get_irn_link(node);
+static ldst_info_t *get_ldst_info(ir_node *node, struct obstack *obst)
+{
+       ldst_info_t *info = (ldst_info_t*)get_irn_link(node);
 
        if (! info) {
-               info = obstack_alloc(obst, sizeof(*info));
-               memset(info, 0, sizeof(*info));
+               info = OALLOCZ(obst, ldst_info_t);
                set_irn_link(node, info);
        }
        return info;
-}  /* get_ldst_info */
+}
 
 /**
  * get the Block info of a node
  */
-static block_info_t *get_block_info(ir_node *node, struct obstack *obst) {
-       block_info_t *info = get_irn_link(node);
+static block_info_t *get_block_info(ir_node *node, struct obstack *obst)
+{
+       block_info_t *info = (block_info_t*)get_irn_link(node);
 
        if (! info) {
-               info = obstack_alloc(obst, sizeof(*info));
-               memset(info, 0, sizeof(*info));
+               info = OALLOCZ(obst, block_info_t);
                set_irn_link(node, info);
        }
        return info;
-}  /* get_block_info */
+}
 
 /**
  * update the projection info for a Load/Store
@@ -154,7 +133,7 @@ static unsigned update_projs(ldst_info_t *info, ir_node *proj)
                info->projs[nr] = proj;
                return 0;
        }
-}  /* update_projs */
+}
 
 /**
  * update the exception block info for a Load/Store node.
@@ -170,10 +149,7 @@ static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
        info->exc_block = block;
        info->exc_idx   = pos;
        return 0;
-}  /* update_exc */
-
-/** Return the number of uses of an address node */
-#define get_irn_n_uses(adr)     get_irn_n_edges(adr)
+}
 
 /**
  * walker, collects all Load/Store/Proj nodes
@@ -182,10 +158,10 @@ static unsigned update_exc(ldst_info_t *info, ir_node *block, int pos)
  */
 static void collect_nodes(ir_node *node, void *env)
 {
-       ir_opcode   opcode = get_irn_opcode(node);
+       walk_env_t  *wenv   = (walk_env_t *)env;
+       unsigned     opcode = get_irn_opcode(node);
        ir_node     *pred, *blk, *pred_blk;
        ldst_info_t *ldst_info;
-       walk_env_t  *wenv = env;
 
        if (opcode == iro_Proj) {
                pred   = get_Proj_pred(node);
@@ -221,7 +197,7 @@ static void collect_nodes(ir_node *node, void *env)
 
                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
-                               is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
+                               is_exc = is_x_except_Proj(proj);
                        }
 
                        /* ignore Bad predecessors, they will be removed later */
@@ -244,7 +220,7 @@ static void collect_nodes(ir_node *node, void *env)
                        }
                }
        }
-}  /* collect_nodes */
+}
 
 /**
  * Returns an entity if the address ptr points to a constant one.
@@ -263,7 +239,7 @@ static ir_entity *find_constant_entity(ir_node *ptr)
                        ir_type   *tp  = get_entity_owner(ent);
 
                        /* Do not fiddle with polymorphism. */
-                       if (is_Class_type(get_entity_owner(ent)) &&
+                       if (is_Class_type(tp) &&
                                ((get_entity_n_overwrites(ent)    != 0) ||
                                (get_entity_n_overwrittenby(ent) != 0)   ) )
                                return NULL;
@@ -273,10 +249,10 @@ static ir_entity *find_constant_entity(ir_node *ptr)
                                int i, n;
 
                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
-                                       ir_node *bound;
-                                       tarval *tlower, *tupper;
-                                       ir_node *index = get_Sel_index(ptr, i);
-                                       tarval *tv     = computed_value(index);
+                                       ir_node   *bound;
+                                       ir_tarval *tlower, *tupper;
+                                       ir_node   *index = get_Sel_index(ptr, i);
+                                       ir_tarval *tv    = computed_value(index);
 
                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
@@ -290,16 +266,16 @@ static ir_entity *find_constant_entity(ir_node *ptr)
                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;
 
-                                       if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
+                                       if (tarval_cmp(tv, tlower) == ir_relation_less)
                                                return NULL;
-                                       if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
+                                       if (tarval_cmp(tupper, tv) == ir_relation_less)
                                                return NULL;
 
                                        /* ok, bounds check finished */
                                }
                        }
 
-                       if (variability_constant == get_entity_variability(ent))
+                       if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT)
                                return ent;
 
                        /* try next */
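
Note: with the switch from the old variability API to entity linkage above, a Load whose
address find_constant_entity() traces to an entity marked IR_LINKAGE_CONSTANT (and whose
Sel indices pass the bounds check) can later be folded to its initializer value in
optimize_load(). A source-level illustration (not part of the patch):

static const int table[4] = { 1, 2, 3, 4 };

int load_from_const(void)
{
    return table[2];   /* the Load is replaced by the constant 3 */
}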
@@ -322,7 +298,7 @@ static ir_entity *find_constant_entity(ir_node *ptr)
                        ir_node *l = get_Sub_left(ptr);
                        ir_node *r = get_Sub_right(ptr);
 
-                       if (get_irn_mode(l) == get_irn_mode(ptr) &&     is_Const(r))
+                       if (get_irn_mode(l) == get_irn_mode(ptr) && is_Const(r))
                                ptr = l;
                        else
                                return NULL;
@@ -332,190 +308,31 @@ static ir_entity *find_constant_entity(ir_node *ptr)
                } else
                        return NULL;
        }
-}  /* find_constant_entity */
+}
 
 /**
  * Return the Selection index of a Sel node from dimension n
  */
-static long get_Sel_array_index_long(ir_node *n, int dim) {
+static long get_Sel_array_index_long(ir_node *n, int dim)
+{
        ir_node *index = get_Sel_index(n, dim);
-       assert(is_Const(index));
        return get_tarval_long(get_Const_tarval(index));
-}  /* get_Sel_array_index_long */
-
-/**
- * Returns the accessed component graph path for an
- * node computing an address.
- *
- * @param ptr    the node computing the address
- * @param depth  current depth in steps upward from the root
- *               of the address
- */
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth) {
-       compound_graph_path *res = NULL;
-       ir_entity           *root, *field, *ent;
-       int                 path_len, pos, idx;
-       tarval              *tv;
-       ir_type             *tp;
-
-       if (is_SymConst(ptr)) {
-               /* a SymConst. If the depth is 0, this is an access to a global
-                * entity and we don't need a component path, else we know
-                * at least it's length.
-                */
-               assert(get_SymConst_kind(ptr) == symconst_addr_ent);
-               root = get_SymConst_entity(ptr);
-               res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
-       } else if (is_Sel(ptr)) {
-               /* it's a Sel, go up until we find the root */
-               res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
-               if (res == NULL)
-                       return NULL;
-
-               /* fill up the step in the path at the current position */
-               field    = get_Sel_entity(ptr);
-               path_len = get_compound_graph_path_length(res);
-               pos      = path_len - depth - 1;
-               set_compound_graph_path_node(res, pos, field);
-
-               if (is_Array_type(get_entity_owner(field))) {
-                       assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
-                       set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
-               }
-       } else if (is_Add(ptr)) {
-               ir_node *l    = get_Add_left(ptr);
-               ir_node *r    = get_Add_right(ptr);
-               ir_mode *mode = get_irn_mode(ptr);
-               tarval  *tmp;
-
-               if (is_Const(r) && get_irn_mode(l) == mode) {
-                       ptr = l;
-                       tv  = get_Const_tarval(r);
-               } else {
-                       ptr = r;
-                       tv  = get_Const_tarval(l);
-               }
-ptr_arith:
-               mode = get_tarval_mode(tv);
-               tmp  = tv;
-
-               /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
-               if (is_Sel(ptr)) {
-                       field = get_Sel_entity(ptr);
-               } else {
-                       field = get_SymConst_entity(ptr);
-               }
-               idx = 0;
-               for (ent = field;;) {
-                       unsigned size;
-                       tarval   *sz, *tv_index, *tlower, *tupper;
-                       ir_node  *bound;
-
-                       tp = get_entity_type(ent);
-                       if (! is_Array_type(tp))
-                               break;
-                       ent = get_array_element_entity(tp);
-                       size = get_type_size_bytes(get_entity_type(ent));
-                       sz   = new_tarval_from_long(size, mode);
-
-                       tv_index = tarval_div(tmp, sz);
-                       tmp      = tarval_mod(tmp, sz);
-
-                       if (tv_index == tarval_bad || tmp == tarval_bad)
-                               return NULL;
-
-                       assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
-                       bound  = get_array_lower_bound(tp, 0);
-                       tlower = computed_value(bound);
-                       bound  = get_array_upper_bound(tp, 0);
-                       tupper = computed_value(bound);
-
-                       if (tlower == tarval_bad || tupper == tarval_bad)
-                               return NULL;
-
-                       if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
-                               return NULL;
-                       if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
-                               return NULL;
-
-                       /* ok, bounds check finished */
-                       ++idx;
-               }
-               if (! tarval_is_null(tmp)) {
-                       /* access to some struct/union member */
-                       return NULL;
-               }
-
-               /* should be at least ONE array */
-               if (idx == 0)
-                       return NULL;
-
-               res = rec_get_accessed_path(ptr, depth + idx);
-               if (res == NULL)
-                       return NULL;
-
-               path_len = get_compound_graph_path_length(res);
-               pos      = path_len - depth - idx;
-
-               for (ent = field;;) {
-                       unsigned size;
-                       tarval   *sz, *tv_index;
-                       long     index;
-
-                       tp = get_entity_type(ent);
-                       if (! is_Array_type(tp))
-                               break;
-                       ent = get_array_element_entity(tp);
-                       set_compound_graph_path_node(res, pos, ent);
-
-                       size = get_type_size_bytes(get_entity_type(ent));
-                       sz   = new_tarval_from_long(size, mode);
-
-                       tv_index = tarval_div(tv, sz);
-                       tv       = tarval_mod(tv, sz);
-
-                       /* worked above, should work again */
-                       assert(tv_index != tarval_bad && tv != tarval_bad);
-
-                       /* bounds already checked above */
-                       index = get_tarval_long(tv_index);
-                       set_compound_graph_path_array_index(res, pos, index);
-                       ++pos;
-               }
-       } else if (is_Sub(ptr)) {
-               ir_node *l = get_Sub_left(ptr);
-               ir_node *r = get_Sub_right(ptr);
-
-               ptr = l;
-               tv  = get_Const_tarval(r);
-               tv  = tarval_neg(tv);
-               goto ptr_arith;
-       }
-       return res;
-}  /* rec_get_accessed_path */
-
-/**
- * Returns an access path or NULL.  The access path is only
- * valid, if the graph is in phase_high and _no_ address computation is used.
- */
-static compound_graph_path *get_accessed_path(ir_node *ptr) {
-       compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
-       return gr;
-}  /* get_accessed_path */
+}
 
 typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
-       long              index;
+       size_t            index;
 } path_entry;
 
-static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
+static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
+{
        path_entry       entry, *p;
        ir_entity        *ent, *field;
        ir_initializer_t *initializer;
-       tarval           *tv;
+       ir_tarval        *tv;
        ir_type          *tp;
-       unsigned         n;
+       size_t           n;
 
        entry.next = next;
        if (is_SymConst(ptr)) {
@@ -538,7 +355,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
                                        continue;
                                }
                        }
-                       if (p->index >= (int) n)
+                       if (p->index >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);
 
@@ -571,7 +388,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
-                       int i, n_members = get_compound_n_members(tp);
+                       size_t i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
@@ -584,17 +401,19 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next) {
                }
                return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
        }  else if (is_Add(ptr)) {
-               ir_node  *l = get_Add_left(ptr);
-               ir_node  *r = get_Add_right(ptr);
                ir_mode  *mode;
                unsigned pos;
 
-               if (is_Const(r)) {
-                       ptr = l;
-                       tv  = get_Const_tarval(r);
-               } else {
-                       ptr = r;
-                       tv  = get_Const_tarval(l);
+               {
+                       ir_node *l = get_Add_left(ptr);
+                       ir_node *r = get_Add_right(ptr);
+                       if (is_Const(r)) {
+                               ptr = l;
+                               tv  = get_Const_tarval(r);
+                       } else {
+                               ptr = r;
+                               tv  = get_Const_tarval(l);
+                       }
                }
 ptr_arith:
                mode = get_tarval_mode(tv);
@@ -625,10 +444,10 @@ ptr_arith:
                /* fill them up */
                pos = 0;
                for (ent = field;;) {
-                       unsigned size;
-                       tarval   *sz, *tv_index, *tlower, *tupper;
-                       long     index;
-                       ir_node  *bound;
+                       unsigned   size;
+                       ir_tarval *sz, *tv_index, *tlower, *tupper;
+                       long       index;
+                       ir_node   *bound;
 
                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
@@ -655,9 +474,9 @@ ptr_arith:
                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;
 
-                       if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+                       if (tarval_cmp(tv_index, tlower) == ir_relation_less)
                                return NULL;
-                       if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+                       if (tarval_cmp(tupper, tv_index) == ir_relation_less)
                                return NULL;
 
                        /* ok, bounds check finished */
@@ -683,7 +502,8 @@ ptr_arith:
        return NULL;
 }
 
-static ir_node *find_compound_ent_value(ir_node *ptr) {
+static ir_node *find_compound_ent_value(ir_node *ptr)
+{
        return rec_find_compound_ent_value(ptr, NULL);
 }
 
@@ -691,10 +511,11 @@ static ir_node *find_compound_ent_value(ir_node *ptr) {
 static void reduce_adr_usage(ir_node *ptr);
 
 /**
- * Update a Load that may lost it's usage.
+ * Update a Load that may have lost its users.
  */
-static void handle_load_update(ir_node *load) {
-       ldst_info_t *info = get_irn_link(load);
+static void handle_load_update(ir_node *load)
+{
+       ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
 
        /* do NOT touch volatile loads for now */
        if (get_Load_volatility(load) == volatility_is_volatile)
@@ -704,57 +525,70 @@ static void handle_load_update(ir_node *load) {
                ir_node *ptr = get_Load_ptr(load);
                ir_node *mem = get_Load_mem(load);
 
-               /* a Load which value is neither used nor exception checked, remove it */
+               /* a Load whose value is neither used nor exception checked, remove it */
                exchange(info->projs[pn_Load_M], mem);
                if (info->projs[pn_Load_X_regular])
-                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                kill_node(load);
                reduce_adr_usage(ptr);
        }
-}  /* handle_load_update */
+}
 
 /**
- * A Use of an address node is vanished. Check if this was a Proj
+ * A use of an address node has vanished. Check if this was a Proj
  * node and update the counters.
  */
-static void reduce_adr_usage(ir_node *ptr) {
-       if (is_Proj(ptr)) {
-               if (get_irn_n_edges(ptr) <= 0) {
-                       /* this Proj is dead now */
-                       ir_node *pred = get_Proj_pred(ptr);
-
-                       if (is_Load(pred)) {
-                               ldst_info_t *info = get_irn_link(pred);
-                               info->projs[get_Proj_proj(ptr)] = NULL;
-
-                               /* this node lost it's result proj, handle that */
-                               handle_load_update(pred);
-                       }
-               }
+static void reduce_adr_usage(ir_node *ptr)
+{
+       ir_node *pred;
+       if (!is_Proj(ptr))
+               return;
+       if (get_irn_n_edges(ptr) > 0)
+               return;
+
+       /* this Proj is dead now */
+       pred = get_Proj_pred(ptr);
+       if (is_Load(pred)) {
+               ldst_info_t *info = (ldst_info_t*)get_irn_link(pred);
+               info->projs[get_Proj_proj(ptr)] = NULL;
+
+               /* this node lost its result proj, handle that */
+               handle_load_update(pred);
        }
-}  /* reduce_adr_usage */
+}
 
 /**
  * Check, if an already existing value of mode old_mode can be converted
  * into the needed one new_mode without loss.
  */
-static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode) {
+static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
+{
+       unsigned old_size;
+       unsigned new_size;
        if (old_mode == new_mode)
-               return 1;
+               return true;
+
+       old_size = get_mode_size_bits(old_mode);
+       new_size = get_mode_size_bits(new_mode);
 
        /* if both modes are two-complement ones, we can always convert the
-          Stored value into the needed one. */
-       if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+          Stored value into the needed one. (On big endian machines we currently
+          only support this for modes of the same size.)
+       if (old_size >= new_size &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
-                 get_mode_arithmetic(new_mode) == irma_twos_complement)
-               return 1;
-       return 0;
-}  /* can_use_stored_value */
+                 get_mode_arithmetic(new_mode) == irma_twos_complement &&
+                 (!be_get_backend_param()->byte_order_big_endian
+               || old_size == new_size)) {
+               return true;
+       }
+       return false;
+}
 
 /**
- * Check whether a Call is at least pure, ie. does only read memory.
+ * Check whether a Call is at least pure, i.e. it only reads memory.
  */
-static unsigned is_Call_pure(ir_node *call) {
+static unsigned is_Call_pure(ir_node *call)
+{
        ir_type *call_tp = get_Call_type(call);
        unsigned prop = get_method_additional_properties(call_tp);
 
@@ -763,14 +597,14 @@ static unsigned is_Call_pure(ir_node *call) {
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);
 
-               if (is_Global(ptr)) {
-                       ir_entity *ent = get_Global_entity(ptr);
+               if (is_SymConst_addr_ent(ptr)) {
+                       ir_entity *ent = get_SymConst_entity(ptr);
 
                        prop = get_entity_additional_properties(ent);
                }
        }
        return (prop & (mtp_property_const|mtp_property_pure)) != 0;
-}  /* is_Call_pure */
+}
 
 static ir_node *get_base_and_offset(ir_node *ptr, long *pOffset)
 {
@@ -856,45 +690,47 @@ static int try_load_after_store(ir_node *load,
        store_mode     = get_irn_mode(get_Store_value(store));
        store_mode_len = get_mode_size_bytes(store_mode);
        delta          = load_offset - store_offset;
-       if (delta < 0 || delta + load_mode_len > store_mode_len)
-               return 0;
+       store_value    = get_Store_value(store);
 
-       if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
-           get_mode_arithmetic(load_mode)  != irma_twos_complement)
+       if (delta < 0 || delta+load_mode_len > store_mode_len)
                return 0;
 
-       store_value = get_Store_value(store);
-
-       /* produce a shift to adjust offset delta */
-       if (delta > 0) {
-               ir_node *cnst;
-
-               /* FIXME: only true for little endian */
-               cnst        = new_Const_long(mode_Iu, delta * 8);
-               store_value = new_r_Shr(current_ir_graph, get_nodes_block(load),
-                                       store_value, cnst, store_mode);
-       }
+       if (store_mode != load_mode &&
+           get_mode_arithmetic(store_mode) == irma_twos_complement &&
+           get_mode_arithmetic(load_mode)  == irma_twos_complement) {
+
+               /* produce a shift to adjust offset delta */
+               unsigned const shift = be_get_backend_param()->byte_order_big_endian
+                       ? store_mode_len - load_mode_len - delta
+                       : delta;
+               if (shift != 0) {
+                       ir_graph *const irg  = get_irn_irg(load);
+                       ir_node  *const cnst = new_r_Const_long(irg, mode_Iu, shift * 8);
+                       store_value = new_r_Shr(get_nodes_block(load),
+                                                                       store_value, cnst, store_mode);
+               }
 
-       /* add an convert if needed */
-       if (store_mode != load_mode) {
-               store_value = new_r_Conv(current_ir_graph, get_nodes_block(load),
-                                        store_value, load_mode);
+               store_value = new_r_Conv(get_nodes_block(load), store_value, load_mode);
+       } else {
+               /* we would need some kind of bitcast node here */
+               return 0;
        }
 
        DBG_OPT_RAW(load, store_value);
 
-       info = get_irn_link(load);
+       info = (ldst_info_t*)get_irn_link(load);
        if (info->projs[pn_Load_M])
                exchange(info->projs[pn_Load_M], get_Load_mem(load));
 
        res = 0;
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
-               exchange( info->projs[pn_Load_X_except], new_Bad());
+               ir_graph *irg = get_irn_irg(load);
+               exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
-               exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+               exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                res |= CF_CHANGED;
        }
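
Note: the hunk above replaces the little-endian-only shift in try_load_after_store() by one
that also handles big-endian layouts: for a load of load_mode_len bytes at byte offset delta
into an earlier store of store_mode_len bytes, the stored value is shifted right by delta
bytes on little endian and by store_mode_len - load_mode_len - delta bytes on big endian.
A standalone sketch of that arithmetic (plain integers, not libfirm modes):

#include <stdio.h>

static unsigned shift_bytes(unsigned store_len, unsigned load_len,
                            unsigned delta, int big_endian)
{
    /* caller guarantees delta + load_len <= store_len */
    return big_endian ? store_len - load_len - delta : delta;
}

int main(void)
{
    /* 4-byte store, 1-byte load at offset 2 */
    printf("little endian: shift by %u bytes\n", shift_bytes(4, 1, 2, 0));
    printf("big endian:    shift by %u bytes\n", shift_bytes(4, 1, 2, 1));
    return 0;
}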
 
@@ -917,33 +753,34 @@ static int try_load_after_store(ir_node *load,
  *
  * INC_MASTER() must be called before dive into
  */
-static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
+static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
+{
        unsigned    res = 0;
-       ldst_info_t *info = get_irn_link(load);
+       ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
        ir_node     *pred;
        ir_node     *ptr       = get_Load_ptr(load);
        ir_node     *mem       = get_Load_mem(load);
        ir_mode     *load_mode = get_Load_mode(load);
 
        for (pred = curr; load != pred; ) {
-               ldst_info_t *pred_info = get_irn_link(pred);
+               ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
 
                /*
                 * a Load immediately after a Store -- a read after write.
                 * We may remove the Load, if both Load & Store does not have an
-                * exception handler OR they are in the same MacroBlock. In the latter
+                * exception handler OR they are in the same Block. In the latter
                 * case the Load cannot throw an exception when the previous Store was
                 * quiet.
                 *
                 * Why we need to check for Store Exception? If the Store cannot
                 * be executed (ROM) the exception handler might simply jump into
-                * the load MacroBlock :-(
+                * the load Block :-(
                 * We could make it a little bit better if we would know that the
                 * exception handler of the Store jumps directly to the end...
                 */
                if (is_Store(pred) && ((pred_info->projs[pn_Store_X_except] == NULL
                                && info->projs[pn_Load_X_except] == NULL)
-                               || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)))
+                               || get_nodes_block(load) == get_nodes_block(pred)))
                {
                        long    load_offset;
                        ir_node *base_ptr = get_base_and_offset(ptr, &load_offset);
@@ -955,14 +792,20 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
                           can_use_stored_value(get_Load_mode(pred), load_mode)) {
                        /*
                         * a Load after a Load -- a read after read.
-                        * We may remove the second Load, if it does not have an exception handler
-                        * OR they are in the same MacroBlock. In the later case the Load cannot
-                        * throw an exception when the previous Load was quiet.
+                        * We may remove the second Load, if it does not have an exception
+                        * handler OR they are in the same Block. In the latter case
+                        * the Load cannot throw an exception when the previous Load was
+                        * quiet.
+                        *
+                        * Here, there is no need to check if the previous Load has an
+                        * exception handler because they would have exactly the same
+                        * exception...
                         *
-                        * Here, there is no need to check if the previous Load has an exception
-                        * hander because they would have exact the same exception...
+                        * TODO: implement load-after-load with different mode for big
+                        *       endian
                         */
-                       if (info->projs[pn_Load_X_except] == NULL || get_nodes_MacroBlock(load) == get_nodes_MacroBlock(pred)) {
+                       if (info->projs[pn_Load_X_except] == NULL
+                                       || get_nodes_block(load) == get_nodes_block(pred)) {
                                ir_node *value;
 
                                DBG_OPT_RAR(load, pred);
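
Note: the two cases handled in follow_Mem_chain() above correspond to the following
source-level patterns (illustrative only; the optimization works on the memory chain of
the IR, not on C source):

int read_after_write(int *p)
{
    *p = 42;      /* Store */
    return *p;    /* Load replaced by the stored value 42 */
}

int read_after_read(const int *p)
{
    int a = *p;   /* first Load */
    int b = *p;   /* second Load reuses the Proj of the first */
    return a + b; /* effectively a + a */
}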
@@ -971,13 +814,13 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
                                if (info->projs[pn_Load_res]) {
                                        if (pred_info->projs[pn_Load_res] == NULL) {
                                                /* create a new Proj again */
-                                               pred_info->projs[pn_Load_res] = new_r_Proj(current_ir_graph, get_nodes_block(pred), pred, get_Load_mode(pred), pn_Load_res);
+                                               pred_info->projs[pn_Load_res] = new_r_Proj(pred, get_Load_mode(pred), pn_Load_res);
                                        }
                                        value = pred_info->projs[pn_Load_res];
 
                                        /* add an convert if needed */
                                        if (get_Load_mode(pred) != load_mode) {
-                                               value = new_r_Conv(current_ir_graph, get_nodes_block(load), value, load_mode);
+                                               value = new_r_Conv(get_nodes_block(load), value, load_mode);
                                        }
 
                                        exchange(info->projs[pn_Load_res], value);
@@ -988,11 +831,12 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
 
                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
-                                       exchange(info->projs[pn_Load_X_except], new_Bad());
+                                       ir_graph *irg = get_irn_irg(load);
+                                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
-                                       exchange( info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+                                       exchange( info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                                        res |= CF_CHANGED;
                                }
 
@@ -1005,7 +849,6 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
-                               current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, load_mode);
@@ -1047,34 +890,27 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr) {
        }
 
        return res;
-}  /* follow_Mem_chain */
+}
 
-/*
- * Check if we can replace the load by a given const from
- * the const code irg.
- */
-ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
-       ir_mode *c_mode = get_irn_mode(c);
-       ir_mode *l_mode = get_Load_mode(load);
-       ir_node *res    = NULL;
+ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
+{
+       ir_mode  *c_mode = get_irn_mode(c);
+       ir_mode  *l_mode = get_Load_mode(load);
+       ir_node  *block  = get_nodes_block(load);
+       dbg_info *dbgi   = get_irn_dbg_info(load);
+       ir_node  *res    = copy_const_value(dbgi, c, block);
 
        if (c_mode != l_mode) {
                /* check, if the mode matches OR can be easily converted info */
                if (is_reinterpret_cast(c_mode, l_mode)) {
-                       /* we can safely cast */
-                       dbg_info *dbg   = get_irn_dbg_info(load);
-                       ir_node  *block = get_nodes_block(load);
-
                        /* copy the value from the const code irg and cast it */
-                       res = copy_const_value(dbg, c);
-                       res = new_rd_Conv(dbg, current_ir_graph, block, res, l_mode);
+                       res = new_rd_Conv(dbgi, block, res, l_mode);
+               } else {
+                       return NULL;
                }
-       } else {
-               /* copy the value from the const code irg */
-               res = copy_const_value(get_irn_dbg_info(load), c);
        }
        return res;
-}  /* can_replace_load_by_const */
+}
 
 /**
  * optimize a Load
@@ -1083,7 +919,7 @@ ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c) {
  */
 static unsigned optimize_load(ir_node *load)
 {
-       ldst_info_t *info = get_irn_link(load);
+       ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
        ir_node     *mem, *ptr, *value;
        ir_entity   *ent;
        long        dummy;
@@ -1096,44 +932,17 @@ static unsigned optimize_load(ir_node *load)
        /* the address of the load to be optimized */
        ptr = get_Load_ptr(load);
 
-       /*
-        * Check if we can remove the exception from a Load:
-        * This can be done, if the address is from an Sel(Alloc) and
-        * the Sel type is a subtype of the allocated type.
-        *
-        * This optimizes some often used OO constructs,
-        * like x = new O; x->t;
-        */
-       if (info->projs[pn_Load_X_except]) {
-               ir_node *addr = ptr;
-
-               /* find base address */
-               while (is_Sel(addr))
-                       addr = get_Sel_ptr(addr);
-               if (is_Alloc(skip_Proj(skip_Cast(addr)))) {
-                       /* simple case: a direct load after an Alloc. Firm Alloc throw
-                        * an exception in case of out-of-memory. So, there is no way for an
-                        * exception in this load.
-                        * This code is constructed by the "exception lowering" in the Jack compiler.
-                        */
-                       exchange(info->projs[pn_Load_X_except], new_Bad());
-                       info->projs[pn_Load_X_except] = NULL;
-                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
-                       info->projs[pn_Load_X_regular] = NULL;
-                       res |= CF_CHANGED;
-               }
-       }
-
        /* The mem of the Load. Must still be returned after optimization. */
-       mem  = get_Load_mem(load);
+       mem = get_Load_mem(load);
 
-       if (! info->projs[pn_Load_res] && ! info->projs[pn_Load_X_except]) {
-               /* a Load which value is neither used nor exception checked, remove it */
+       if (info->projs[pn_Load_res] == NULL
+                       && info->projs[pn_Load_X_except] == NULL) {
+               /* the value is never used and we don't care about exceptions, remove */
                exchange(info->projs[pn_Load_M], mem);
 
                if (info->projs[pn_Load_X_regular]) {
                        /* should not happen, but if it does, remove it */
-                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        res |= CF_CHANGED;
                }
                kill_node(load);
@@ -1141,60 +950,50 @@ static unsigned optimize_load(ir_node *load)
                return res | DF_CHANGED;
        }
 
-       /* Load from a constant polymorphic field, where we can resolve
-          polymorphism. */
-       value = transform_polymorph_Load(load);
-       if (value == load) {
-               value = NULL;
-               /* check if we can determine the entity that will be loaded */
-               ent = find_constant_entity(ptr);
-               if (ent != NULL) {
-                       if ((allocation_static == get_entity_allocation(ent)) &&
-                               (visibility_external_allocated != get_entity_visibility(ent))) {
-                               /* a static allocation that is not external: there should be NO exception
-                                * when loading even if we cannot replace the load itself. */
-
-                               /* no exception, clear the info field as it might be checked later again */
-                               if (info->projs[pn_Load_X_except]) {
-                                       exchange(info->projs[pn_Load_X_except], new_Bad());
-                                       info->projs[pn_Load_X_except] = NULL;
-                                       res |= CF_CHANGED;
-                               }
-                               if (info->projs[pn_Load_X_regular]) {
-                                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
-                                       info->projs[pn_Load_X_regular] = NULL;
-                                       res |= CF_CHANGED;
-                               }
-
-                               if (variability_constant == get_entity_variability(ent)) {
-                                       if (is_atomic_entity(ent)) {
-                                               /* Might not be atomic after
-                                                  lowering of Sels.  In this
-                                                  case we could also load, but
-                                                  it's more complicated. */
-                                               /* more simpler case: we load the content of a constant value:
-                                                * replace it by the constant itself
-                                                */
-                                               value = get_atomic_ent_value(ent);
-                                       } else {
-                                               if (ent->has_initializer) {
-                                                       /* new style initializer */
-                                                       value = find_compound_ent_value(ptr);
-                                               } else {
-                                                       /* old style initializer */
-                                                       compound_graph_path *path = get_accessed_path(ptr);
+       value = NULL;
+       /* check if we can determine the entity that will be loaded */
+       ent = find_constant_entity(ptr);
+       if (ent != NULL
+                       && get_entity_visibility(ent) != ir_visibility_external) {
+               /* a static allocation that is not external: there should be NO
+                * exception when loading even if we cannot replace the load itself.
+                */
 
-                                                       if (path != NULL) {
-                                                               assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+               /* no exception, clear the info field as it might be checked later again */
+               if (info->projs[pn_Load_X_except]) {
+                       ir_graph *irg = get_irn_irg(load);
+                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
+                       info->projs[pn_Load_X_except] = NULL;
+                       res |= CF_CHANGED;
+               }
+               if (info->projs[pn_Load_X_regular]) {
+                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
+                       info->projs[pn_Load_X_regular] = NULL;
+                       res |= CF_CHANGED;
+               }
 
-                                                               value = get_compound_ent_value_by_path(ent, path);
-                                                               DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
-                                                               free_compound_graph_path(path);
-                                                       }
+               if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
+                       if (has_entity_initializer(ent)) {
+                               /* new style initializer */
+                               value = find_compound_ent_value(ptr);
+                       }
+                       if (value != NULL) {
+                               ir_graph *irg = get_irn_irg(load);
+                               value = can_replace_load_by_const(load, value);
+                               if (value != NULL && is_Sel(ptr)) {
+                                       /* frontend has inserted masking operations after bitfield accesses,
+                                        * so we might have to shift the const. */
+                                       unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
+                                       if (bit_offset != 0) {
+                                               if (is_Const(value)) {
+                                                       ir_tarval *tv_old = get_Const_tarval(value);
+                                                       ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
+                                                       ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
+                                                       value = new_r_Const(irg, tv_new);
+                                               } else {
+                                                       value = NULL;
                                                }
                                        }
-                                       if (value != NULL)
-                                               value = can_replace_load_by_const(load, value);
                                }
                        }
                }
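
Note: the new bitfield handling above shifts a constant initializer value left by the
entity's bit offset before substituting it, because the frontend has already emitted
mask/shift operations after the Load that expect the raw in-memory layout. A standalone
sketch of the adjustment (not libfirm code; a 3-bit field at bit offset 3 is assumed):

#include <stdio.h>

int main(void)
{
    unsigned value      = 0x5;  /* constant value of the bitfield      */
    unsigned bit_offset = 3;    /* offset of the field within its byte */

    unsigned as_loaded = value << bit_offset;              /* what the Load sees */
    unsigned extracted = (as_loaded >> bit_offset) & 0x7;  /* frontend's masking  */
    printf("0x%x -> 0x%x -> 0x%x\n", value, as_loaded, extracted);
    return 0;
}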
@@ -1202,12 +1001,13 @@ static unsigned optimize_load(ir_node *load)
        if (value != NULL) {
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
-                       exchange(info->projs[pn_Load_X_except], new_Bad());
+                       ir_graph *irg = get_irn_irg(load);
+                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
                if (info->projs[pn_Load_X_regular]) {
-                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(current_ir_graph, get_nodes_block(load)));
+                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
                        info->projs[pn_Load_X_regular] = NULL;
                        res |= CF_CHANGED;
                }
@@ -1226,7 +1026,7 @@ static unsigned optimize_load(ir_node *load)
 
        /* Check, if the address of this load is used more than once.
         * If not, more load cannot be removed in any case. */
-       if (get_irn_n_uses(ptr) <= 1 && get_irn_n_uses(get_base_and_offset(ptr, &dummy)) <= 1)
+       if (get_irn_n_edges(ptr) <= 1 && get_irn_n_edges(get_base_and_offset(ptr, &dummy)) <= 1)
                return res;
 
        /*
@@ -1239,7 +1039,7 @@ static unsigned optimize_load(ir_node *load)
        INC_MASTER();
        res = follow_Mem_chain(load, skip_Proj(mem));
        return res;
-}  /* optimize_load */
+}
 
 /**
  * Check whether a value of mode new_mode would completely overwrite a value
@@ -1248,50 +1048,96 @@ static unsigned optimize_load(ir_node *load)
 static int is_completely_overwritten(ir_mode *old_mode, ir_mode *new_mode)
 {
        return get_mode_size_bits(new_mode) >= get_mode_size_bits(old_mode);
-}  /* is_completely_overwritten */
+}
+
+/**
+ * Check whether small is a part of large (starting at same address).
+ */
+static int is_partially_same(ir_node *small, ir_node *large)
+{
+       ir_mode *sm = get_irn_mode(small);
+       ir_mode *lm = get_irn_mode(large);
+
+       /* FIXME: Check endianness */
+       return is_Conv(small) && get_Conv_op(small) == large
+           && get_mode_size_bytes(sm) < get_mode_size_bytes(lm)
+           && get_mode_arithmetic(sm) == irma_twos_complement
+           && get_mode_arithmetic(lm) == irma_twos_complement;
+}
 
 /**
  * follow the memory chain as long as there are only Loads and alias free Stores.
  *
  * INC_MASTER() must be called before dive into
  */
-static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
+static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr)
+{
        unsigned res = 0;
-       ldst_info_t *info = get_irn_link(store);
+       ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
        ir_node *pred;
        ir_node *ptr = get_Store_ptr(store);
        ir_node *mem = get_Store_mem(store);
        ir_node *value = get_Store_value(store);
        ir_mode *mode  = get_irn_mode(value);
        ir_node *block = get_nodes_block(store);
-       ir_node *mblk  = get_Block_MacroBlock(block);
 
        for (pred = curr; pred != store;) {
-               ldst_info_t *pred_info = get_irn_link(pred);
+               ldst_info_t *pred_info = (ldst_info_t*)get_irn_link(pred);
 
                /*
                 * BEWARE: one might think that checking the modes is useless, because
                 * if the pointers are identical, they refer to the same object.
                 * This is only true in strongly typed languages, not in C where the following
                 * is possible *(ir_type1 *)p = a; *(ir_type2 *)p = b ...
-                * However, if the mode that is written have a bigger  or equal size the the old
-                * one, the old value is completely overwritten and can be killed ...
+                * However, if the size of the mode that is written is bigger than
+                * or equal to the size of the old one, the old value is completely
+                * overwritten and can be killed ...
                 */
                if (is_Store(pred) && get_Store_ptr(pred) == ptr &&
-                   get_nodes_MacroBlock(pred) == mblk &&
-                   is_completely_overwritten(get_irn_mode(get_Store_value(pred)), mode)) {
+            get_nodes_block(pred) == block) {
+                       /*
+                        * a Store after a Store in the same Block -- a write after write.
+                        */
+
                        /*
-                        * a Store after a Store in the same MacroBlock -- a write after write.
-                        * We may remove the first Store, if it does not have an exception handler.
+                        * We may remove the first Store, if the old value is completely
+                        * overwritten or the old value is a part of the new value,
+                        * and if it does not have an exception handler.
                         *
                         * TODO: What, if both have the same exception handler ???
                         */
-                       if (get_Store_volatility(pred) != volatility_is_volatile && !pred_info->projs[pn_Store_X_except]) {
-                               DBG_OPT_WAW(pred, store);
-                               exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
-                               kill_node(pred);
-                               reduce_adr_usage(ptr);
-                               return DF_CHANGED;
+                       if (get_Store_volatility(pred) != volatility_is_volatile
+                               && !pred_info->projs[pn_Store_X_except]) {
+                               ir_node *predvalue = get_Store_value(pred);
+                               ir_mode *predmode  = get_irn_mode(predvalue);
+
+                               if (is_completely_overwritten(predmode, mode)
+                                       || is_partially_same(predvalue, value)) {
+                                       DBG_OPT_WAW(pred, store);
+                                       exchange(pred_info->projs[pn_Store_M], get_Store_mem(pred));
+                                       kill_node(pred);
+                                       reduce_adr_usage(ptr);
+                                       return DF_CHANGED;
+                               }
+                       }
+
+                       /*
+                        * We may remove the Store, if the old value already contains
+                        * the new value, and if it does not have an exception handler.
+                        *
+                        * TODO: What, if both have the same exception handler ???
+                        */
+                       if (get_Store_volatility(store) != volatility_is_volatile
+                               && !info->projs[pn_Store_X_except]) {
+                               ir_node *predvalue = get_Store_value(pred);
+
+                               if (is_partially_same(value, predvalue)) {
+                                       DBG_OPT_WAW(pred, store);
+                                       exchange(info->projs[pn_Store_M], mem);
+                                       kill_node(store);
+                                       reduce_adr_usage(ptr);
+                                       return DF_CHANGED;
+                               }
                        }
                } else if (is_Load(pred) && get_Load_ptr(pred) == ptr &&
                           value == pred_info->projs[pn_Load_res]) {
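
Note: the rewritten write-after-write handling above removes the earlier Store when it is
completely overwritten, and removes the later Store when its value is a Conv (truncation)
of the value the earlier Store already wrote. At source level (illustrative only; the
partial case assumes both Stores hit the same address and, per the FIXME in
is_partially_same(), little-endian layout):

void waw_overwrite(int *p, int x)
{
    *p = 17;                 /* earlier Store, completely overwritten: removable */
    *p = x;
}

void waw_contained(int *p, int x)
{
    *p = x;                  /* earlier Store of the full value */
    *(short *)p = (short)x;  /* narrower Store of a truncated part of the same
                                value: already contained, removable */
}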
@@ -1313,7 +1159,6 @@ static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
                if (is_Store(pred)) {
                        /* check if we can pass through this store */
                        ir_alias_relation rel = get_alias_relation(
-                               current_ir_graph,
                                get_Store_ptr(pred),
                                get_irn_mode(get_Store_value(pred)),
                                ptr, mode);
@@ -1323,7 +1168,7 @@ static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
                        pred = skip_Proj(get_Store_mem(pred));
                } else if (is_Load(pred)) {
                        ir_alias_relation rel = get_alias_relation(
-                               current_ir_graph, get_Load_ptr(pred), get_Load_mode(pred),
+                               get_Load_ptr(pred), get_Load_mode(pred),
                                ptr, mode);
                        if (rel != ir_no_alias)
                                break;
@@ -1351,12 +1196,12 @@ static unsigned follow_Mem_chain_for_Store(ir_node *store, ir_node *curr) {
                }
        }
        return res;
-}  /* follow_Mem_chain_for_Store */
+}
 
 /** find entity used as base for an address calculation */
 static ir_entity *find_entity(ir_node *ptr)
 {
-       switch(get_irn_opcode(ptr)) {
+       switch (get_irn_opcode(ptr)) {
        case iro_SymConst:
                return get_SymConst_entity(ptr);
        case iro_Sel: {
@@ -1387,7 +1232,8 @@ static ir_entity *find_entity(ir_node *ptr)
  *
  * @param store  the Store node
  */
-static unsigned optimize_store(ir_node *store) {
+static unsigned optimize_store(ir_node *store)
+{
        ir_node   *ptr;
        ir_node   *mem;
        ir_entity *entity;
@@ -1400,8 +1246,9 @@ static unsigned optimize_store(ir_node *store) {
 
        /* a store to an entity which is never read is unnecessary */
        if (entity != NULL && !(get_entity_usage(entity) & ir_usage_read)) {
-               ldst_info_t *info = get_irn_link(store);
+               ldst_info_t *info = (ldst_info_t*)get_irn_link(store);
                if (info->projs[pn_Store_X_except] == NULL) {
+                       DB((dbg, LEVEL_1, "  Killing useless %+F to never read entity %+F\n", store, entity));
                        exchange(info->projs[pn_Store_M], get_Store_mem(store));
                        kill_node(store);
                        reduce_adr_usage(ptr);
@@ -1411,7 +1258,7 @@ static unsigned optimize_store(ir_node *store) {
 
        /* Check, if the address of this Store is used more than once.
         * If not, this Store cannot be removed in any case. */
-       if (get_irn_n_uses(ptr) <= 1)
+       if (get_irn_n_edges(ptr) <= 1)
                return 0;
 
        mem = get_Store_mem(store);
@@ -1420,7 +1267,23 @@ static unsigned optimize_store(ir_node *store) {
        INC_MASTER();
 
        return follow_Mem_chain_for_Store(store, skip_Proj(mem));
-}  /* optimize_store */
+}
+
+/* check if a node has more than one real user. Keepalive edges do not count as
+ * real users */
+static bool has_multiple_users(const ir_node *node)
+{
+       unsigned real_users = 0;
+       foreach_out_edge(node, edge) {
+               ir_node *user = get_edge_src_irn(edge);
+               if (is_End(user))
+                       continue;
+               ++real_users;
+               if (real_users > 1)
+                       return true;
+       }
+       return false;
+}
 
 /**
  * walker, optimizes Phi after Stores to identical places:
@@ -1444,7 +1307,10 @@ static unsigned optimize_store(ir_node *store) {
 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
 {
        int i, n;
-       ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+       ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+#ifdef DO_CACHEOPT
+       ir_node *old_store;
+#endif
        ir_mode *mode;
        ir_node **inM, **inD, **projMs;
        int *idx;
@@ -1463,23 +1329,21 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
 
        /* must be only one user */
        projM = get_Phi_pred(phi, 0);
-       if (get_irn_n_edges(projM) != 1)
+       if (has_multiple_users(projM))
                return 0;
 
        store = skip_Proj(projM);
+#ifdef DO_CACHEOPT
        old_store = store;
+#endif
        if (!is_Store(store))
                return 0;
 
        block = get_nodes_block(store);
 
-       /* abort on dead blocks */
-       if (is_Block_dead(block))
-               return 0;
-
        /* check if the block is post dominated by Phi-block
           and has no exception exit */
-       bl_info = get_irn_link(block);
+       bl_info = (block_info_t*)get_irn_link(block);
        if (bl_info->flags & BLOCK_HAS_EXC)
                return 0;
 
@@ -1490,13 +1354,13 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
        /* this is the address of the store */
        ptr  = get_Store_ptr(store);
        mode = get_irn_mode(get_Store_value(store));
-       info = get_irn_link(store);
+       info = (ldst_info_t*)get_irn_link(store);
        exc  = info->exc_block;
 
        for (i = 1; i < n; ++i) {
                ir_node *pred = get_Phi_pred(phi, i);
 
-               if (get_irn_n_edges(pred) != 1)
+               if (has_multiple_users(pred))
                        return 0;
 
                pred = skip_Proj(pred);
@@ -1506,22 +1370,19 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
                if (ptr != get_Store_ptr(pred) || mode != get_irn_mode(get_Store_value(pred)))
                        return 0;
 
-               info = get_irn_link(pred);
+               info = (ldst_info_t*)get_irn_link(pred);
 
                /* check, if all stores have the same exception flow */
                if (exc != info->exc_block)
                        return 0;
 
-               /* abort on dead blocks */
                block = get_nodes_block(pred);
-               if (is_Block_dead(block))
-                       return 0;
 
                /* check if the block is post dominated by Phi-block
                   and has no exception exit. Note that block must be different from
                   Phi-block, else we would move a Store from the End of a block to its
                   Start... */
-               bl_info = get_irn_link(block);
+               bl_info = (block_info_t*)get_irn_link(block);
                if (bl_info->flags & BLOCK_HAS_EXC)
                        return 0;
                if (block == phi_block || ! block_postdominates(phi_block, block))
@@ -1555,13 +1416,10 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
           memory Proj.
         */
        for (i = n - 1; i >= 0; --i) {
-               ir_node *store;
-
                projMs[i] = get_Phi_pred(phi, i);
-               assert(is_Proj(projMs[i]));
 
-               store = get_Proj_pred(projMs[i]);
-               info  = get_irn_link(store);
+               ir_node *const store = get_Proj_pred(projMs[i]);
+               info  = (ldst_info_t*)get_irn_link(store);
 
                inM[i] = get_Store_mem(store);
                inD[i] = get_Store_value(store);
@@ -1570,16 +1428,16 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
        block = get_nodes_block(phi);
 
        /* second step: create a new memory Phi */
-       phiM = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inM, mode_M);
+       phiM = new_rd_Phi(get_irn_dbg_info(phi), block, n, inM, mode_M);
 
        /* third step: create a new data Phi */
-       phiD = new_rd_Phi(get_irn_dbg_info(phi), current_ir_graph, block, n, inD, mode);
+       phiD = new_rd_Phi(get_irn_dbg_info(phi), block, n, inD, mode);
 
        /* rewire memory and kill the node */
        for (i = n - 1; i >= 0; --i) {
                ir_node *proj  = projMs[i];
 
-               if(is_Proj(proj)) {
+               if (is_Proj(proj)) {
                        ir_node *store = get_Proj_pred(proj);
                        exchange(proj, inM[i]);
                        kill_node(store);
@@ -1587,19 +1445,19 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
        }
 
        /* fourth step: create the Store */
-       store = new_rd_Store(db, current_ir_graph, block, phiM, ptr, phiD);
+       store = new_rd_Store(db, block, phiM, ptr, phiD, cons_none);
 #ifdef DO_CACHEOPT
        co_set_irn_name(store, co_get_irn_ident(old_store));
 #endif
 
-       projM = new_rd_Proj(NULL, current_ir_graph, block, store, mode_M, pn_Store_M);
+       projM = new_rd_Proj(NULL, store, mode_M, pn_Store_M);
 
        info = get_ldst_info(store, &wenv->obst);
        info->projs[pn_Store_M] = projM;
 
        /* fifth step: repair exception flow */
        if (exc) {
-               ir_node *projX = new_rd_Proj(NULL, current_ir_graph, block, store, mode_X, pn_Store_X_except);
+               ir_node *projX = new_rd_Proj(NULL, store, mode_X, pn_Store_X_except);
 
                info->projs[pn_Store_X_except] = projX;
                info->exc_block                = exc;
@@ -1620,13 +1478,52 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
        exchange(phi, projM);
 
        return res | DF_CHANGED;
-}  /* optimize_phi */
+}
+
+static int optimize_conv_load(ir_node *conv)
+{
+       ir_node *op = get_Conv_op(conv);
+       if (!is_Proj(op))
+               return 0;
+       if (has_multiple_users(op))
+               return 0;
+       /* shrink mode of load if possible. */
+       ir_node *load = get_Proj_pred(op);
+       if (!is_Load(load))
+               return 0;
+
+       /* only do it if we are the only user (otherwise the risk is too
+        * great that we end up with 2 loads instead of one). */
+       ir_mode *mode      = get_irn_mode(conv);
+       ir_mode *load_mode = get_Load_mode(load);
+       int      bits_diff
+               = get_mode_size_bits(load_mode) - get_mode_size_bits(mode);
+       if (mode_is_float(load_mode) || mode_is_float(mode) || bits_diff < 0)
+               return 0;
+
+       if (be_get_backend_param()->byte_order_big_endian) {
+               if (bits_diff % 8 != 0)
+                       return 0;
+               ir_graph *irg   = get_irn_irg(conv);
+               ir_node  *ptr   = get_Load_ptr(load);
+               ir_mode  *mode  = get_irn_mode(ptr);
+               ir_node  *delta = new_r_Const_long(irg, mode, bits_diff/8);
+               ir_node  *block = get_nodes_block(load);
+               ir_node  *add   = new_r_Add(block, ptr, delta, mode);
+               set_Load_ptr(load, add);
+       }
+       set_Load_mode(load, mode);
+       set_irn_mode(op, mode);
+       exchange(conv, op);
+       return DF_CHANGED;
+}
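
A hypothetical source-level illustration (not part of the patch) of the Conv(Load) shrinking that optimize_conv_load() performs; the function name low_byte and the 32-bit unsigned int are assumptions for the example:

    /* A 32-bit load whose result is immediately truncated to 8 bits. */
    unsigned char low_byte(const unsigned int *p)
    {
        return (unsigned char)*p;   /* Conv(Load[Iu]) can become Load[Bu] */
    }

    /* On a little-endian target only the Load mode is narrowed.  On a
     * big-endian target the least significant byte sits at the highest
     * address of the word, so the address is advanced by
     * bits_diff/8 = (32 - 8)/8 = 3 bytes before the mode is narrowed,
     * which corresponds to the Add of delta built above. */
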
 
 /**
  * walker, do the optimizations
  */
-static void do_load_store_optimize(ir_node *n, void *env) {
-       walk_env_t *wenv = env;
+static void do_load_store_optimize(ir_node *n, void *env)
+{
+       walk_env_t *wenv = (walk_env_t*)env;
 
        switch (get_irn_opcode(n)) {
 
@@ -1642,21 +1539,24 @@ static void do_load_store_optimize(ir_node *n, void *env) {
                wenv->changes |= optimize_phi(n, wenv);
                break;
 
+       case iro_Conv:
+               wenv->changes |= optimize_conv_load(n);
+               break;
+
        default:
-               ;
+               break;
        }
-}  /* do_load_store_optimize */
+}
 
 /** A scc. */
 typedef struct scc {
-       ir_node *head;          /**< the head of the list */
+       ir_node *head;      /**< the head of the list */
 } scc;
 
 /** A node entry. */
 typedef struct node_entry {
        unsigned DFSnum;    /**< the DFS number of this node */
        unsigned low;       /**< the low number of this node */
-       ir_node  *header;   /**< the header of this node */
        int      in_stack;  /**< flag, set if the node is on the stack */
       ir_node  *next;     /**< link to the next node in the same scc */
        scc      *pscc;     /**< the scc of this node */
@@ -1665,29 +1565,30 @@ typedef struct node_entry {
 
 /** A loop entry. */
 typedef struct loop_env {
-       ir_phase ph;           /**< the phase object */
-       ir_node  **stack;      /**< the node stack */
-       int      tos;          /**< tos index */
-       unsigned nextDFSnum;   /**< the current DFS number */
-       unsigned POnum;        /**< current post order number */
-
-       unsigned changes;      /**< a bitmask of graph changes */
+       ir_nodehashmap_t map;
+       struct obstack   obst;
+       ir_node          **stack;      /**< the node stack */
+       size_t           tos;          /**< tos index */
+       unsigned         nextDFSnum;   /**< the current DFS number */
+       unsigned         POnum;        /**< current post order number */
+
+       unsigned         changes;      /**< a bitmask of graph changes */
 } loop_env;
 
 /**
 * Gets the node_entry of a node
 */
-static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
-       ir_phase   *ph = &env->ph;
-       node_entry *e  = phase_get_irn_data(&env->ph, irn);
+static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
+{
+       node_entry *e = ir_nodehashmap_get(node_entry, &env->map, irn);
 
-       if (! e) {
-               e = phase_alloc(ph, sizeof(*e));
+       if (e == NULL) {
+               e = OALLOC(&env->obst, node_entry);
                memset(e, 0, sizeof(*e));
-               phase_set_irn_data(ph, irn, e);
+               ir_nodehashmap_insert(&env->map, irn, e);
        }
        return e;
-}  /* get_irn_ne */
+}
 
 /**
  * Push a node onto the stack.
@@ -1695,17 +1596,18 @@ static node_entry *get_irn_ne(ir_node *irn, loop_env *env) {
  * @param env   the loop environment
  * @param n     the node to push
  */
-static void push(loop_env *env, ir_node *n) {
+static void push(loop_env *env, ir_node *n)
+{
        node_entry *e;
 
        if (env->tos == ARR_LEN(env->stack)) {
-               int nlen = ARR_LEN(env->stack) * 2;
+               size_t nlen = ARR_LEN(env->stack) * 2;
                ARR_RESIZE(ir_node *, env->stack, nlen);
        }
        env->stack[env->tos++] = n;
        e = get_irn_ne(n, env);
        e->in_stack = 1;
-}  /* push */
+}
 
 /**
  * pop a node from the stack
@@ -1714,13 +1616,14 @@ static void push(loop_env *env, ir_node *n) {
  *
  * @return  The topmost node
  */
-static ir_node *pop(loop_env *env) {
+static ir_node *pop(loop_env *env)
+{
        ir_node *n = env->stack[--env->tos];
        node_entry *e = get_irn_ne(n, env);
 
        e->in_stack = 0;
        return n;
-}  /* pop */
+}
 
 /**
  * Check if irn is a region constant.
@@ -1729,11 +1632,12 @@ static ir_node *pop(loop_env *env) {
  * @param irn           the node to check
  * @param header_block  the header block of the induction variable
  */
-static int is_rc(ir_node *irn, ir_node *header_block) {
+static int is_rc(ir_node *irn, ir_node *header_block)
+{
        ir_node *block = get_nodes_block(irn);
 
        return (block != header_block) && block_dominates(block, header_block);
-}  /* is_rc */
+}
 
 typedef struct phi_entry phi_entry;
 struct phi_entry {
@@ -1743,17 +1647,47 @@ struct phi_entry {
        phi_entry *next;
 };
 
+/**
+ * An entry in the avail set.
+ */
+typedef struct avail_entry_t {
+       ir_node *ptr;   /**< the address pointer */
+       ir_mode *mode;  /**< the load mode */
+       ir_node *load;  /**< the associated Load */
+} avail_entry_t;
+
+/**
+ * Compare two avail entries.
+ */
+static int cmp_avail_entry(const void *elt, const void *key, size_t size)
+{
+       const avail_entry_t *a = (const avail_entry_t*)elt;
+       const avail_entry_t *b = (const avail_entry_t*)key;
+       (void) size;
+
+       return a->ptr != b->ptr || a->mode != b->mode;
+}
+
+/**
+ * Calculate the hash value of an avail entry.
+ */
+static unsigned hash_cache_entry(const avail_entry_t *entry)
+{
+       return get_irn_idx(entry->ptr) * 9 + hash_ptr(entry->mode);
+}
+
 /**
  * Move loops out of loops if possible.
  *
  * @param pscc   the loop described by an SCC
  * @param env    the loop environment
  */
-static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
+static void move_loads_out_of_loops(scc *pscc, loop_env *env)
+{
        ir_node   *phi, *load, *next, *other, *next_other;
-       ir_entity *ent;
        int       j;
        phi_entry *phi_list = NULL;
+       set       *avail;
 
        /* collect all outer memories */
        for (phi = pscc->head; phi != NULL; phi = next) {
@@ -1764,7 +1698,7 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
                if (! is_Phi(phi))
                        continue;
 
-               assert(get_irn_mode(phi) == mode_M && "DFS geturn non-memory Phi");
+               assert(get_irn_mode(phi) == mode_M && "DFS return non-memory Phi");
 
                for (j = get_irn_arity(phi) - 1; j >= 0; --j) {
                        ir_node    *pred = get_irn_n(phi, j);
@@ -1772,7 +1706,7 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
 
                        if (pe->pscc != ne->pscc) {
                                /* not in the same SCC, is region const */
-                               phi_entry *pe = phase_alloc(&env->ph, sizeof(*pe));
+                               phi_entry *pe = OALLOC(&env->obst, phi_entry);
 
                                pe->phi  = phi;
                                pe->pos  = j;
@@ -1784,23 +1718,28 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
        /* no Phis no fun */
        assert(phi_list != NULL && "DFS found a loop without Phi");
 
+       /* for now, we cannot handle more than one input (only reducible cf) */
+       if (phi_list->next != NULL)
+               return;
+
+       avail = new_set(cmp_avail_entry, 8);
+
        for (load = pscc->head; load; load = next) {
                ir_mode *load_mode;
                node_entry *ne = get_irn_ne(load, env);
                next = ne->next;
 
                if (is_Load(load)) {
-                       ldst_info_t *info = get_irn_link(load);
+                       ldst_info_t *info = (ldst_info_t*)get_irn_link(load);
                        ir_node     *ptr = get_Load_ptr(load);
 
                        /* for now, we cannot handle Loads with exceptions */
                        if (info->projs[pn_Load_res] == NULL || info->projs[pn_Load_X_regular] != NULL || info->projs[pn_Load_X_except] != NULL)
                                continue;
 
-                       /* for now, we can only handle Load(Global) */
-                       if (! is_Global(ptr))
+                       /* for now, we can only move Load(Global) */
+                       if (! is_SymConst_addr_ent(ptr))
                                continue;
-                       ent = get_Global_entity(ptr);
                        load_mode = get_Load_mode(load);
                        for (other = pscc->head; other != NULL; other = next_other) {
                                node_entry *ne = get_irn_ne(other, env);
@@ -1808,7 +1747,6 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
 
                                if (is_Store(other)) {
                                        ir_alias_relation rel = get_alias_relation(
-                                               current_ir_graph,
                                                get_Store_ptr(other),
                                                get_irn_mode(get_Store_value(other)),
                                                ptr, load_mode);
@@ -1816,17 +1754,13 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
                                        if (rel != ir_no_alias)
                                                break;
                                }
-                               /* only pure Calls are allowed here, so ignore them */
+                               /* only Phis and pure Calls are allowed here, so ignore them */
                        }
                        if (other == NULL) {
-                               ldst_info_t *ninfo;
+                               ldst_info_t *ninfo = NULL;
                                phi_entry   *pe;
                                dbg_info    *db;
 
-                               /* for now, we cannot handle more than one input */
-                               if (phi_list->next != NULL)
-                                       return;
-
                                /* yep, no aliasing Store found, Load can be moved */
                                DB((dbg, LEVEL_1, "  Found a Load that could be moved: %+F\n", load));
 
@@ -1837,16 +1771,31 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
                                        ir_node *blk  = get_nodes_block(phi);
                                        ir_node *pred = get_Block_cfgpred_block(blk, pos);
                                        ir_node *irn, *mem;
+                                       avail_entry_t entry, *res;
 
-                                       pe->load = irn = new_rd_Load(db, current_ir_graph, pred, get_Phi_pred(phi, pos), ptr, load_mode);
-                                       ninfo = get_ldst_info(irn, phase_obst(&env->ph));
-
-                                       ninfo->projs[pn_Load_M] = mem = new_r_Proj(current_ir_graph, pred, irn, mode_M, pn_Load_M);
-                                       set_Phi_pred(phi, pos, mem);
+                                       entry.ptr  = ptr;
+                                       entry.mode = load_mode;
+                                       res = set_find(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
+                                       if (res != NULL) {
+                                               irn = res->load;
+                                       } else {
+                                               irn = new_rd_Load(db, pred, get_Phi_pred(phi, pos), ptr, load_mode, cons_none);
+                                               entry.load = irn;
+                                               (void)set_insert(avail_entry_t, avail, &entry, sizeof(entry), hash_cache_entry(&entry));
+                                               DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
+                                       }
+                                       pe->load = irn;
+                                       ninfo = get_ldst_info(irn, &env->obst);
 
-                                       ninfo->projs[pn_Load_res] = new_r_Proj(current_ir_graph, pred, irn, load_mode, pn_Load_res);
+                                       ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
+                                       if (res == NULL) {
+                                               /* Only rewire the Phi input for a newly created Load.
+                                                * If irn came from the cache, do not set the Phi pred
+                                                * again: there might already be other Loads between
+                                                * phi and irn.
+                                                */
+                                               set_Phi_pred(phi, pos, mem);
+                                       }
 
-                                       DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
+                                       ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
                                }
 
                                /* now kill the old Load */
@@ -1857,7 +1806,8 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
                        }
                }
        }
-}  /* move_loads_out_of_loops */
+       del_set(avail);
+}
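
A hypothetical source-level sketch (not part of the patch) of the effect of move_loads_out_of_loops(); the global g and the function sum are assumptions for the example:

    extern int g;               /* read via Load(SymConst g) in the loop */

    int sum(int n)
    {
        int s = 0;
        for (int i = 0; i < n; ++i)
            s += g;             /* no Store in the loop aliases &g */
        return s;
    }

    /* Because no Store inside the SCC may alias &g, the Load of g is
     * re-created once in the block feeding the loop-header memory Phi
     * and its result is reused in every iteration.  The avail set,
     * keyed by (ptr, mode), lets several such hoisted Loads in the same
     * predecessor block share one new Load node. */
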
 
 /**
  * Process a loop SCC.
@@ -1865,7 +1815,8 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env) {
  * @param pscc  the SCC
  * @param env   the loop environment
  */
-static void process_loop(scc *pscc, loop_env *env) {
+static void process_loop(scc *pscc, loop_env *env)
+{
        ir_node *irn, *next, *header = NULL;
        node_entry *b, *h = NULL;
        int j, only_phi, num_outside, process = 0;
@@ -1879,13 +1830,12 @@ static void process_loop(scc *pscc, loop_env *env) {
                next = e->next;
                b = get_irn_ne(block, env);
 
-               if (header) {
+               if (header != NULL) {
                        if (h->POnum < b->POnum) {
                                header = block;
                                h      = b;
                        }
-               }
-               else {
+               } else {
                        header = block;
                        h      = b;
                }
@@ -1940,10 +1890,12 @@ static void process_loop(scc *pscc, loop_env *env) {
                                                /* not a memory loop */
                                                goto fail;
                                        }
-                                       if (! out_rc) {
+                                       if (out_rc == NULL) {
+                                               /* first region constant */
                                                out_rc = pred;
                                                ++num_outside;
                                        } else if (out_rc != pred) {
+                                               /* another region constant */
                                                ++num_outside;
                                        }
                                }
@@ -1963,27 +1915,25 @@ static void process_loop(scc *pscc, loop_env *env) {
                for (irn = pscc->head; irn; irn = next) {
                        node_entry *e = get_irn_ne(irn, env);
                        next = e->next;
-                       e->header = NULL;
                        exchange(irn, out_rc);
                }
                env->changes |= DF_CHANGED;
                return;
        }
 
-       /* set the header for every node in this scc */
+#ifdef DEBUG_libfirm
        for (irn = pscc->head; irn; irn = next) {
                node_entry *e = get_irn_ne(irn, env);
-               e->header = header;
                next = e->next;
                DB((dbg, LEVEL_2, " %+F,", irn));
        }
        DB((dbg, LEVEL_2, "\n"));
-
+#endif
        move_loads_out_of_loops(pscc, env);
 
 fail:
        ;
-}  /* process_loop */
+}
 
 /**
  * Process a SCC.
@@ -1991,7 +1941,8 @@ fail:
  * @param pscc  the SCC
  * @param env   the loop environment
  */
-static void process_scc(scc *pscc, loop_env *env) {
+static void process_scc(scc *pscc, loop_env *env)
+{
        ir_node *head = pscc->head;
        node_entry *e = get_irn_ne(head, env);
 
@@ -2015,7 +1966,7 @@ static void process_scc(scc *pscc, loop_env *env) {
                /* this SCC has more than one member */
                process_loop(pscc, env);
        }
-}  /* process_scc */
+}
 
 /**
  * Do Tarjan's SCC algorithm and drive load/store optimization.
@@ -2041,7 +1992,7 @@ static void dfs(ir_node *irn, loop_env *env)
                        ir_node *pred = get_irn_n(irn, i);
                        node_entry *o = get_irn_ne(pred, env);
 
-                       if (irn_not_visited(pred)) {
+                       if (!irn_visited(pred)) {
                                dfs(pred, env);
                                node->low = MIN(node->low, o->low);
                        }
@@ -2049,10 +2000,10 @@ static void dfs(ir_node *irn, loop_env *env)
                                node->low = MIN(o->DFSnum, node->low);
                }
        } else if (is_fragile_op(irn)) {
-               ir_node *pred = get_fragile_op_mem(irn);
+               ir_node *pred = get_memop_mem(irn);
                node_entry *o = get_irn_ne(pred, env);
 
-               if (irn_not_visited(pred)) {
+               if (!irn_visited(pred)) {
                        dfs(pred, env);
                        node->low = MIN(node->low, o->low);
                }
@@ -2062,7 +2013,7 @@ static void dfs(ir_node *irn, loop_env *env)
                ir_node *pred = get_Proj_pred(irn);
                node_entry *o = get_irn_ne(pred, env);
 
-               if (irn_not_visited(pred)) {
+               if (!irn_visited(pred)) {
                        dfs(pred, env);
                        node->low = MIN(node->low, o->low);
                }
@@ -2074,7 +2025,7 @@ static void dfs(ir_node *irn, loop_env *env)
        }
 
        if (node->low == node->DFSnum) {
-               scc *pscc = phase_alloc(&env->ph, sizeof(*pscc));
+               scc *pscc = OALLOC(&env->obst, scc);
                ir_node *x;
 
                pscc->head = NULL;
@@ -2090,7 +2041,7 @@ static void dfs(ir_node *irn, loop_env *env)
 
                process_scc(pscc, env);
        }
-}  /* dfs */
+}
 
 /**
  * Do the DFS on the memory edges of a graph.
@@ -2098,12 +2049,11 @@ static void dfs(ir_node *irn, loop_env *env)
  * @param irg  the graph to process
  * @param env  the loop environment
  */
-static void do_dfs(ir_graph *irg, loop_env *env) {
-       ir_graph *rem = current_ir_graph;
+static void do_dfs(ir_graph *irg, loop_env *env)
+{
        ir_node  *endblk, *end;
        int      i;
 
-       current_ir_graph = irg;
        inc_irg_visited(irg);
 
        /* visit all memory nodes */
@@ -2112,13 +2062,15 @@ static void do_dfs(ir_graph *irg, loop_env *env) {
                ir_node *pred = get_Block_cfgpred(endblk, i);
 
                pred = skip_Proj(pred);
-               if (is_Return(pred))
+               if (is_Return(pred)) {
                        dfs(get_Return_mem(pred), env);
-               else if (is_Raise(pred))
+               } else if (is_Raise(pred)) {
                        dfs(get_Raise_mem(pred), env);
-               else if (is_fragile_op(pred))
-                       dfs(get_fragile_op_mem(pred), env);
-               else {
+               } else if (is_fragile_op(pred)) {
+                       dfs(get_memop_mem(pred), env);
+               } else if (is_Bad(pred)) {
+                       /* ignore non-optimized block predecessor */
+               } else {
                        assert(0 && "Unknown EndBlock predecessor");
                }
        }
@@ -2128,28 +2080,18 @@ static void do_dfs(ir_graph *irg, loop_env *env) {
        for (i = get_End_n_keepalives(end) - 1; i >= 0; --i) {
                ir_node *ka = get_End_keepalive(end, i);
 
-               if (is_Phi(ka) && irn_not_visited(ka))
+               if (is_Phi(ka) && !irn_visited(ka))
                        dfs(ka, env);
        }
-       current_ir_graph = rem;
-}  /* do_dfs */
-
-/**
- * Initialize new phase data. We do this always explicit, so return NULL here
- */
-static void *init_loop_data(ir_phase *ph, const ir_node *irn, void *data) {
-       (void)ph;
-       (void)irn;
-       (void)data;
-       return NULL;
-}  /* init_loop_data */
+}
 
 /**
  * Optimize Loads/Stores in loops.
  *
  * @param irg  the graph
  */
-static int optimize_loops(ir_graph *irg) {
+static int optimize_loops(ir_graph *irg)
+{
        loop_env env;
 
        env.stack         = NEW_ARR_F(ir_node *, 128);
@@ -2157,39 +2099,36 @@ static int optimize_loops(ir_graph *irg) {
        env.nextDFSnum    = 0;
        env.POnum         = 0;
        env.changes       = 0;
-       phase_init(&env.ph, "ldstopt", irg, PHASE_DEFAULT_GROWTH, init_loop_data, NULL);
+       ir_nodehashmap_init(&env.map);
+       obstack_init(&env.obst);
 
        /* calculate the SCC's and drive loop optimization. */
        do_dfs(irg, &env);
 
        DEL_ARR_F(env.stack);
-       phase_free(&env.ph);
+       obstack_free(&env.obst, NULL);
+       ir_nodehashmap_destroy(&env.map);
 
        return env.changes;
-}  /* optimize_loops */
+}
 
-/*
- * do the load store optimization
- */
-void optimize_load_store(ir_graph *irg) {
+void optimize_load_store(ir_graph *irg)
+{
        walk_env_t env;
 
+       assure_irg_properties(irg,
+               IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
+               | IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES
+               | IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES
+               | IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+               | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
+
        FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
 
-       assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
                "LoadStore optimization needs pinned graph");
 
-       /* we need landing pads */
-       remove_critical_cf_edges(irg);
-
-       edges_assure(irg);
-
-       /* for Phi optimization post-dominators are needed ... */
-       assure_postdoms(irg);
-
        if (get_opt_alias_analysis()) {
-               assure_irg_entity_usage_computed(irg);
                assure_irp_globals_entity_usage_computed();
        }
 
@@ -2207,15 +2146,15 @@ void optimize_load_store(ir_graph *irg) {
 
        obstack_free(&env.obst, NULL);
 
-       /* Handle graph state */
-       if (env.changes) {
-               set_irg_outs_inconsistent(irg);
-               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
-       }
+       confirm_irg_properties(irg,
+               env.changes
+               ? env.changes & CF_CHANGED
+                       ? IR_GRAPH_PROPERTIES_NONE
+                       : IR_GRAPH_PROPERTIES_CONTROL_FLOW
+               : IR_GRAPH_PROPERTIES_ALL);
+}
 
-       if (env.changes & CF_CHANGED) {
-               /* is this really needed: Yes, control flow changed, block might
-               have Bad() predecessors. */
-               set_irg_doms_inconsistent(irg);
-       }
-}  /* optimize_load_store */
+ir_graph_pass_t *optimize_load_store_pass(const char *name)
+{
+       return def_graph_pass(name ? name : "ldst", optimize_load_store);
+}
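
A minimal driver sketch, assuming the declarations from irprog.h and iroptimize.h; it is not part of the patch:

    #include "irprog.h"
    #include "iroptimize.h"

    /* Run the load/store optimization on every graph of the program.
     * optimize_load_store() establishes the graph properties it needs
     * (out edges, dominance, entity usage) via assure_irg_properties(). */
    static void run_ldst_on_all_graphs(void)
    {
        for (size_t i = 0; i < get_irp_n_irgs(); ++i) {
            optimize_load_store(get_irp_irg(i));
        }
    }
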