trverify: cleanup, check irg.entity == entity.irg
[libfirm] / ir / opt / ldstopt.c
index f8e2b86..a396787 100644
@@ -21,7 +21,6 @@
  * @file
  * @brief   Load/Store optimizations.
  * @author  Michael Beck
- * @version $Id$
  */
 #include "config.h"
 
@@ -35,6 +34,7 @@
 #include "ircons_t.h"
 #include "irgmod.h"
 #include "irgwalk.h"
+#include "irtools.h"
 #include "tv_t.h"
 #include "dbginfo_t.h"
 #include "iropt_dbg.h"
 #include "irhooks.h"
 #include "iredges.h"
 #include "irpass.h"
-#include "opt_polymorphy.h"
 #include "irmemory.h"
-#include "irphase_t.h"
+#include "irnodehashmap.h"
 #include "irgopt.h"
 #include "set.h"
 #include "be.h"
 #include "debug.h"
+#include "opt_manage.h"
 
 /** The debug handle. */
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
@@ -74,7 +74,7 @@ typedef struct walk_env_t {
 
 /** A Load/Store info. */
 typedef struct ldst_info_t {
-       ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
+       ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
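
Note: the +1 matters because projs[] is indexed directly by projection numbers and, assuming MAX_PROJ now names the largest valid Proj number rather than a count, MAX_PROJ itself must be a valid index. A standalone C sketch of the off-by-one (MAX_PN is a stand-in, not libfirm's constant):

    #include <stdio.h>

    #define MAX_PN 3                     /* stand-in for MAX_PROJ (assumption) */

    int main(void)
    {
        int projs[MAX_PN + 1] = { 0 };   /* MAX_PN itself must be indexable */
        projs[MAX_PN] = 42;              /* would overflow an array of size MAX_PN */
        printf("%d\n", projs[MAX_PN]);
        return 0;
    }
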
@@ -215,7 +215,7 @@ static void collect_nodes(ir_node *node, void *env)
 
                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
-                               is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
+                               is_exc = is_x_except_Proj(proj);
                        }
 
                        /* ignore Bad predecessors, they will be removed later */
@@ -346,11 +346,11 @@ static long get_Sel_array_index_long(ir_node *n, int dim)
  * @param depth  current depth in steps upward from the root
  *               of the address
  */
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
+static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
 {
        compound_graph_path *res = NULL;
        ir_entity           *root, *field, *ent;
-       int                 path_len, pos, idx;
+       size_t              path_len, pos, idx;
        ir_tarval           *tv;
        ir_type             *tp;
 
@@ -505,7 +505,7 @@ static compound_graph_path *get_accessed_path(ir_node *ptr)
 typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
-       long              index;
+       size_t            index;
 } path_entry;
 
 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
@@ -515,7 +515,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
        ir_initializer_t *initializer;
        ir_tarval        *tv;
        ir_type          *tp;
-       unsigned         n;
+       size_t           n;
 
        entry.next = next;
        if (is_SymConst(ptr)) {
@@ -538,7 +538,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
                                        continue;
                                }
                        }
-                       if (p->index >= (int) n)
+                       if (p->index >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);
 
@@ -571,7 +571,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
-                       int i, n_members = get_compound_n_members(tp);
+                       size_t i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
@@ -746,20 +746,29 @@ static void reduce_adr_usage(ir_node *ptr)
  */
 static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
 {
+       unsigned old_size;
+       unsigned new_size;
        if (old_mode == new_mode)
-               return 1;
+               return true;
+
+       old_size = get_mode_size_bits(old_mode);
+       new_size = get_mode_size_bits(new_mode);
 
        /* if both modes are two's complement ones, we can always convert the
-          Stored value into the needed one. */
-       if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+          Stored value into the needed one. (on big endian machines we currently
+          only support this for modes of the same size) */
+       if (old_size >= new_size &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
-                 get_mode_arithmetic(new_mode) == irma_twos_complement)
-               return 1;
-       return 0;
-}  /* can_use_stored_value */
+                 get_mode_arithmetic(new_mode) == irma_twos_complement &&
+                 (!be_get_backend_param()->byte_order_big_endian
+               || old_size == new_size)) {
+               return true;
+       }
+       return false;
+}
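
Note: the endianness restriction exists because a narrower load at the store address sees the low-order bytes of the stored value only on little-endian targets; on big-endian targets it sees the high-order bytes instead. A standalone illustration (not part of the patch):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t stored = 0x11223344;
        uint8_t  first_byte;
        memcpy(&first_byte, &stored, 1);  /* 8-bit load at the store address */
        /* little endian: 0x44 (low-order byte, what the reuse assumes);
         * big endian:    0x11 (high-order byte) */
        printf("0x%02x\n", first_byte);
        return 0;
    }
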
 
 /**
- * Check whether a Call is at least pure, ie. does only read memory.
+ * Check whether a Call is at least pure, i.e. it only reads memory.
  */
 static unsigned is_Call_pure(ir_node *call)
 {
@@ -771,8 +780,8 @@ static unsigned is_Call_pure(ir_node *call)
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);
 
-               if (is_Global(ptr)) {
-                       ir_entity *ent = get_Global_entity(ptr);
+               if (is_SymConst_addr_ent(ptr)) {
+                       ir_entity *ent = get_SymConst_entity(ptr);
 
                        prop = get_entity_additional_properties(ent);
                }
@@ -867,7 +876,10 @@ static int try_load_after_store(ir_node *load,
        store_value    = get_Store_value(store);
 
        if (delta != 0 || store_mode != load_mode) {
-               if (delta < 0 || delta + load_mode_len > store_mode_len)
+               /* TODO: implement for big-endian */
+               if (delta < 0 || delta + load_mode_len > store_mode_len
+                               || (be_get_backend_param()->byte_order_big_endian
+                                   && load_mode_len != store_mode_len))
                        return 0;
 
                if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
@@ -880,7 +892,6 @@ static int try_load_after_store(ir_node *load,
                        ir_node *cnst;
                        ir_graph *irg = get_irn_irg(load);
 
-                       /* FIXME: only true for little endian */
                        cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
                        store_value = new_r_Shr(get_nodes_block(load),
                                                                        store_value, cnst, store_mode);
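
Note: on little endian, a Load at store address + delta reads the bytes that start delta*8 bits above the least significant end of the stored value, which is exactly the right shift built above (and why the old FIXME can go once big endian is rejected earlier in this function). A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t stored = 0x11223344;  /* 32-bit Store at address p */
        unsigned delta  = 1;           /* 8-bit Load at p + 1, little endian */
        uint8_t  loaded = (uint8_t)(stored >> (delta * 8));
        printf("0x%02x\n", loaded);    /* prints 0x33 */
        return 0;
    }
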
@@ -902,7 +913,7 @@ static int try_load_after_store(ir_node *load,
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
                ir_graph *irg = get_irn_irg(load);
-               exchange( info->projs[pn_Load_X_except], new_r_Bad(irg));
+               exchange( info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
@@ -976,6 +987,9 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
                         * Here, there is no need to check if the previous Load has an
                         * exception handler because they would have exactly the same
                         * exception...
+                        *
+                        * TODO: implement load-after-load with different mode for big
+                        *       endian
                         */
                        if (info->projs[pn_Load_X_except] == NULL
                                        || get_nodes_block(load) == get_nodes_block(pred)) {
@@ -1005,7 +1019,7 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        ir_graph *irg = get_irn_irg(load);
-                                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+                                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
@@ -1082,8 +1096,9 @@ ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
                if (is_reinterpret_cast(c_mode, l_mode)) {
                        /* copy the value from the const code irg and cast it */
                        res = new_rd_Conv(dbgi, block, res, l_mode);
+               } else {
+                       return NULL;
                }
-               return NULL;
        }
        return res;
 }
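
Note: before this hunk the function fell through to return NULL even after building the Conv, so reinterpret-cast loads were never actually folded; now NULL is returned only when no conversion exists. A standalone illustration of such a same-size, different-mode reinterpretation:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t bits = 0x3f800000;    /* IEEE-754 bit pattern of 1.0f */
        float    f;
        memcpy(&f, &bits, sizeof(f));  /* same size, different mode */
        printf("%f\n", f);             /* prints 1.000000 */
        return 0;
    }
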
@@ -1126,50 +1141,57 @@ static unsigned optimize_load(ir_node *load)
                return res | DF_CHANGED;
        }
 
-       /* Load from a constant polymorphic field, where we can resolve
-          polymorphism. */
-       value = transform_polymorph_Load(load);
-       if (value == load) {
-               value = NULL;
-               /* check if we can determine the entity that will be loaded */
-               ent = find_constant_entity(ptr);
-               if (ent != NULL
-                               && get_entity_visibility(ent) != ir_visibility_external) {
-                       /* a static allocation that is not external: there should be NO
-                        * exception when loading even if we cannot replace the load itself.
-                        */
+       value = NULL;
+       /* check if we can determine the entity that will be loaded */
+       ent = find_constant_entity(ptr);
+       if (ent != NULL
+                       && get_entity_visibility(ent) != ir_visibility_external) {
+               /* a static allocation that is not external: there should be NO
+                * exception when loading even if we cannot replace the load itself.
+                */
 
-                       /* no exception, clear the info field as it might be checked later again */
-                       if (info->projs[pn_Load_X_except]) {
-                               ir_graph *irg = get_irn_irg(load);
-                               exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
-                               info->projs[pn_Load_X_except] = NULL;
-                               res |= CF_CHANGED;
-                       }
-                       if (info->projs[pn_Load_X_regular]) {
-                               exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
-                               info->projs[pn_Load_X_regular] = NULL;
-                               res |= CF_CHANGED;
-                       }
+               /* no exception, clear the info field as it might be checked later again */
+               if (info->projs[pn_Load_X_except]) {
+                       ir_graph *irg = get_irn_irg(load);
+                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
+                       info->projs[pn_Load_X_except] = NULL;
+                       res |= CF_CHANGED;
+               }
+               if (info->projs[pn_Load_X_regular]) {
+                       exchange(info->projs[pn_Load_X_regular], new_r_Jmp(get_nodes_block(load)));
+                       info->projs[pn_Load_X_regular] = NULL;
+                       res |= CF_CHANGED;
+               }
 
-                       if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
-                               if (ent->initializer != NULL) {
-                                       /* new style initializer */
-                                       value = find_compound_ent_value(ptr);
-                               } else if (entity_has_compound_ent_values(ent)) {
-                                       /* old style initializer */
-                                       compound_graph_path *path = get_accessed_path(ptr);
+               if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
+                       if (has_entity_initializer(ent)) {
+                               /* new style initializer */
+                               value = find_compound_ent_value(ptr);
+                       } else if (entity_has_compound_ent_values(ent)) {
+                               /* old style initializer */
+                               compound_graph_path *path = get_accessed_path(ptr);
 
-                                       if (path != NULL) {
-                                               assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
+                               if (path != NULL) {
+                                       assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
 
-                                               value = get_compound_ent_value_by_path(ent, path);
-                                               DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
-                                               free_compound_graph_path(path);
-                                       }
+                                       value = get_compound_ent_value_by_path(ent, path);
+                                       DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
+                                       free_compound_graph_path(path);
+                               }
+                       }
+                       if (value != NULL) {
+                               ir_graph *irg = get_irn_irg(load);
+                               value = can_replace_load_by_const(load, value);
+                               if (value != NULL && is_Sel(ptr) &&
+                                               !is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
+                                       /* frontend has inserted masking operations after bitfield accesses,
+                                        * so we might have to shift the const. */
+                                       unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
+                                       ir_tarval *tv_old = get_Const_tarval(value);
+                                       ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
+                                       ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
+                                       value = new_r_Const(irg, tv_new);
                                }
-                               if (value != NULL)
-                                       value = can_replace_load_by_const(load, value);
                        }
                }
        }
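
Note: the shift is needed because, when the IMPLICIT_BITFIELD_MASKING state is absent, the frontend already emitted mask/shift operations after each bitfield Load; the Load must therefore produce the raw in-memory word, i.e. the field value moved up by its bit offset within the byte. A standalone sketch of that arithmetic (values are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned bit_offset = 3;                   /* offset-bits remainder */
        unsigned value      = 5;                   /* constant from initializer */
        unsigned raw        = value << bit_offset; /* what memory really holds */
        /* the frontend's masking code then recovers the field value: */
        printf("%u\n", (raw >> bit_offset) & 0x7); /* prints 5 */
        return 0;
    }
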
@@ -1177,7 +1199,7 @@ static unsigned optimize_load(ir_node *load)
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
                        ir_graph *irg = get_irn_irg(load);
-                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
@@ -1466,7 +1488,10 @@ static unsigned optimize_store(ir_node *store)
 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
 {
        int i, n;
-       ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+       ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+#ifdef DO_CACHEOPT
+       ir_node *old_store;
+#endif
        ir_mode *mode;
        ir_node **inM, **inD, **projMs;
        int *idx;
@@ -1489,16 +1514,14 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
                return 0;
 
        store = skip_Proj(projM);
+#ifdef DO_CACHEOPT
        old_store = store;
+#endif
        if (!is_Store(store))
                return 0;
 
        block = get_nodes_block(store);
 
-       /* abort on dead blocks */
-       if (is_Block_dead(block))
-               return 0;
-
        /* check if the block is post dominated by Phi-block
           and has no exception exit */
        bl_info = (block_info_t*)get_irn_link(block);
@@ -1534,10 +1557,7 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
                if (exc != info->exc_block)
                        return 0;
 
-               /* abort on dead blocks */
                block = get_nodes_block(pred);
-               if (is_Block_dead(block))
-                       return 0;
 
                /* check if the block is post dominated by Phi-block
                   and has no exception exit. Note that block must be different from
@@ -1666,7 +1686,7 @@ static void do_load_store_optimize(ir_node *n, void *env)
                break;
 
        default:
-               ;
+               break;
        }
 }  /* do_load_store_optimize */
 
@@ -1687,13 +1707,14 @@ typedef struct node_entry {
 
 /** A loop entry. */
 typedef struct loop_env {
-       ir_phase ph;           /**< the phase object */
-       ir_node  **stack;      /**< the node stack */
-       size_t   tos;          /**< tos index */
-       unsigned nextDFSnum;   /**< the current DFS number */
-       unsigned POnum;        /**< current post order number */
-
-       unsigned changes;      /**< a bitmask of graph changes */
+       ir_nodehashmap_t map;
+       struct obstack   obst;
+       ir_node          **stack;      /**< the node stack */
+       size_t           tos;          /**< tos index */
+       unsigned         nextDFSnum;   /**< the current DFS number */
+       unsigned         POnum;        /**< current post order number */
+
+       unsigned         changes;      /**< a bitmask of graph changes */
 } loop_env;
 
 /**
@@ -1701,13 +1722,12 @@ typedef struct loop_env {
 */
 static node_entry *get_irn_ne(ir_node *irn, loop_env *env)
 {
-       ir_phase   *ph = &env->ph;
-       node_entry *e  = (node_entry*)phase_get_irn_data(&env->ph, irn);
+       node_entry *e = (node_entry*)ir_nodehashmap_get(&env->map, irn);
 
-       if (! e) {
-               e = (node_entry*)phase_alloc(ph, sizeof(*e));
+       if (e == NULL) {
+               e = OALLOC(&env->obst, node_entry);
                memset(e, 0, sizeof(*e));
-               phase_set_irn_data(ph, irn, e);
+               ir_nodehashmap_insert(&env->map, irn, e);
        }
        return e;
 }  /* get_irn_ne */
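
Note: the two hunks above replace the removed ir_phase per-node storage with an ir_nodehashmap plus a private obstack: lookups return NULL for unseen nodes, and entries are then zero-initialized on the obstack and inserted. A condensed lifecycle sketch, using only the calls visible in this patch (irn stands for some ir_node):

    ir_nodehashmap_t map;
    struct obstack   obst;

    ir_nodehashmap_init(&map);
    obstack_init(&obst);

    node_entry *e = OALLOC(&obst, node_entry);  /* lives on the obstack */
    memset(e, 0, sizeof(*e));
    ir_nodehashmap_insert(&map, irn, e);        /* keyed by the ir_node */

    obstack_free(&obst, NULL);                  /* frees all entries at once */
    ir_nodehashmap_destroy(&map);               /* the map does not own entries */
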
@@ -1795,7 +1815,7 @@ static int cmp_avail_entry(const void *elt, const void *key, size_t size)
  */
 static unsigned hash_cache_entry(const avail_entry_t *entry)
 {
-       return get_irn_idx(entry->ptr) * 9 + HASH_PTR(entry->mode);
+       return get_irn_idx(entry->ptr) * 9 + hash_ptr(entry->mode);
 }  /* hash_cache_entry */
 
 /**
@@ -1807,13 +1827,10 @@ static unsigned hash_cache_entry(const avail_entry_t *entry)
 static void move_loads_out_of_loops(scc *pscc, loop_env *env)
 {
        ir_node   *phi, *load, *next, *other, *next_other;
-       ir_entity *ent;
        int       j;
        phi_entry *phi_list = NULL;
        set       *avail;
 
-       avail = new_set(cmp_avail_entry, 8);
-
        /* collect all outer memories */
        for (phi = pscc->head; phi != NULL; phi = next) {
                node_entry *ne = get_irn_ne(phi, env);
@@ -1831,7 +1848,7 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env)
 
                        if (pe->pscc != ne->pscc) {
                                /* not in the same SCC, is region const */
-                               phi_entry *pe = (phi_entry*)phase_alloc(&env->ph, sizeof(*pe));
+                               phi_entry *pe = OALLOC(&env->obst, phi_entry);
 
                                pe->phi  = phi;
                                pe->pos  = j;
@@ -1847,6 +1864,8 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env)
        if (phi_list->next != NULL)
                return;
 
+       avail = new_set(cmp_avail_entry, 8);
+
        for (load = pscc->head; load; load = next) {
                ir_mode *load_mode;
                node_entry *ne = get_irn_ne(load, env);
@@ -1861,9 +1880,8 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env)
                                continue;
 
                        /* for now, we can only move Load(Global) */
-                       if (! is_Global(ptr))
+                       if (! is_SymConst_addr_ent(ptr))
                                continue;
-                       ent       = get_Global_entity(ptr);
                        load_mode = get_Load_mode(load);
                        for (other = pscc->head; other != NULL; other = next_other) {
                                node_entry *ne = get_irn_ne(other, env);
@@ -1909,10 +1927,15 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env)
                                                DB((dbg, LEVEL_1, "  Created %+F in %+F\n", irn, pred));
                                        }
                                        pe->load = irn;
-                                       ninfo = get_ldst_info(irn, phase_obst(&env->ph));
+                                       ninfo = get_ldst_info(irn, &env->obst);
 
                                        ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
-                                       set_Phi_pred(phi, pos, mem);
+                                       if (res == NULL) {
+                                               /* only rewire the Phi input for a newly created
+                                                * Load; if irn came from the avail cache, other
+                                                * Loads might already sit between phi and irn. */
+                                               set_Phi_pred(phi, pos, mem);
+                                       }
 
                                        ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
                                }
@@ -2119,7 +2142,7 @@ static void dfs(ir_node *irn, loop_env *env)
                                node->low = MIN(o->DFSnum, node->low);
                }
        } else if (is_fragile_op(irn)) {
-               ir_node *pred = get_fragile_op_mem(irn);
+               ir_node *pred = get_memop_mem(irn);
                node_entry *o = get_irn_ne(pred, env);
 
                if (!irn_visited(pred)) {
@@ -2144,7 +2167,7 @@ static void dfs(ir_node *irn, loop_env *env)
        }
 
        if (node->low == node->DFSnum) {
-               scc *pscc = (scc*)phase_alloc(&env->ph, sizeof(*pscc));
+               scc *pscc = OALLOC(&env->obst, scc);
                ir_node *x;
 
                pscc->head = NULL;
@@ -2181,15 +2204,15 @@ static void do_dfs(ir_graph *irg, loop_env *env)
                ir_node *pred = get_Block_cfgpred(endblk, i);
 
                pred = skip_Proj(pred);
-               if (is_Return(pred))
+               if (is_Return(pred)) {
                        dfs(get_Return_mem(pred), env);
-               else if (is_Raise(pred))
+               } else if (is_Raise(pred)) {
                        dfs(get_Raise_mem(pred), env);
-               else if (is_fragile_op(pred))
-                       dfs(get_fragile_op_mem(pred), env);
-               else if (is_Bad(pred))
-                       /* ignore non-optimized block predecessor */;
-               else {
+               } else if (is_fragile_op(pred)) {
+                       dfs(get_memop_mem(pred), env);
+               } else if (is_Bad(pred)) {
+                       /* ignore non-optimized block predecessor */
+               } else {
                        assert(0 && "Unknown EndBlock predecessor");
                }
        }
@@ -2218,13 +2241,15 @@ static int optimize_loops(ir_graph *irg)
        env.nextDFSnum    = 0;
        env.POnum         = 0;
        env.changes       = 0;
-       phase_init(&env.ph, irg, phase_irn_init_default);
+       ir_nodehashmap_init(&env.map);
+       obstack_init(&env.obst);
 
        /* calculate the SCC's and drive loop optimization. */
        do_dfs(irg, &env);
 
        DEL_ARR_F(env.stack);
-       phase_deinit(&env.ph);
+       obstack_free(&env.obst, NULL);
+       ir_nodehashmap_destroy(&env.map);
 
        return env.changes;
 }  /* optimize_loops */
@@ -2232,9 +2257,10 @@ static int optimize_loops(ir_graph *irg)
 /*
  * do the load store optimization
  */
-int optimize_load_store(ir_graph *irg)
+static ir_graph_state_t do_loadstore_opt(ir_graph *irg)
 {
        walk_env_t env;
+       ir_graph_state_t res = 0;
 
        FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
 
@@ -2242,16 +2268,7 @@ int optimize_load_store(ir_graph *irg)
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
                "LoadStore optimization needs pinned graph");
 
-       /* we need landing pads */
-       remove_critical_cf_edges(irg);
-
-       edges_assure(irg);
-
-       /* for Phi optimization post-dominators are needed ... */
-       assure_postdoms(irg);
-
        if (get_opt_alias_analysis()) {
-               assure_irg_entity_usage_computed(irg);
                assure_irp_globals_entity_usage_computed();
        }
 
@@ -2271,17 +2288,27 @@ int optimize_load_store(ir_graph *irg)
 
        /* Handle graph state */
        if (env.changes) {
-               set_irg_outs_inconsistent(irg);
-               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+               edges_deactivate(irg);
        }
 
-       if (env.changes & CF_CHANGED) {
-               /* is this really needed: Yes, control flow changed, block might
-               have Bad() predecessors. */
-               set_irg_doms_inconsistent(irg);
+       if (!(env.changes & CF_CHANGED)) {
+               res |= IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_NO_BADS;
        }
-       return env.changes != 0;
-}  /* optimize_load_store */
+
+       return res;
+}
+
+static optdesc_t opt_loadstore = {
+       "load-store",
+       IR_GRAPH_STATE_NO_UNREACHABLE_CODE | IR_GRAPH_STATE_CONSISTENT_OUT_EDGES
+               | IR_GRAPH_STATE_NO_CRITICAL_EDGES | IR_GRAPH_STATE_CONSISTENT_DOMINANCE
+               | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE,
+       do_loadstore_opt,
+};
+
+int optimize_load_store(ir_graph *irg)
+{
+       perform_irg_optimization(irg, &opt_loadstore);
+       return 1;
+}
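
Note: optimize_load_store is now a thin wrapper. perform_irg_optimization (from opt_manage.h, included above) is expected to establish the listed IR_GRAPH_STATE_* preconditions before calling do_loadstore_opt and to record the states the callback reports as still valid, which is why the explicit remove_critical_cf_edges/edges_assure/assure_postdoms calls could be deleted. A sketch of how another pass might plug into the same driver (do_example_opt is hypothetical):

    /* hypothetical second client of the opt_manage driver */
    static optdesc_t opt_example = {
        "example",                            /* pass name */
        IR_GRAPH_STATE_CONSISTENT_OUT_EDGES,  /* states required up front */
        do_example_opt,                       /* returns states still valid */
    };

    perform_irg_optimization(irg, &opt_example);
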
 
 ir_graph_pass_t *optimize_load_store_pass(const char *name)
 {