remove Cast node
diff --git a/ir/opt/opt_ldst.c b/ir/opt/opt_ldst.c
index b704abe..1bbdf2e 100644
--- a/ir/opt/opt_ldst.c
+++ b/ir/opt/opt_ldst.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
  *
  * This file is part of libFirm.
  *
@@ -22,7 +22,6 @@
  * @brief   Dataflow driven Load/Store optimizations, uses some ideas from
  *          VanDrunen's LEPRE
  * @author  Michael Beck
- * @version $Id$
  */
 #include "config.h"
 
 #include "irgopt.h"
 #include "iropt.h"
 #include "iroptimize.h"
-#include "irnodemap.h"
+#include "irnodehashmap.h"
 #include "raw_bitset.h"
 #include "debug.h"
 #include "error.h"
 #include "irpass.h"
 
 /* maximum number of output Proj's */
-#define MAX_PROJ (pn_Load_max > pn_Store_max ? pn_Load_max : pn_Store_max)
+#define MAX_PROJ ((long)pn_Load_max > (long)pn_Store_max ? (long)pn_Load_max : (long)pn_Store_max)
 
 /**
  * Mapping an address to a dense ID.
@@ -90,7 +89,7 @@ struct memop_t {
        memop_t  *next;      /**< links to the next memory op in the block in forward order. */
        memop_t  *prev;      /**< links to the previous memory op in the block in forward order. */
        unsigned flags;      /**< memop flags */
-       ir_node  *projs[MAX_PROJ]; /**< Projs of this memory op */
+       ir_node  *projs[MAX_PROJ+1]; /**< Projs of this memory op */
 };
 
 /**
@@ -115,8 +114,8 @@ struct block_t {
  * Metadata for this pass.
  */
 typedef struct ldst_env_t {
-       struct obstack  obst;              /**< obstack for temporary data */
-       ir_nodemap_t    adr_map;           /**< Map addresses to */
+       struct obstack   obst;             /**< obstack for temporary data */
+       ir_nodehashmap_t adr_map;          /**< Map addresses to */
        block_t         *forward;          /**< Inverse post-order list of all blocks Start->End */
        block_t         *backward;         /**< Inverse post-order list of all blocks End->Start */
        ir_node         *start_bl;         /**< start block of the current graph */
@@ -125,7 +124,7 @@ typedef struct ldst_env_t {
        memop_t         **curr_id_2_memop; /**< current map of address ids to memops */
        unsigned        curr_adr_id;       /**< number for address mapping */
        unsigned        n_mem_ops;         /**< number of memory operations (Loads/Stores) */
-       unsigned        rbs_size;          /**< size of all bitsets in bytes */
+       size_t          rbs_size;          /**< size of all bitsets in bytes */
        int             max_cfg_preds;     /**< maximum number of block cfg predecessors */
        int             changed;           /**< Flags for changed graph state */
 #ifdef DEBUG_libfirm
@@ -170,7 +169,7 @@ static void dump_block_list(ldst_env *env)
                }
                DB((dbg, LEVEL_2, "\n}\n\n"));
        }
-}  /* dump_block_list */
+}
 
 /**
  * Dumps the current set.
@@ -180,9 +179,9 @@ static void dump_block_list(ldst_env *env)
  */
 static void dump_curr(block_t *bl, const char *s)
 {
-       unsigned end = env.rbs_size - 1;
-       unsigned pos;
-       int      i;
+       size_t end = env.rbs_size - 1;
+       size_t pos;
+       int    i;
 
        DB((dbg, LEVEL_2, "%s[%+F] = {", s, bl->block));
        i = 0;
@@ -197,7 +196,7 @@ static void dump_curr(block_t *bl, const char *s)
                i = (i + 1) & 3;
        }
        DB((dbg, LEVEL_2, "\n}\n"));
-}  /* dump_curr */
+}
 
 #else
 static void dump_block_list(ldst_env *env)
@@ -216,15 +215,15 @@ static block_t *get_block_entry(const ir_node *block)
 {
        assert(is_Block(block));
 
-       return get_irn_link(block);
-}  /* get_block_entry */
+       return (block_t*)get_irn_link(block);
+}
 
 /** Get the memop entry for a memory operation node */
 static memop_t *get_irn_memop(const ir_node *irn)
 {
        assert(! is_Block(irn));
-       return get_irn_link(irn);
-}  /* get_irn_memop */
+       return (memop_t*)get_irn_link(irn);
+}
 
 /**
  * Walk over the memory edges from definition to users.
@@ -237,7 +236,6 @@ static memop_t *get_irn_memop(const ir_node *irn)
  */
 static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, void *ctx)
 {
-       int     i;
        ir_mode *mode;
 
        mark_irn_visited(irn);
@@ -248,7 +246,7 @@ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, v
        mode = get_irn_mode(irn);
        if (mode == mode_M) {
                /* every successor uses memory */
-               for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
+               for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
                        ir_node *succ = get_irn_out(irn, i);
 
                        if (! irn_visited(succ))
@@ -256,7 +254,7 @@ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, v
                }
        } else if (mode == mode_T) {
                /* only some Projs use memory */
-               for (i = get_irn_n_outs(irn) - 1; i >= 0; --i) {
+               for (unsigned i = get_irn_n_outs(irn); i-- > 0; ) {
                        ir_node *proj = get_irn_out(irn, i);
 
                        if (get_irn_mode(proj) == mode_M && ! irn_visited(proj))
@@ -265,7 +263,7 @@ static void walk_memory(ir_node *irn, irg_walk_func *pre, irg_walk_func *post, v
        }
        if (post)
                post(irn, ctx);
-}  /* walk_memory */
+}
 
 /**
  * Walks over all memory nodes of a graph.
@@ -288,7 +286,7 @@ static void walk_memory_irg(ir_graph *irg, irg_walk_func pre, irg_walk_func post
        walk_memory(get_irg_initial_mem(irg), pre, post, ctx);
 
        ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
-}  /* walk_memory_irg */
+}
 
 /**
  * Register an address and allocate a (sparse, 0..n) ID for it.
@@ -301,25 +299,21 @@ static unsigned register_address(ir_node *adr)
 {
        address_entry *entry;
 
-       /* skip Confirms and Casts */
+       /* skip Confirms */
 restart:
        if (is_Confirm(adr)) {
                adr = get_Confirm_value(adr);
                goto restart;
        }
-       if (is_Cast(adr)) {
-               adr = get_Cast_op(adr);
-               goto restart;
-       }
 
-       entry = ir_nodemap_get(&env.adr_map, adr);
+       entry = ir_nodehashmap_get(address_entry, &env.adr_map, adr);
 
        if (entry == NULL) {
                /* new address */
                entry = OALLOC(&env.obst, address_entry);
 
                entry->id = env.curr_adr_id++;
-               ir_nodemap_insert(&env.adr_map, adr, entry);
+               ir_nodehashmap_insert(&env.adr_map, adr, entry);
 
                DB((dbg, LEVEL_3, "ADDRESS %+F has ID %u\n", adr, entry->id));
 #ifdef DEBUG_libfirm
@@ -327,7 +321,7 @@ restart:
 #endif
        }
        return entry->id;
-}  /* register_address */
+}
 
 
 /**
@@ -343,7 +337,7 @@ static ir_node *phi_translate(ir_node *address, const ir_node *block, int pos)
        if (is_Phi(address) && get_nodes_block(address) == block)
                address = get_Phi_pred(address, pos);
        return address;
-}  /* phi_translate */
+}
 
 /**
  * Walker: allocate a block entry for every block
@@ -390,7 +384,7 @@ static void prepare_blocks(ir_node *irn, void *ctx)
                        (void)register_address(irn);
                }
        }
-}  /* prepare_blocks */
+}
 
 /**
  * Post-Walker, link in all Phi's
@@ -403,7 +397,7 @@ static void link_phis(ir_node *irn, void *ctx)
                ir_node *block = get_nodes_block(irn);
                add_Block_phi(block, irn);
        }
-}  /* link_phis */
+}
 
 /**
  * Block walker: creates the inverse post-order list for the CFG.
@@ -424,7 +418,7 @@ static void inverse_post_order(ir_node *block, void *ctx)
        /* remember the first visited (last in list) entry, needed for later */
        if (env.backward == NULL)
                env.backward = entry;
-}  /* inverse_post_order */
+}
 
 /**
  * Block walker: create backward links for the memops of a block.
@@ -455,7 +449,7 @@ static void collect_backward(ir_node *block, void *ctx)
                last     = op;
        }
        entry->memop_backward = last;
-}  /* collect_backward */
+}
 
 /**
  * Allocate a memop.
@@ -484,7 +478,7 @@ static memop_t *alloc_memop(ir_node *irn)
        if (irn != NULL)
                set_irn_link(irn, m);
        return m;
-}  /* alloc_memop */
+}
 
 /**
  * Create a memop for a Phi-replacement.
@@ -506,7 +500,7 @@ static memop_t *clone_memop_phi(memop_t *op, ir_node *phi)
 
        set_irn_link(phi, m);
        return m;
-}  /* clone_memop_phi */
+}
 
 /**
  * Return the memory properties of a call node.
@@ -525,14 +519,14 @@ static unsigned get_Call_memory_properties(ir_node *call)
                /* try the called entity */
                ir_node *ptr = get_Call_ptr(call);
 
-               if (is_Global(ptr)) {
-                       ir_entity *ent = get_Global_entity(ptr);
+               if (is_SymConst_addr_ent(ptr)) {
+                       ir_entity *ent = get_SymConst_entity(ptr);
 
                        prop = get_entity_additional_properties(ent);
                }
        }
        return prop & (mtp_property_const|mtp_property_pure);
-}  /* get_Call_memory_properties */
+}
 
 /**
  * Returns an entity if the address ptr points to a constant one.
@@ -561,10 +555,10 @@ static ir_entity *find_constant_entity(ir_node *ptr)
                                int i, n;
 
                                for (i = 0, n = get_Sel_n_indexs(ptr); i < n; ++i) {
-                                       ir_node *bound;
-                                       tarval *tlower, *tupper;
-                                       ir_node *index = get_Sel_index(ptr, i);
-                                       tarval *tv     = computed_value(index);
+                                       ir_node   *bound;
+                                       ir_tarval *tlower, *tupper;
+                                       ir_node   *index = get_Sel_index(ptr, i);
+                                       ir_tarval *tv     = computed_value(index);
 
                                        /* check if the index is constant */
                                        if (tv == tarval_bad)
@@ -578,9 +572,9 @@ static ir_entity *find_constant_entity(ir_node *ptr)
                                        if (tlower == tarval_bad || tupper == tarval_bad)
                                                return NULL;
 
-                                       if (tarval_cmp(tv, tlower) & pn_Cmp_Lt)
+                                       if (tarval_cmp(tv, tlower) == ir_relation_less)
                                                return NULL;
-                                       if (tarval_cmp(tupper, tv) & pn_Cmp_Lt)
+                                       if (tarval_cmp(tupper, tv) == ir_relation_less)
                                                return NULL;
 
                                        /* ok, bounds check finished */
@@ -620,7 +614,7 @@ static ir_entity *find_constant_entity(ir_node *ptr)
                } else
                        return NULL;
        }
-}  /* find_constant_entity */
+}
 
 /**
  * Return the Selection index of a Sel node from dimension n
@@ -628,176 +622,13 @@ static ir_entity *find_constant_entity(ir_node *ptr)
 static long get_Sel_array_index_long(ir_node *n, int dim)
 {
        ir_node *index = get_Sel_index(n, dim);
-       assert(is_Const(index));
        return get_tarval_long(get_Const_tarval(index));
-}  /* get_Sel_array_index_long */
-
-/**
- * Returns the accessed component graph path for an
- * node computing an address.
- *
- * @param ptr    the node computing the address
- * @param depth  current depth in steps upward from the root
- *               of the address
- */
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
-{
-       compound_graph_path *res = NULL;
-       ir_entity           *root, *field, *ent;
-       int                 path_len, pos, idx;
-       tarval              *tv;
-       ir_type             *tp;
-
-       if (is_SymConst(ptr)) {
-               /* a SymConst. If the depth is 0, this is an access to a global
-                * entity and we don't need a component path, else we know
-                * at least its length.
-                */
-               assert(get_SymConst_kind(ptr) == symconst_addr_ent);
-               root = get_SymConst_entity(ptr);
-               res = (depth == 0) ? NULL : new_compound_graph_path(get_entity_type(root), depth);
-       } else if (is_Sel(ptr)) {
-               /* it's a Sel, go up until we find the root */
-               res = rec_get_accessed_path(get_Sel_ptr(ptr), depth+1);
-               if (res == NULL)
-                       return NULL;
-
-               /* fill up the step in the path at the current position */
-               field    = get_Sel_entity(ptr);
-               path_len = get_compound_graph_path_length(res);
-               pos      = path_len - depth - 1;
-               set_compound_graph_path_node(res, pos, field);
-
-               if (is_Array_type(get_entity_owner(field))) {
-                       assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
-                       set_compound_graph_path_array_index(res, pos, get_Sel_array_index_long(ptr, 0));
-               }
-       } else if (is_Add(ptr)) {
-               ir_node *l    = get_Add_left(ptr);
-               ir_node *r    = get_Add_right(ptr);
-               ir_mode *mode = get_irn_mode(ptr);
-               tarval  *tmp;
-
-               if (is_Const(r) && get_irn_mode(l) == mode) {
-                       ptr = l;
-                       tv  = get_Const_tarval(r);
-               } else {
-                       ptr = r;
-                       tv  = get_Const_tarval(l);
-               }
-ptr_arith:
-               mode = get_tarval_mode(tv);
-               tmp  = tv;
-
-               /* ptr must be a Sel or a SymConst, this was checked in find_constant_entity() */
-               if (is_Sel(ptr)) {
-                       field = get_Sel_entity(ptr);
-               } else {
-                       field = get_SymConst_entity(ptr);
-               }
-               idx = 0;
-               for (ent = field;;) {
-                       unsigned size;
-                       tarval   *sz, *tv_index, *tlower, *tupper;
-                       ir_node  *bound;
-
-                       tp = get_entity_type(ent);
-                       if (! is_Array_type(tp))
-                               break;
-                       ent = get_array_element_entity(tp);
-                       size = get_type_size_bytes(get_entity_type(ent));
-                       sz   = new_tarval_from_long(size, mode);
-
-                       tv_index = tarval_div(tmp, sz);
-                       tmp      = tarval_mod(tmp, sz);
-
-                       if (tv_index == tarval_bad || tmp == tarval_bad)
-                               return NULL;
-
-                       assert(get_array_n_dimensions(tp) == 1 && "multiarrays not implemented");
-                       bound  = get_array_lower_bound(tp, 0);
-                       tlower = computed_value(bound);
-                       bound  = get_array_upper_bound(tp, 0);
-                       tupper = computed_value(bound);
-
-                       if (tlower == tarval_bad || tupper == tarval_bad)
-                               return NULL;
-
-                       if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
-                               return NULL;
-                       if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
-                               return NULL;
-
-                       /* ok, bounds check finished */
-                       ++idx;
-               }
-               if (! tarval_is_null(tmp)) {
-                       /* access to some struct/union member */
-                       return NULL;
-               }
-
-               /* should be at least ONE array */
-               if (idx == 0)
-                       return NULL;
-
-               res = rec_get_accessed_path(ptr, depth + idx);
-               if (res == NULL)
-                       return NULL;
-
-               path_len = get_compound_graph_path_length(res);
-               pos      = path_len - depth - idx;
-
-               for (ent = field;;) {
-                       unsigned size;
-                       tarval   *sz, *tv_index;
-                       long     index;
-
-                       tp = get_entity_type(ent);
-                       if (! is_Array_type(tp))
-                               break;
-                       ent = get_array_element_entity(tp);
-                       set_compound_graph_path_node(res, pos, ent);
-
-                       size = get_type_size_bytes(get_entity_type(ent));
-                       sz   = new_tarval_from_long(size, mode);
-
-                       tv_index = tarval_div(tv, sz);
-                       tv       = tarval_mod(tv, sz);
-
-                       /* worked above, should work again */
-                       assert(tv_index != tarval_bad && tv != tarval_bad);
-
-                       /* bounds already checked above */
-                       index = get_tarval_long(tv_index);
-                       set_compound_graph_path_array_index(res, pos, index);
-                       ++pos;
-               }
-       } else if (is_Sub(ptr)) {
-               ir_node *l = get_Sub_left(ptr);
-               ir_node *r = get_Sub_right(ptr);
-
-               ptr = l;
-               tv  = get_Const_tarval(r);
-               tv  = tarval_neg(tv);
-               goto ptr_arith;
-       }
-       return res;
-}  /* rec_get_accessed_path */
-
-/**
- * Returns an access path or NULL.  The access path is only
- * valid, if the graph is in phase_high and _no_ address computation is used.
- */
-static compound_graph_path *get_accessed_path(ir_node *ptr)
-{
-       compound_graph_path *gr = rec_get_accessed_path(ptr, 0);
-       return gr;
-}  /* get_accessed_path */
+}
 
 typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
-       long              index;
+       size_t            index;
 } path_entry;
 
 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
@@ -805,9 +636,9 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
        path_entry       entry, *p;
        ir_entity        *ent, *field;
        ir_initializer_t *initializer;
-       tarval           *tv;
+       ir_tarval        *tv;
        ir_type          *tp;
-       unsigned         n;
+       size_t           n;
 
        entry.next = next;
        if (is_SymConst(ptr)) {
@@ -830,7 +661,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
                                        continue;
                                }
                        }
-                       if (p->index >= (int) n)
+                       if (p->index >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);
 
@@ -863,7 +694,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
-                       int i, n_members = get_compound_n_members(tp);
+                       size_t i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
@@ -876,17 +707,19 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
                }
                return rec_find_compound_ent_value(get_Sel_ptr(ptr), &entry);
        }  else if (is_Add(ptr)) {
-               ir_node  *l = get_Add_left(ptr);
-               ir_node  *r = get_Add_right(ptr);
-               ir_mode  *mode;
+               ir_mode *mode;
                unsigned pos;
 
-               if (is_Const(r)) {
-                       ptr = l;
-                       tv  = get_Const_tarval(r);
-               } else {
-                       ptr = r;
-                       tv  = get_Const_tarval(l);
+               {
+                       ir_node *l = get_Add_left(ptr);
+                       ir_node *r = get_Add_right(ptr);
+                       if (is_Const(r)) {
+                               ptr = l;
+                               tv  = get_Const_tarval(r);
+                       } else {
+                               ptr = r;
+                               tv  = get_Const_tarval(l);
+                       }
                }
 ptr_arith:
                mode = get_tarval_mode(tv);
@@ -917,10 +750,10 @@ ptr_arith:
                /* fill them up */
                pos = 0;
                for (ent = field;;) {
-                       unsigned size;
-                       tarval   *sz, *tv_index, *tlower, *tupper;
-                       long     index;
-                       ir_node  *bound;
+                       unsigned  size;
+                       ir_tarval *sz, *tv_index, *tlower, *tupper;
+                       size_t    index;
+                       ir_node   *bound;
 
                        tp = get_entity_type(ent);
                        if (! is_Array_type(tp))
@@ -947,9 +780,9 @@ ptr_arith:
                        if (tlower == tarval_bad || tupper == tarval_bad)
                                return NULL;
 
-                       if (tarval_cmp(tv_index, tlower) & pn_Cmp_Lt)
+                       if (tarval_cmp(tv_index, tlower) == ir_relation_less)
                                return NULL;
-                       if (tarval_cmp(tupper, tv_index) & pn_Cmp_Lt)
+                       if (tarval_cmp(tupper, tv_index) == ir_relation_less)
                                return NULL;
 
                        /* ok, bounds check finished */
@@ -973,12 +806,12 @@ ptr_arith:
                goto ptr_arith;
        }
        return NULL;
-}  /* rec_find_compound_ent_value */
+}
 
 static ir_node *find_compound_ent_value(ir_node *ptr)
 {
        return rec_find_compound_ent_value(ptr, NULL);
-}  /* find_compound_ent_value */
+}
 
 /**
  * Mark a Load memop to be replaced by a definition
@@ -990,7 +823,7 @@ static void mark_replace_load(memop_t *op, ir_node *def)
        op->replace = def;
        op->flags |= FLAG_KILLED_NODE;
        env.changed = 1;
-}  /* mark_replace_load */
+}
 
 /**
  * Mark a Store memop to be removed.
@@ -1001,7 +834,7 @@ static void mark_remove_store(memop_t *op)
 {
        op->flags |= FLAG_KILLED_NODE;
        env.changed = 1;
-}  /* mark_remove_store */
+}
 
 /**
  * Update a memop for a Load.
@@ -1010,7 +843,6 @@ static void mark_remove_store(memop_t *op)
  */
 static void update_Load_memop(memop_t *m)
 {
-       int       i;
        ir_node   *load = m->node;
        ir_node   *ptr;
        ir_entity *ent;
@@ -1022,7 +854,7 @@ static void update_Load_memop(memop_t *m)
 
        m->value.address = ptr;
 
-       for (i = get_irn_n_outs(load) - 1; i >= 0; --i) {
+       for (unsigned i = get_irn_n_outs(load); i-- > 0; ) {
                ir_node *proj = get_irn_out(load, i);
                long    pn;
 
@@ -1061,7 +893,7 @@ static void update_Load_memop(memop_t *m)
                /* no exception, clear the m fields as it might be checked later again */
                if (m->projs[pn_Load_X_except]) {
                        ir_graph *irg = get_irn_irg(ptr);
-                       exchange(m->projs[pn_Load_X_except], new_r_Bad(irg));
+                       exchange(m->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                        m->projs[pn_Load_X_except] = NULL;
                        m->flags &= ~FLAG_EXCEPTION;
                        env.changed = 1;
@@ -1076,17 +908,6 @@ static void update_Load_memop(memop_t *m)
                        if (ent->initializer) {
                                /* new style initializer */
                                value = find_compound_ent_value(ptr);
-                       } else if (entity_has_compound_ent_values(ent)) {
-                               /* old style initializer */
-                               compound_graph_path *path = get_accessed_path(ptr);
-
-                               if (path != NULL) {
-                                       assert(is_proper_compound_graph_path(path, get_compound_graph_path_length(path)-1));
-
-                                       value = get_compound_ent_value_by_path(ent, path);
-                                       DB((dbg, LEVEL_1, "  Constant access at %F%F resulted in %+F\n", ent, path, value));
-                                       free_compound_graph_path(path);
-                               }
                        }
                        if (value != NULL)
                                value = can_replace_load_by_const(load, value);
@@ -1108,7 +929,7 @@ static void update_Load_memop(memop_t *m)
                /* no user, KILL it */
                mark_replace_load(m, NULL);
        }
-}  /* update_Load_memop */
+}
 
 /**
  * Update a memop for a Store.
@@ -1117,7 +938,6 @@ static void update_Load_memop(memop_t *m)
  */
 static void update_Store_memop(memop_t *m)
 {
-       int     i;
        ir_node *store = m->node;
        ir_node *adr   = get_Store_ptr(store);
 
@@ -1131,7 +951,7 @@ static void update_Store_memop(memop_t *m)
 
        m->value.address = adr;
 
-       for (i = get_irn_n_outs(store) - 1; i >= 0; --i) {
+       for (unsigned i = get_irn_n_outs(store); i-- > 0; ) {
                ir_node *proj = get_irn_out(store, i);
                long    pn;
 
@@ -1156,7 +976,7 @@ static void update_Store_memop(memop_t *m)
        }
        m->value.value = get_Store_value(store);
        m->value.mode  = get_irn_mode(m->value.value);
-}  /* update_Store_memop */
+}
 
 /**
  * Update a memop for a Call.
@@ -1167,7 +987,6 @@ static void update_Call_memop(memop_t *m)
 {
        ir_node  *call = m->node;
        unsigned prop  = get_Call_memory_properties(call);
-       int      i;
 
        if (prop & mtp_property_const) {
                /* A constant call did NOT use memory at all, we
@@ -1178,7 +997,7 @@ static void update_Call_memop(memop_t *m)
        } else
                m->flags = FLAG_KILL_ALL;
 
-       for (i = get_irn_n_outs(call) - 1; i >= 0; --i) {
+       for (unsigned i = get_irn_n_outs(call); i-- > 0; ) {
                ir_node *proj = get_irn_out(call, i);
 
                /* beware of keep edges */
@@ -1194,19 +1013,40 @@ static void update_Call_memop(memop_t *m)
                        break;
                }
        }
-}  /* update_Call_memop */
+}
 
 /**
- * Update a memop for a Div/Mod/Quot/DivMod.
+ * Update a memop for a Div/Mod.
  *
  * @param m  the memop
  */
-static void update_DivOp_memop(memop_t *m)
+static void update_Div_memop(memop_t *m)
+{
+       ir_node *div = m->node;
+
+       for (unsigned i = get_irn_n_outs(div); i-- > 0; ) {
+               ir_node *proj = get_irn_out(div, i);
+
+               /* beware of keep edges */
+               if (is_End(proj))
+                       continue;
+
+               switch (get_Proj_proj(proj)) {
+               case pn_Div_X_except:
+                       m->flags |= FLAG_EXCEPTION;
+                       break;
+               case pn_Div_M:
+                       m->mem = proj;
+                       break;
+               }
+       }
+}
+
+static void update_Mod_memop(memop_t *m)
 {
        ir_node *div = m->node;
-       int     i;
 
-       for (i = get_irn_n_outs(div) - 1; i >= 0; --i) {
+       for (unsigned i = get_irn_n_outs(div); i-- > 0; ) {
                ir_node *proj = get_irn_out(div, i);
 
                /* beware of keep edges */
@@ -1214,15 +1054,15 @@ static void update_DivOp_memop(memop_t *m)
                        continue;
 
                switch (get_Proj_proj(proj)) {
-               case pn_Generic_X_except:
+               case pn_Mod_X_except:
                        m->flags |= FLAG_EXCEPTION;
                        break;
-               case pn_Generic_M:
+               case pn_Mod_M:
                        m->mem = proj;
                        break;
                }
        }
-}  /* update_DivOp_memop */
+}
 
 /**
  * Update a memop for a Phi.
@@ -1231,9 +1071,9 @@ static void update_DivOp_memop(memop_t *m)
  */
 static void update_Phi_memop(memop_t *m)
 {
-       /* the Phi is it's own mem */
+       /* the Phi is its own mem */
        m->mem = m->node;
-}  /* update_Phi_memop */
+}
 
 /**
  * Memory walker: collect all memory ops and build topological lists.
@@ -1247,7 +1087,8 @@ static void collect_memops(ir_node *irn, void *ctx)
        (void) ctx;
        if (is_Proj(irn)) {
                /* we can safely ignore ProjM's except the initial memory */
-               if (irn != get_irg_initial_mem(current_ir_graph))
+               ir_graph *irg = get_irn_irg(irn);
+               if (irn != get_irg_initial_mem(irg))
                        return;
        }
 
@@ -1286,10 +1127,10 @@ static void collect_memops(ir_node *irn, void *ctx)
                        /* we can use those to find the memory edge */
                        break;
                case iro_Div:
-               case iro_DivMod:
-               case iro_Quot:
+                       update_Div_memop(op);
+                       break;
                case iro_Mod:
-                       update_DivOp_memop(op);
+                       update_Mod_memop(op);
                        break;
 
                case iro_Builtin:
@@ -1308,7 +1149,7 @@ static void collect_memops(ir_node *irn, void *ctx)
                        entry->memop_backward       = op;
                }
        }
-}  /* collect_memops */
+}
 
 /**
  * Find an address in the current set.
@@ -1333,7 +1174,7 @@ static memop_t *find_address(const value_t *value)
                        return res;
        }
        return NULL;
-}  /* find_address */
+}
 
 /**
  * Find an address in the avail_out set.
@@ -1354,7 +1195,7 @@ static memop_t *find_address_avail(const block_t *bl, unsigned id, const ir_mode
                        return res;
        }
        return NULL;
-}  /* find_address_avail */
+}
 
 /**
  * Kill all addresses from the current set.
@@ -1365,7 +1206,7 @@ static void kill_all(void)
 
        /* set sentinel */
        rbitset_set(env.curr_set, env.rbs_size - 1);
-}  /* kill_all */
+}
 
 /**
  * Kill memops that are not alias free due to a Store value from the current set.
@@ -1374,20 +1215,20 @@ static void kill_all(void)
  */
 static void kill_memops(const value_t *value)
 {
-       unsigned end = env.rbs_size - 1;
-       unsigned pos;
+       size_t end = env.rbs_size - 1;
+       size_t pos;
 
        for (pos = rbitset_next(env.curr_set, 0, 1); pos < end; pos = rbitset_next(env.curr_set, pos + 1, 1)) {
                memop_t *op = env.curr_id_2_memop[pos];
 
-               if (ir_no_alias != get_alias_relation(current_ir_graph, value->address, value->mode,
+               if (ir_no_alias != get_alias_relation(value->address, value->mode,
                                                          op->value.address, op->value.mode)) {
                        rbitset_clear(env.curr_set, pos);
                        env.curr_id_2_memop[pos] = NULL;
                        DB((dbg, LEVEL_2, "KILLING %+F because of possible alias address %+F\n", op->node, value->address));
                }
        }
-}  /* kill_memops */
+}
 
 /**
  * Add the value of a memop to the current set.
@@ -1398,7 +1239,7 @@ static void add_memop(memop_t *op)
 {
        rbitset_set(env.curr_set, op->value.id);
        env.curr_id_2_memop[op->value.id] = op;
-}  /* add_memop */
+}
 
 /**
  * Add the value of a memop to the avail_out set.
@@ -1410,7 +1251,7 @@ static void add_memop_avail(block_t *bl, memop_t *op)
 {
        rbitset_set(bl->avail_out, op->value.id);
        bl->id_2_memop_avail[op->value.id] = op;
-}  /* add_memop_avail */
+}
 
 /**
  * Check, if we can convert a value of one mode to another mode
@@ -1426,7 +1267,7 @@ static int can_convert_to(const ir_mode *from, const ir_mode *to)
            get_mode_size_bits(from) == get_mode_size_bits(to))
                return 1;
        return 0;
-}  /* can_convert_to */
+}
 
 /**
  * Add a Conv to the requested mode if needed.
@@ -1450,7 +1291,7 @@ static ir_node *conv_to(ir_node *irn, ir_mode *mode)
                return NULL;
        }
        return irn;
-}  /* conv_to */
+}
 
 /**
  * Update the address of a value if this address was a load result
@@ -1470,7 +1311,7 @@ static void update_address(value_t *value)
                                value->address = op->replace;
                }
        }
-}  /* update_address */
+}
 
 /**
  * Do forward dataflow analysis on the given block and calculate the
@@ -1558,9 +1399,7 @@ static void calc_gen_kill_avail(block_t *bl)
                                kill_all();
                }
        }
-}  /* calc_gen_kill_avail */
-
-#define BYTE_SIZE(x)  (((x) + 7) >> 3)
+}
 
 /**
  * Do forward dataflow analysis on a given block to calculate the avail_out set
@@ -1576,7 +1415,7 @@ static void forward_avail(block_t *bl)
 
        calc_gen_kill_avail(bl);
        dump_curr(bl, "Avail_out");
-}  /* forward_avail */
+}
 
 /**
  * Do backward dataflow analysis on a given block to calculate the antic set
@@ -1596,8 +1435,8 @@ static int backward_antic(block_t *bl)
                ir_node  *succ    = get_Block_cfg_out(block, 0);
                block_t  *succ_bl = get_block_entry(succ);
                int      pred_pos = get_Block_cfgpred_pos(succ, block);
-               unsigned end      = env.rbs_size - 1;
-               unsigned pos;
+               size_t   end      = env.rbs_size - 1;
+               size_t   pos;
 
                kill_all();
 
@@ -1700,7 +1539,7 @@ static int backward_antic(block_t *bl)
        }
        dump_curr(bl, "AnticL_in");
        return 0;
-}  /* backward_antic */
+}
 
 /**
  * Replace a Load memop by an already known value.
@@ -1743,13 +1582,13 @@ static void replace_load(memop_t *op)
        proj = op->projs[pn_Load_X_except];
        if (proj != NULL) {
                ir_graph *irg = get_irn_irg(load);
-               exchange(proj, new_r_Bad(irg));
+               exchange(proj, new_r_Bad(irg, mode_X));
        }
        proj = op->projs[pn_Load_X_regular];
        if (proj != NULL) {
                exchange(proj, new_r_Jmp(get_nodes_block(load)));
        }
-}  /* replace_load */
+}
 
 /**
  * Remove a Store memop.
@@ -1770,13 +1609,13 @@ static void remove_store(memop_t *op)
        proj = op->projs[pn_Store_X_except];
        if (proj != NULL) {
                ir_graph *irg = get_irn_irg(store);
-               exchange(proj, new_r_Bad(irg));
+               exchange(proj, new_r_Bad(irg, mode_X));
        }
        proj = op->projs[pn_Store_X_regular];
        if (proj != NULL) {
                exchange(proj, new_r_Jmp(get_nodes_block(store)));
        }
-}  /* remove_store */
+}
 
 
 /**
@@ -1800,7 +1639,7 @@ static void do_replacements(block_t *bl)
                        }
                }
        }
-}  /* do_replacements */
+}
 
 /**
  * Calculate the Avail_out sets for all basic blocks.
@@ -1822,7 +1661,7 @@ static void calcAvail(void)
        /* restore the current sets */
        env.curr_id_2_memop = tmp_memop;
        env.curr_set        = tmp_set;
-}  /* calcAvail */
+}
 
 /**
  * Calculate the Antic_in sets for all basic blocks.
@@ -1848,7 +1687,7 @@ static void calcAntic(void)
                ++i;
        } while (need_iter);
        DB((dbg, LEVEL_2, "Get anticipated Load set after %d iterations\n", i));
-}  /* calcAntic */
+}
 
 /**
  * Return the node representing the last memory in a block.
@@ -1864,7 +1703,7 @@ static ir_node *find_last_memory(block_t *bl)
                /* if there is NO memory in this block, go to the dominator */
                bl = get_block_entry(get_Block_idom(bl->block));
        }
-}  /* find_last_memory */
+}
 
 /**
  * Reroute all memory users of old memory
@@ -1875,9 +1714,7 @@ static ir_node *find_last_memory(block_t *bl)
  */
 static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
 {
-       int i;
-
-       for (i = get_irn_n_outs(omem) - 1; i >= 0; --i) {
+       for (unsigned i = get_irn_n_outs(omem); i-- > 0; ) {
                int     n_pos;
                ir_node *user = get_irn_out_ex(omem, i, &n_pos);
 
@@ -1885,8 +1722,8 @@ static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
        }
 
        /* all edges previously point to omem now point to nmem */
-       nmem->out = omem->out;
-}  /* reroute_all_mem_users */
+       nmem->o.out = omem->o.out;
+}
 
 /**
  * Reroute memory users of old memory that are dominated by a given block
@@ -1898,10 +1735,11 @@ static void reroute_all_mem_users(ir_node *omem, ir_node *nmem)
  */
 static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
 {
-       int             i, j, n = get_irn_n_outs(omem);
-       ir_def_use_edge *edges = NEW_ARR_D(ir_def_use_edge, &env.obst, n + 1);
+       unsigned n = get_irn_n_outs(omem);
+       ir_def_use_edges *new_out = OALLOCF(&env.obst, ir_def_use_edges, edges, n);
 
-       for (i = j = 0; i < n; ++i) {
+       unsigned j = 0;
+       for (unsigned i = 0; i < n; ++i) {
                int     n_pos;
                ir_node *user   = get_irn_out_ex(omem, i, &n_pos);
                ir_node *use_bl = get_nodes_block(user);
@@ -1912,21 +1750,21 @@ static void reroute_mem_through(ir_node *omem, ir_node *nmem, ir_node *pass_bl)
                }
                if (block_dominates(pass_bl, use_bl)) {
                        /* found an user that is dominated */
+                       new_out->edges[j].pos = n_pos;
+                       new_out->edges[j].use = user;
                        ++j;
-                       edges[j].pos = n_pos;
-                       edges[j].use = user;
 
                        set_irn_n(user, n_pos, nmem);
                }
        }
+       new_out->n_edges = j;
 
        /* Modify the out structure: we create a new out edge array on our
-          temporary obstack here. This should be no problem, as we invalidate the edges
-          at the end either. */
+          temporary obstack here. This should be no problem, as we invalidate the
+          edges at the end either. */
        /* first entry is used for the length */
-       edges[0].pos = j;
-       nmem->out = edges;
-}  /* reroute_mem_through */
+       nmem->o.out = new_out;
+}
 
 /**
  * insert Loads, making partly redundant Loads fully redundant
@@ -1935,8 +1773,7 @@ static int insert_Load(block_t *bl)
 {
        ir_node  *block = bl->block;
        int      i, n = get_Block_n_cfgpreds(block);
-       unsigned end = env.rbs_size - 1;
-       unsigned pos;
+       size_t   end = env.rbs_size - 1;
 
        DB((dbg, LEVEL_3, "processing %+F\n", block));
 
@@ -1947,7 +1784,7 @@ static int insert_Load(block_t *bl)
 
        if (n > 1) {
                ir_node **ins;
-               int     pos;
+               size_t    pos;
 
                NEW_ARR_A(ir_node *, ins, n);
 
@@ -1974,25 +1811,21 @@ static int insert_Load(block_t *bl)
                }
                /*
                 * Ensure that all values are in the map: build Phi's if necessary:
-                * Note: the last bit is the sentinel and ALWAYS set, so start with -2.
+                * Note: the last bit is the sentinel and ALWAYS set, so end with -2.
                 */
-               for (pos = env.rbs_size - 2; pos >= 0; --pos) {
+               for (pos = 0; pos < env.rbs_size - 1; ++pos) {
                        if (! rbitset_is_set(env.curr_set, pos))
                                env.curr_id_2_memop[pos] = NULL;
                        else {
-                               ir_node *pred    = get_Block_cfgpred_block(bl->block, 0);
-                               block_t *pred_bl = get_block_entry(pred);
-                               int     need_phi = 0;
-                               memop_t *first   = NULL;
-                               ir_mode *mode    = NULL;
+                               int      need_phi = 0;
+                               memop_t *first    = NULL;
+                               ir_mode *mode     = NULL;
 
                                for (i = 0; i < n; ++i) {
-                                       memop_t *mop;
-
-                                       pred    = get_Block_cfgpred_block(bl->block, i);
-                                       pred_bl = get_block_entry(pred);
+                                       ir_node *pred    = get_Block_cfgpred_block(bl->block, i);
+                                       block_t *pred_bl = get_block_entry(pred);
 
-                                       mop = pred_bl->id_2_memop_avail[pos];
+                                       memop_t *mop = pred_bl->id_2_memop_avail[pos];
                                        if (first == NULL) {
                                                first = mop;
                                                ins[0] = first->value.value;
@@ -2040,6 +1873,8 @@ static int insert_Load(block_t *bl)
        }
 
        if (n > 1) {
+               size_t pos;
+
                /* check for partly redundant values */
                for (pos = rbitset_next(bl->anticL_in, 0, 1);
                     pos < end;
@@ -2178,7 +2013,7 @@ static int insert_Load(block_t *bl)
        }
        dump_curr(bl, "Avail_out");
        return 0;
-}  /* insert_Load */
+}
 
 /**
  * Insert Loads upwards.
@@ -2205,126 +2040,29 @@ static void insert_Loads_upwards(void)
        } while (need_iter);
 
        DB((dbg, LEVEL_2, "Finished Load inserting after %d iterations\n", i));
-}  /* insert_Loads_upwards */
+}
 
-/**
- * Kill unreachable control flow.
- *
- * @param irg  the graph to operate on
- */
-static void kill_unreachable_blocks(ir_graph *irg)
+void opt_ldst(ir_graph *irg)
 {
        block_t *bl;
-       ir_node **ins;
-       int     changed = 0;
-
-       NEW_ARR_A(ir_node *, ins, env.max_cfg_preds);
-
-       for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
-               ir_node *block = bl->block;
-               int     i, j, k, n;
-
-               assert(get_Block_mark(block));
-
-               n = get_Block_n_cfgpreds(block);
-
-               for (i = j = 0; i < n; ++i) {
-                       ir_node *pred = get_Block_cfgpred(block, i);
-                       ir_node *pred_bl;
-
-                       if (is_Bad(pred))
-                               continue;
-
-                       pred_bl = get_nodes_block(skip_Proj(pred));
-                       if (! get_Block_mark(pred_bl))
-                               continue;
-
-                       ins[j++] = pred;
-               }
-               if (j != n) {
-                       ir_node *phi, *next;
-
-                       /* some unreachable blocks detected */
-                       changed = 1;
-
-                       DB((dbg, LEVEL_1, "Killing dead block predecessors on %+F\n", block));
-
-                       set_irn_in(block, j, ins);
-
-                       /* shorten all Phi nodes */
-                       for (phi = get_Block_phis(block); phi != NULL; phi = next) {
-                               next = get_Phi_next(phi);
-
-                               for (i = k = 0; i < n; ++i) {
-                                       ir_node *pred = get_Block_cfgpred_block(block, i);
-
-                                       if (is_Bad(pred))
-                                               continue;
-
-                                       if (! get_Block_mark(pred))
-                                               continue;
-
-                                       ins[k++] = get_Phi_pred(phi, i);
-                               }
-                               if (k == 1)
-                                       exchange(phi, ins[0]);
-                               else
-                                       set_irn_in(phi, k, ins);
-                       }
-               }
-
-       }
-
-       if (changed) {
-               /* kick keep alives */
-               ir_node *end = get_irg_end(irg);
-               int     i, j, n = get_End_n_keepalives(end);
-
-               NEW_ARR_A(ir_node *, ins, n);
-
-               for (i = j = 0; i < n; ++i) {
-                       ir_node *ka = get_End_keepalive(end, i);
-                       ir_node *ka_bl;
-
-                       if (is_Bad(ka))
-                               continue;
-                       if (is_Block(ka))
-                               ka_bl = ka;
-                       else
-                               ka_bl = get_nodes_block(skip_Proj(ka));
-                       if (get_Block_mark(ka_bl))
-                               ins[j++] = ka;
-               }
-               if (j != n)
-                       set_End_keepalives(end, j, ins);
-
-               free_irg_outs(irg);
-
-               /* this transformation do NOT invalidate the dominance */
-       }
-}  /* kill_unreachable_blocks */
-
-int opt_ldst(ir_graph *irg)
-{
-       block_t  *bl;
-       ir_graph *rem = current_ir_graph;
-
-       current_ir_graph = irg;
 
        FIRM_DBG_REGISTER(dbg, "firm.opt.ldst");
 
        DB((dbg, LEVEL_1, "\nDoing Load/Store optimization on %+F\n", irg));
 
-       /* we need landing pads */
-       remove_critical_cf_edges(irg);
+       assure_irg_properties(irg,
+               IR_GRAPH_PROPERTY_NO_CRITICAL_EDGES /* we need landing pads */
+               | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE
+               | IR_GRAPH_PROPERTY_CONSISTENT_OUTS
+               | IR_GRAPH_PROPERTY_NO_UNREACHABLE_CODE
+               | IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE);
 
        if (get_opt_alias_analysis()) {
-               assure_irg_entity_usage_computed(irg);
                assure_irp_globals_entity_usage_computed();
        }
 
        obstack_init(&env.obst);
-       ir_nodemap_init(&env.adr_map);
+       ir_nodehashmap_init(&env.adr_map);
 
        env.forward       = NULL;
        env.backward      = NULL;
@@ -2338,8 +2076,6 @@ int opt_ldst(ir_graph *irg)
        env.id_2_address  = NEW_ARR_F(ir_node *, 0);
 #endif
 
-       assure_irg_outs(irg);
-
        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
 
        /* first step: allocate block entries. Note that some blocks might be
@@ -2362,11 +2098,6 @@ int opt_ldst(ir_graph *irg)
                set_Block_mark(env.end_bl, 1);
        }
 
-       /* KILL unreachable blocks: these disturb the data flow analysis */
-       kill_unreachable_blocks(irg);
-
-       assure_doms(irg);
-
        /* second step: find and sort all memory ops */
        walk_memory_irg(irg, collect_memops, NULL, NULL);
 
@@ -2377,7 +2108,7 @@ int opt_ldst(ir_graph *irg)
 
        if (env.n_mem_ops == 0) {
                /* no memory ops */
-               goto end;
+               goto no_changes;
        }
 
        /* create the backward links. */
@@ -2400,22 +2131,19 @@ int opt_ldst(ir_graph *irg)
        /* create the current set */
        env.curr_set = rbitset_obstack_alloc(&env.obst, env.rbs_size);
        rbitset_set(env.curr_set, env.rbs_size - 1);
-       env.curr_id_2_memop = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
-       memset(env.curr_id_2_memop, 0, env.rbs_size * sizeof(env.curr_id_2_memop[0]));
+       env.curr_id_2_memop = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size);
 
        for (bl = env.forward; bl != NULL; bl = bl->forward_next) {
                /* set sentinel bits */
                bl->avail_out  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
                rbitset_set(bl->avail_out, env.rbs_size - 1);
 
-               bl->id_2_memop_avail = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
-               memset(bl->id_2_memop_avail, 0, env.rbs_size * sizeof(bl->id_2_memop_avail[0]));
+               bl->id_2_memop_avail = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size);
 
                bl->anticL_in  = rbitset_obstack_alloc(&env.obst, env.rbs_size);
                rbitset_set(bl->anticL_in, env.rbs_size - 1);
 
-               bl->id_2_memop_antic = NEW_ARR_D(memop_t *, &env.obst, env.rbs_size);
-               memset(bl->id_2_memop_antic, 0, env.rbs_size * sizeof(bl->id_2_memop_antic[0]));
+               bl->id_2_memop_antic = NEW_ARR_DZ(memop_t*, &env.obst, env.rbs_size);
        }
 
        (void) dump_block_list;
@@ -2433,24 +2161,22 @@ int opt_ldst(ir_graph *irg)
 
                /* not only invalidate but free them. We might allocate new out arrays
                   on our obstack which will be deleted yet. */
-               free_irg_outs(irg);
-               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+               confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_CONTROL_FLOW);
+       } else {
+no_changes:
+               confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        }
-end:
 
        ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_BLOCK_MARK);
-       ir_nodemap_destroy(&env.adr_map);
+       ir_nodehashmap_destroy(&env.adr_map);
        obstack_free(&env.obst, NULL);
 
 #ifdef DEBUG_libfirm
        DEL_ARR_F(env.id_2_address);
 #endif
-
-       current_ir_graph = rem;
-       return env.changed != 0;
-}  /* opt_ldst */
+}
 
 ir_graph_pass_t *opt_ldst_pass(const char *name)
 {
-       return def_graph_pass_ret(name ? name : "ldst_df", opt_ldst);
-}  /* opt_ldst_pass */
+       return def_graph_pass(name ? name : "ldst_df", opt_ldst);
+}