reuse is_switch_Cond function
[libfirm] / ir / opt / ldstopt.c
index f8e2b86..c8552d6 100644
@@ -215,7 +215,7 @@ static void collect_nodes(ir_node *node, void *env)
 
                        if (is_Proj(proj)) {
                                pred   = get_Proj_pred(proj);
-                               is_exc = get_Proj_proj(proj) == pn_Generic_X_except;
+                               is_exc = is_x_except_Proj(proj);
                        }
 
                        /* ignore Bad predecessors, they will be removed later */
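The new predicate bundles the "is this Proj the exception exit of its predecessor?" test that was previously open-coded. A hypothetical stand-in with the old hunk's semantics, purely for illustration (the real is_x_except_Proj lives in libfirm's node headers and also copes with opcode-specific projection numbers):

    /* hypothetical sketch, mirroring the removed open-coded test */
    static int is_exception_proj_sketch(const ir_node *proj)
    {
            return is_Proj(proj)
                && get_Proj_proj(proj) == pn_Generic_X_except;
    }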
@@ -346,11 +346,11 @@ static long get_Sel_array_index_long(ir_node *n, int dim)
  * @param depth  current depth in steps upward from the root
  *               of the address
  */
-static compound_graph_path *rec_get_accessed_path(ir_node *ptr, int depth)
+static compound_graph_path *rec_get_accessed_path(ir_node *ptr, size_t depth)
 {
        compound_graph_path *res = NULL;
        ir_entity           *root, *field, *ent;
-       int                 path_len, pos, idx;
+       size_t              path_len, pos, idx;
        ir_tarval           *tv;
        ir_type             *tp;
 
@@ -505,7 +505,7 @@ static compound_graph_path *get_accessed_path(ir_node *ptr)
 typedef struct path_entry {
        ir_entity         *ent;
        struct path_entry *next;
-       long              index;
+       size_t            index;
 } path_entry;
 
 static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
@@ -515,7 +515,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
        ir_initializer_t *initializer;
        ir_tarval        *tv;
        ir_type          *tp;
-       unsigned         n;
+       size_t           n;
 
        entry.next = next;
        if (is_SymConst(ptr)) {
@@ -538,7 +538,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
                                        continue;
                                }
                        }
-                       if (p->index >= (int) n)
+                       if (p->index >= n)
                                return NULL;
                        initializer = get_initializer_compound_value(initializer, p->index);
 
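Dropping the (int) cast is sound once index is itself a size_t: both sides of the comparison then share one unsigned type. Mixing signedness here is the classic trap; a standalone illustration in plain C (not libfirm code):

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            long   index = -1; /* the old signed field */
            size_t n     = 4;  /* unsigned element count */

            /* the signed side converts to size_t, -1 becomes SIZE_MAX,
             * so this bounds check treats -1 as out of range */
            if ((size_t)index >= n)
                    puts("unsigned compare: (size_t)-1 >= 4");

            /* the old workaround casts n down instead, which in turn
             * breaks for counts above INT_MAX */
            if (index >= (int)n)
                    puts("never printed: -1 >= 4 is false");
            return 0;
    }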
@@ -571,7 +571,7 @@ static ir_node *rec_find_compound_ent_value(ir_node *ptr, path_entry *next)
                        assert(get_Sel_n_indexs(ptr) == 1 && "multi dim arrays not implemented");
                        entry.index = get_Sel_array_index_long(ptr, 0) - get_array_lower_bound_int(tp, 0);
                } else {
-                       int i, n_members = get_compound_n_members(tp);
+                       size_t i, n_members = get_compound_n_members(tp);
                        for (i = 0; i < n_members; ++i) {
                                if (get_compound_member(tp, i) == field)
                                        break;
@@ -746,20 +746,29 @@ static void reduce_adr_usage(ir_node *ptr)
  */
 static int can_use_stored_value(ir_mode *old_mode, ir_mode *new_mode)
 {
+       unsigned old_size;
+       unsigned new_size;
        if (old_mode == new_mode)
-               return 1;
+               return true;
+
+       old_size = get_mode_size_bits(old_mode);
+       new_size = get_mode_size_bits(new_mode);
 
        /* if both modes are two's-complement ones, we can always convert the
-          Stored value into the needed one. */
-       if (get_mode_size_bits(old_mode) >= get_mode_size_bits(new_mode) &&
+          Stored value into the needed one. (On big-endian machines we currently
+          only support this for modes of the same size.) */
+       if (old_size >= new_size &&
                  get_mode_arithmetic(old_mode) == irma_twos_complement &&
-                 get_mode_arithmetic(new_mode) == irma_twos_complement)
-               return 1;
-       return 0;
-}  /* can_use_stored_value */
+                 get_mode_arithmetic(new_mode) == irma_twos_complement &&
+                 (!be_get_backend_param()->byte_order_big_endian
+               || old_size == new_size)) {
+               return true;
+       }
+       return false;
+}
 
 /**
- * Check whether a Call is at least pure, ie. does only read memory.
+ * Check whether a Call is at least pure, i.e. it only reads memory.
  */
 static unsigned is_Call_pure(ir_node *call)
 {
@@ -867,7 +876,10 @@ static int try_load_after_store(ir_node *load,
        store_value    = get_Store_value(store);
 
        if (delta != 0 || store_mode != load_mode) {
-               if (delta < 0 || delta + load_mode_len > store_mode_len)
+               /* TODO: implement for big-endian */
+               if (delta < 0 || delta + load_mode_len > store_mode_len
+                               || (be_get_backend_param()->byte_order_big_endian
+                                   && load_mode_len != store_mode_len))
                        return 0;
 
                if (get_mode_arithmetic(store_mode) != irma_twos_complement ||
@@ -880,7 +892,6 @@ static int try_load_after_store(ir_node *load,
                        ir_node *cnst;
                        ir_graph *irg = get_irn_irg(load);
 
-                       /* FIXME: only true for little endian */
                        cnst        = new_r_Const_long(irg, mode_Iu, delta * 8);
                        store_value = new_r_Shr(get_nodes_block(load),
                                                                        store_value, cnst, store_mode);
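With a nonzero delta the forwarded value must additionally be shifted: on little endian, the bytes at store address + delta are exactly store_value >> (delta * 8), which is the Shr the code above builds. A small check of that identity (host C, not libfirm code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t store_value = 0x11223344;
            unsigned delta       = 1; /* load addr minus store addr, bytes */
            uint16_t loaded, shifted;

            /* a 16-bit load one byte into the stored 32-bit value */
            memcpy(&loaded, (unsigned char *)&store_value + delta,
                   sizeof(loaded));

            /* the equivalent of the Shr emitted by try_load_after_store */
            shifted = (uint16_t)(store_value >> (delta * 8));

            /* both print 0x2233 on a little-endian host */
            printf("loaded = 0x%04x, shifted = 0x%04x\n", loaded, shifted);
            return 0;
    }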
@@ -902,7 +913,7 @@ static int try_load_after_store(ir_node *load,
        /* no exception */
        if (info->projs[pn_Load_X_except]) {
                ir_graph *irg = get_irn_irg(load);
-               exchange( info->projs[pn_Load_X_except], new_r_Bad(irg));
+                               exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                res |= CF_CHANGED;
        }
        if (info->projs[pn_Load_X_regular]) {
@@ -976,6 +987,9 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
                         * Here, there is no need to check if the previous Load has an
                         * exception handler because they would have exactly the same
                         * exception...
+                        *
+                        * TODO: implement load-after-load with different mode for big
+                        *       endian
                         */
                        if (info->projs[pn_Load_X_except] == NULL
                                        || get_nodes_block(load) == get_nodes_block(pred)) {
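For orientation, load-after-load forwarding at the source level: a second Load of the same address can reuse the first one's result as long as no Store or impure Call sits between them on the memory chain (sketch, not libfirm code):

    int twice(const int *p)
    {
            int a = *p; /* first Load */
            int b = *p; /* same address, same mode: forwarded from a */
            /* a narrower re-load such as *(const char *)p would need an
             * extra Shr/Conv on top; trivial on little endian, still
             * unimplemented for big endian -- hence the TODO above */
            return a + b;
    }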
@@ -1005,7 +1019,7 @@ static unsigned follow_Mem_chain(ir_node *load, ir_node *curr)
                                /* no exception */
                                if (info->projs[pn_Load_X_except]) {
                                        ir_graph *irg = get_irn_irg(load);
-                                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+                                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                                        res |= CF_CHANGED;
                                }
                                if (info->projs[pn_Load_X_regular]) {
@@ -1142,7 +1156,7 @@ static unsigned optimize_load(ir_node *load)
                        /* no exception, clear the info field as it might be checked later again */
                        if (info->projs[pn_Load_X_except]) {
                                ir_graph *irg = get_irn_irg(load);
-                               exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+                               exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                                info->projs[pn_Load_X_except] = NULL;
                                res |= CF_CHANGED;
                        }
@@ -1177,7 +1191,7 @@ static unsigned optimize_load(ir_node *load)
                /* we completely replace the load by this value */
                if (info->projs[pn_Load_X_except]) {
                        ir_graph *irg = get_irn_irg(load);
-                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg));
+                       exchange(info->projs[pn_Load_X_except], new_r_Bad(irg, mode_X));
                        info->projs[pn_Load_X_except] = NULL;
                        res |= CF_CHANGED;
                }
@@ -1495,10 +1509,6 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
 
        block = get_nodes_block(store);
 
-       /* abort on dead blocks */
-       if (is_Block_dead(block))
-               return 0;
-
        /* check if the block is post-dominated by the Phi block
           and has no exception exit */
        bl_info = (block_info_t*)get_irn_link(block);
@@ -1534,10 +1544,7 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
                if (exc != info->exc_block)
                        return 0;
 
-               /* abort on dead blocks */
                block = get_nodes_block(pred);
-               if (is_Block_dead(block))
-                       return 0;
 
                /* check if the block is post-dominated by the Phi block
                   and has no exception exit. Note that block must be different from
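optimize_phi performs store sinking across a control-flow join; at the source level the transformation looks roughly as follows (illustrative C, not libfirm code):

    /* before: one Store per predecessor block */
    void before(int c, int *p, int a, int b)
    {
            if (c)
                    *p = a;
            else
                    *p = b;
    }

    /* after: the values are merged (a Phi in the join block) and a
     * single Store remains; legal only because the join block
     * post-dominates both Stores and no exception exit intervenes */
    void after(int c, int *p, int a, int b)
    {
            int t = c ? a : b;
            *p = t;
    }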
@@ -1912,7 +1919,12 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env)
                                        ninfo = get_ldst_info(irn, phase_obst(&env->ph));
 
                                        ninfo->projs[pn_Load_M] = mem = new_r_Proj(irn, mode_M, pn_Load_M);
-                                       set_Phi_pred(phi, pos, mem);
+                                       if (res == NULL) {
+                                               /* irn is a fresh Load: wire it into the Phi.
+                                                * If irn came from the cache instead, the Phi pred
+                                                * must not be set again: there might already be
+                                                * other Loads between phi and irn.
+                                                */
+                                               set_Phi_pred(phi, pos, mem);
+                                       }
 
                                        ninfo->projs[pn_Load_res] = new_r_Proj(irn, load_mode, pn_Load_res);
                                }
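move_loads_out_of_loops is, at the source level, loop-invariant load hoisting; the cache consulted through res ensures that several Phi inputs needing the same value share one hoisted Load. A rough C picture (not libfirm code):

    /* before: *p is loaded in every iteration */
    int sum_before(const int *p, int n)
    {
            int s = 0;
            for (int i = 0; i < n; ++i)
                    s += *p;
            return s;
    }

    /* after: the Load is moved in front of the loop */
    int sum_after(const int *p, int n)
    {
            int t = *p; /* hoisted Load */
            int s = 0;
            for (int i = 0; i < n; ++i)
                    s += t;
            return s;
    }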
@@ -2271,8 +2283,8 @@ int optimize_load_store(ir_graph *irg)
 
        /* Handle graph state */
        if (env.changes) {
-               set_irg_outs_inconsistent(irg);
                set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+               edges_deactivate(irg);
        }
 
        if (env.changes & CF_CHANGED) {
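Finally, a hedged usage sketch of the entry point; get_irp_n_irgs/get_irp_irg are libfirm's usual program-graph iteration, but treat the snippet as illustrative rather than canonical:

    #include <libfirm/firm.h>

    /* run the load/store optimization on every graph of the program */
    static void run_ldstopt(void)
    {
            size_t i, n = get_irp_n_irgs();
            for (i = 0; i < n; ++i) {
                    ir_graph *irg = get_irp_irg(i);
                    /* returns nonzero if something changed; as the hunk
                     * above shows, the pass already invalidates dependent
                     * state (entity usage, edges) itself */
                    (void)optimize_load_store(irg);
            }
    }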