API to enable dumping
diff --git a/ir/opt/ldstopt.c b/ir/opt/ldstopt.c
index c8552d6..d8cd2d3 100644
@@ -50,6 +50,7 @@
 #include "set.h"
 #include "be.h"
 #include "debug.h"
+#include "opt_manage.h"
 
 /** The debug handle. */
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
@@ -74,7 +75,7 @@ typedef struct walk_env_t {
 
 /** A Load/Store info. */
 typedef struct ldst_info_t {
-       ir_node  *projs[MAX_PROJ];    /**< list of Proj's of this node */
+       ir_node  *projs[MAX_PROJ+1];  /**< list of Proj's of this node */
        ir_node  *exc_block;          /**< the exception block if available */
        int      exc_idx;             /**< predecessor index in the exception block */
        unsigned visited;             /**< visited counter for breaking loops */
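
The +1 matters because the projs array is indexed directly with Proj numbers and MAX_PROJ names the largest such number rather than a count, so index MAX_PROJ itself must be addressable. A minimal sketch of that indexing pattern (hypothetical helper name, assuming MAX_PROJ is the maximum pn_* value of Load/Store):

	static void record_proj(ldst_info_t *info, ir_node *proj)
	{
		long nr = get_Proj_proj(proj);

		assert(0 <= nr && nr <= MAX_PROJ && "unexpected Proj number");
		/* nr == MAX_PROJ is a valid index, hence the MAX_PROJ+1 array size */
		info->projs[nr] = proj;
	}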
@@ -1096,8 +1097,9 @@ ir_node *can_replace_load_by_const(const ir_node *load, ir_node *c)
                if (is_reinterpret_cast(c_mode, l_mode)) {
                        /* copy the value from the const code irg and cast it */
                        res = new_rd_Conv(dbgi, block, res, l_mode);
+               } else {
+                       return NULL;
                }
-               return NULL;
        }
        return res;
 }
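
Net effect of this hunk: the old code built the Conv and then fell through to the unconditional return NULL, throwing the reinterpreted constant away; now NULL is only returned when no reinterpret cast is possible. A consolidated view of the corrected tail, assuming the enclosing test is a mode mismatch (sketch, not verbatim):

	if (c_mode != l_mode) {
		if (is_reinterpret_cast(c_mode, l_mode)) {
			/* copy the value from the const code irg and cast it */
			res = new_rd_Conv(dbgi, block, res, l_mode);
		} else {
			/* incompatible modes: the Load cannot be replaced */
			return NULL;
		}
	}
	return res;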
@@ -1167,7 +1169,7 @@ static unsigned optimize_load(ir_node *load)
                        }
 
                        if (get_entity_linkage(ent) & IR_LINKAGE_CONSTANT) {
-                               if (ent->initializer != NULL) {
+                               if (has_entity_initializer(ent)) {
                                        /* new style initializer */
                                        value = find_compound_ent_value(ptr);
                                } else if (entity_has_compound_ent_values(ent)) {
@@ -1182,8 +1184,20 @@ static unsigned optimize_load(ir_node *load)
                                                free_compound_graph_path(path);
                                        }
                                }
-                               if (value != NULL)
+                               if (value != NULL) {
+                                       ir_graph *irg = get_irn_irg(load);
                                        value = can_replace_load_by_const(load, value);
+                                       if (value != NULL && is_Sel(ptr) &&
+                                                       !is_irg_state(irg, IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING)) {
+                                               /* frontend has inserted masking operations after bitfield accesses,
+                                                * so we might have to shift the const. */
+                                               unsigned char bit_offset = get_entity_offset_bits_remainder(get_Sel_entity(ptr));
+                                               ir_tarval *tv_old = get_Const_tarval(value);
+                                               ir_tarval *tv_offset = new_tarval_from_long(bit_offset, mode_Bu);
+                                               ir_tarval *tv_new = tarval_shl(tv_old, tv_offset);
+                                               value = new_r_Const(irg, tv_new);
+                                       }
+                               }
                        }
                }
        }
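
The new branch only fires when the graph lacks the IR_GRAPH_STATE_IMPLICIT_BITFIELD_MASKING state, i.e. the frontend emitted explicit shift/mask operations after every bitfield Load. The constant that replaces such a Load therefore has to look like the raw memory word, not the already extracted field value. A small worked example with hypothetical numbers:

	/* hypothetical bitfield member: bit offset 3 within its byte, value 5 */
	unsigned char bit_offset = 3;  /* get_entity_offset_bits_remainder(ent)   */
	long field_val = 5;            /* value found in the constant initializer */
	long raw_word  = field_val << bit_offset;  /* 40: what memory would hold  */
	/* the frontend's masking code then recovers (40 >> 3) & 0x1f == 5 again  */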
@@ -1480,7 +1494,10 @@ static unsigned optimize_store(ir_node *store)
 static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
 {
        int i, n;
-       ir_node *store, *old_store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+       ir_node *store, *ptr, *block, *phi_block, *phiM, *phiD, *exc, *projM;
+#ifdef DO_CACHEOPT
+       ir_node *old_store;
+#endif
        ir_mode *mode;
        ir_node **inM, **inD, **projMs;
        int *idx;
@@ -1503,7 +1520,9 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
                return 0;
 
        store = skip_Proj(projM);
+#ifdef DO_CACHEOPT
        old_store = store;
+#endif
        if (!is_Store(store))
                return 0;
 
@@ -1673,7 +1692,7 @@ static void do_load_store_optimize(ir_node *n, void *env)
                break;
 
        default:
-               ;
+               break;
        }
 }  /* do_load_store_optimize */
 
@@ -1814,7 +1833,6 @@ static unsigned hash_cache_entry(const avail_entry_t *entry)
 static void move_loads_out_of_loops(scc *pscc, loop_env *env)
 {
        ir_node   *phi, *load, *next, *other, *next_other;
-       ir_entity *ent;
        int       j;
        phi_entry *phi_list = NULL;
        set       *avail;
@@ -1870,7 +1888,6 @@ static void move_loads_out_of_loops(scc *pscc, loop_env *env)
                        /* for now, we can only move Load(Global) */
                        if (! is_Global(ptr))
                                continue;
-                       ent       = get_Global_entity(ptr);
                        load_mode = get_Load_mode(load);
                        for (other = pscc->head; other != NULL; other = next_other) {
                                node_entry *ne = get_irn_ne(other, env);
@@ -2193,15 +2210,15 @@ static void do_dfs(ir_graph *irg, loop_env *env)
                ir_node *pred = get_Block_cfgpred(endblk, i);
 
                pred = skip_Proj(pred);
-               if (is_Return(pred))
+               if (is_Return(pred)) {
                        dfs(get_Return_mem(pred), env);
-               else if (is_Raise(pred))
+               } else if (is_Raise(pred)) {
                        dfs(get_Raise_mem(pred), env);
-               else if (is_fragile_op(pred))
+               } else if (is_fragile_op(pred)) {
                        dfs(get_fragile_op_mem(pred), env);
-               else if (is_Bad(pred))
-                       /* ignore non-optimized block predecessor */;
-               else {
+               } else if (is_Bad(pred)) {
+                       /* ignore non-optimized block predecessor */
+               } else {
                        assert(0 && "Unknown EndBlock predecessor");
                }
        }
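
For reference, the End-block predecessor dispatch produced by this hunk reads as one brace-balanced chain (the is_Bad case deliberately does nothing):

	if (is_Return(pred)) {
		dfs(get_Return_mem(pred), env);
	} else if (is_Raise(pred)) {
		dfs(get_Raise_mem(pred), env);
	} else if (is_fragile_op(pred)) {
		dfs(get_fragile_op_mem(pred), env);
	} else if (is_Bad(pred)) {
		/* ignore non-optimized block predecessor */
	} else {
		assert(0 && "Unknown EndBlock predecessor");
	}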
@@ -2244,9 +2261,10 @@ static int optimize_loops(ir_graph *irg)
 /*
  * do the load store optimization
  */
-int optimize_load_store(ir_graph *irg)
+static ir_graph_state_t do_loadstore_opt(ir_graph *irg)
 {
        walk_env_t env;
+       ir_graph_state_t res = 0;
 
        FIRM_DBG_REGISTER(dbg, "firm.opt.ldstopt");
 
@@ -2254,16 +2272,7 @@ int optimize_load_store(ir_graph *irg)
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
                "LoadStore optimization needs pinned graph");
 
-       /* we need landing pads */
-       remove_critical_cf_edges(irg);
-
-       edges_assure(irg);
-
-       /* for Phi optimization post-dominators are needed ... */
-       assure_postdoms(irg);
-
        if (get_opt_alias_analysis()) {
-               assure_irg_entity_usage_computed(irg);
                assure_irp_globals_entity_usage_computed();
        }
 
@@ -2288,12 +2297,26 @@ int optimize_load_store(ir_graph *irg)
        }
 
        if (env.changes & CF_CHANGED) {
-               /* is this really needed: Yes, control flow changed, block might
-               have Bad() predecessors. */
+               /* control flow changed, block might have Bad() predecessors. */
                set_irg_doms_inconsistent(irg);
+       } else {
+               res |= IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_NO_BAD_BLOCKS;
        }
-       return env.changes != 0;
-}  /* optimize_load_store */
+
+       return res;
+}
+
+optdesc_t opt_loadstore = {
+       "load-store",
+       IR_GRAPH_STATE_NO_UNREACHABLE_BLOCKS | IR_GRAPH_STATE_CONSISTENT_OUT_EDGES | IR_GRAPH_STATE_NO_CRITICAL_EDGES | IR_GRAPH_STATE_CONSISTENT_DOMINANCE | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE,
+       do_loadstore_opt,
+};
+
+int optimize_load_store(ir_graph *irg)
+{
+       perform_irg_optimization(irg, &opt_loadstore);
+       return 1;
+}
 
 ir_graph_pass_t *optimize_load_store_pass(const char *name)
 {
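
The pass is now a worker behind the opt_manage framework: opt_loadstore names the pass, lists the graph states it needs on entry, and points at do_loadstore_opt, which reports which states are still (or newly) valid afterwards. The hand-rolled remove_critical_cf_edges/edges_assure/assure_* calls removed above correspond roughly to those required states. opt_manage.h itself is not part of this diff, so the driver below is only a sketch of what perform_irg_optimization presumably does, with the descriptor layout taken from the initializer above:

	/* sketch only: the real perform_irg_optimization() lives in opt_manage.c */
	typedef struct optdesc_t {
		const char       *name;                           /* "load-store"          */
		ir_graph_state_t  requirements;                   /* states needed upfront */
		ir_graph_state_t (*optimization)(ir_graph *irg);  /* returns kept states   */
	} optdesc_t;

	static void perform_irg_optimization_sketch(ir_graph *irg, optdesc_t *opt)
	{
		/* 1. establish the required states (out edges, dominance, no critical
		 *    edges, entity usage, ...) instead of each pass doing it by hand */
		/* 2. run the worker */
		ir_graph_state_t kept = opt->optimization(irg);
		/* 3. mark everything the worker did not report as kept as inconsistent */
		(void)kept;
	}

With the preconditions handled centrally, optimize_load_store() shrinks to a wrapper that always returns 1.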