Some more cleanup: Put the return type and other specifiers on the same line as the function name.
[libfirm] / ir / opt / cfopt.c
index d33d9d6..e2a658a 100644 (file)
@@ -23,9 +23,7 @@
  * @author  Goetz Lindenmaier, Michael Beck, Sebastian Hack
  * @version $Id$
  */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
 
 #include "iroptimize.h"
 
 #include "irvrfy.h"
 #include "iredges.h"
 
-#include "array.h"
+#include "array_t.h"
 
 #include "irouts.h"
 #include "irbackedge_t.h"
 
 #include "irflag_t.h"
 #include "firmstat.h"
+#include "irpass.h"
 
 #include "iropt_dbg.h"
 
@@ -87,7 +86,8 @@
  * Note that the simple case that Block has only these two
  * predecessors are already handled in equivalent_node_Block().
  */
-static int remove_senseless_conds(ir_node *bl) {
+static int remove_senseless_conds(ir_node *bl)
+{
        int i, j;
        int n = get_Block_n_cfgpreds(bl);
        int changed = 0;
@@ -104,7 +104,7 @@ static int remove_senseless_conds(ir_node *bl) {
                                ir_node *cond_j = skip_Proj(pred_j);
 
                                if (cond_j == cond_i) {
-                                       ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
+                                       ir_node *jmp = new_r_Jmp(get_nodes_block(cond_i));
                                        set_irn_n(bl, i, jmp);
                                        set_irn_n(bl, j, new_Bad());
 
@@ -132,7 +132,8 @@ typedef struct _merge_env {
  * Therefore we also optimize at control flow operations, depending
  * how we first reach the Block.
  */
-static void merge_blocks(ir_node *node, void *ctx) {
+static void merge_blocks(ir_node *node, void *ctx)
+{
        int i;
        ir_node *new_block;
        merge_env *env = ctx;
@@ -165,7 +166,7 @@ static void merge_blocks(ir_node *node, void *ctx) {
                if (!is_Block_dead(b)) {
                        new_block = equivalent_node(b);
 
-                       while (irn_not_visited(b) && (!is_Block_dead(new_block)) && (new_block != b)) {
+                       while (!irn_visited(b) && !is_Block_dead(new_block) && new_block != b) {
                                /* We would have to run gigo() if new is bad, so we
                                   promote it directly below. Nevertheless, we sometimes reach a block
                                   the first time through a dataflow node.  In this case we optimized the
@@ -200,7 +201,8 @@ static void merge_blocks(ir_node *node, void *ctx) {
  *
  * Must be run in the post walker.
  */
-static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
+static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
+{
        int i;
        int *changed = env;
 
@@ -217,15 +219,20 @@ static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
                                set_Block_dead(pred_bl);
                                exchange(pred_X, new_Bad());
                                *changed = 1;
-                       } else if (skipped != pred_X)
+                       } else if (skipped != pred_X) {
                                set_Block_cfgpred(block, i, skipped);
+                               *changed = 1;
+                       }
                }
        }
 
        *changed |= remove_senseless_conds(block);
 
-       /* clear the block mark of all blocks */
-       set_Block_mark(block, 0);
+       /* clear the block mark of all non labeled blocks */
+       if (has_Block_entity(block))
+               set_Block_non_removable(block);
+       else
+               set_Block_removable(block);
 }
 
 /**
@@ -235,13 +242,14 @@ static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
  * Links all Proj nodes to their predecessors.
  * Collects all switch-Conds in a list.
  */
-static void collect_nodes(ir_node *n, void *ctx) {
+static void collect_nodes(ir_node *n, void *ctx)
+{
        ir_opcode code = get_irn_opcode(n);
        merge_env *env = ctx;
 
        if (code == iro_Block) {
                /* mark the block as non-removable if it is labeled */
-               if (has_Block_label(n))
+               if (has_Block_entity(n))
                        set_Block_non_removable(n);
        } else {
                ir_node *b = get_nodes_block(n);
@@ -270,7 +278,8 @@ static void collect_nodes(ir_node *n, void *ctx) {
 }
 
 /** Returns true if pred is predecessor of block. */
-static int is_pred_of(ir_node *pred, ir_node *b) {
+static int is_pred_of(ir_node *pred, ir_node *b)
+{
        int i;
 
        for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
@@ -308,7 +317,8 @@ static int is_pred_of(ir_node *pred, ir_node *b) {
  *  To perform the test for pos, we must regard predecessors before pos
  *  as already removed.
  **/
-static int test_whether_dispensable(ir_node *b, int pos) {
+static int test_whether_dispensable(ir_node *b, int pos)
+{
        int i, j, n_preds = 1;
        ir_node *pred = get_Block_cfgpred_block(b, pos);
 
@@ -407,9 +417,10 @@ non_dispensable:
  * @@@ It is negotiable whether we should do this ... there might end up a copy
  * from the Phi in the loop when removing the Phis.
  */
-static void optimize_blocks(ir_node *b, void *ctx) {
+static void optimize_blocks(ir_node *b, void *ctx)
+{
        int i, j, k, n, max_preds, n_preds, p_preds = -1;
-       ir_node *pred, *phi;
+       ir_node *pred, *phi, *next;
        ir_node **in;
        merge_env *env = ctx;
 
@@ -419,21 +430,21 @@ static void optimize_blocks(ir_node *b, void *ctx) {
        for (i = 0, k = get_Block_n_cfgpreds(b); i < k; ++i) {
                max_preds += test_whether_dispensable(b, i);
        }
-       in = xmalloc(max_preds * sizeof(*in));
+       in = XMALLOCN(ir_node*, max_preds);
 
        /*- Fix the Phi nodes of the current block -*/
-       for (phi = get_irn_link(b); phi; ) {
-               assert(get_irn_op(phi) == op_Phi);
+       for (phi = get_irn_link(b); phi != NULL; phi = next) {
+               assert(is_Phi(phi));
+               next = get_irn_link(phi);
 
                /* Find the new predecessors for the Phi */
                p_preds = 0;
                for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
                        pred = get_Block_cfgpred_block(b, i);
 
-                       if (is_Bad(get_Block_cfgpred(b, i))) {
+                       if (is_Block_dead(pred)) {
                                /* case Phi 1: Do nothing */
-                       }
-                       else if (is_Block_removable(pred) && Block_not_block_visited(pred)) {
+                       } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                                /* case Phi 2: It's an empty block and not yet visited. */
                                ir_node *phi_pred = get_Phi_pred(phi, i);
 
@@ -443,7 +454,7 @@ static void optimize_blocks(ir_node *b, void *ctx) {
                                        if (! is_Bad(get_Block_cfgpred(pred, j))) {
                                                if (get_nodes_block(phi_pred) == pred) {
                                                        /* case Phi 2a: */
-                                                       assert(get_irn_op(phi_pred) == op_Phi);  /* Block is empty!! */
+                                                       assert(is_Phi(phi_pred));  /* Block is empty!! */
 
                                                        in[p_preds++] = get_Phi_pred(phi_pred, j);
                                                } else {
@@ -466,8 +477,6 @@ static void optimize_blocks(ir_node *b, void *ctx) {
                else
                        set_irn_in(phi, p_preds, in);
                env->changed = 1;
-
-               phi = get_irn_link(phi);
        }
 
        /*- This happens only if merge between loop backedge and single loop entry.
@@ -476,7 +485,7 @@ static void optimize_blocks(ir_node *b, void *ctx) {
        for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
                ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
 
-               if (is_Block_removable(predb) && Block_not_block_visited(predb)) {
+               if (is_Block_removable(predb) && !Block_block_visited(predb)) {
                        ir_node *next_phi;
 
                        /* we found a predecessor block at position k that will be removed */
@@ -501,9 +510,9 @@ static void optimize_blocks(ir_node *b, void *ctx) {
                                        for (i = 0; i < k; i++) {
                                                pred = get_Block_cfgpred_block(b, i);
 
-                                               if (is_Bad(pred)) {
+                                               if (is_Block_dead(pred)) {
                                                        /* Do nothing */
-                                               } else if (is_Block_removable(pred) && Block_not_block_visited(pred)) {
+                                               } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                                                        /* It's an empty block and not yet visited. */
                                                        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                                                                if (! is_Bad(get_Block_cfgpred(pred, j)))
@@ -523,11 +532,11 @@ static void optimize_blocks(ir_node *b, void *ctx) {
 
                                        /* and now all the rest */
                                        for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
-                                               pred = get_nodes_block(get_Block_cfgpred(b, i));
+                                               pred = get_Block_cfgpred_block(b, i);
 
-                                               if (is_Bad(get_Block_cfgpred(b, i))) {
+                                               if (is_Block_dead(pred)) {
                                                        /* Do nothing */
-                                               } else if (is_Block_removable(pred) && Block_not_block_visited(pred)) {
+                                               } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                                                        /* It's an empty block and not yet visited. */
                                                        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                                                                if (! is_Bad(get_Block_cfgpred(pred, j)))
@@ -557,19 +566,19 @@ static void optimize_blocks(ir_node *b, void *ctx) {
        for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
                pred = get_Block_cfgpred_block(b, i);
 
-               if (is_Bad(pred)) {
+               if (is_Block_dead(pred)) {
                        /* case 1: Do nothing */
-               } else if (is_Block_removable(pred) && Block_not_block_visited(pred)) {
+               } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                        /* case 2: It's an empty block and not yet visited. */
-                       assert(get_Block_n_cfgpreds(b) > 1);
+                       assert(get_Block_n_cfgpreds(b) > 1 || has_Block_entity(b));
                        /* Else it should be optimized by equivalent_node. */
                        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
-                               ir_node *pred_block = get_Block_cfgpred(pred, j);
+                               ir_node *pred_X = get_Block_cfgpred(pred, j);
 
                                /* because of breaking loops, not all predecessors are Bad-clean,
                                 * so we must check this here again */
-                               if (! is_Bad(pred_block))
-                                       in[n_preds++] = pred_block;
+                               if (! is_Bad(pred_X))
+                                       in[n_preds++] = pred_X;
                        }
                        /* Remove block as it might be kept alive. */
                        exchange(pred, b/*new_Bad()*/);
@@ -591,7 +600,8 @@ static void optimize_blocks(ir_node *b, void *ctx) {
  * Block walker: optimize all blocks using the default optimizations.
  * This removes Blocks that with only a Jmp predecessor.
  */
-static void remove_simple_blocks(ir_node *block, void *ctx) {
+static void remove_simple_blocks(ir_node *block, void *ctx)
+{
        ir_node *new_blk = equivalent_node(block);
        merge_env *env = ctx;
 
@@ -613,7 +623,8 @@ static void remove_simple_blocks(ir_node *block, void *ctx) {
  *
  * Expects all Proj's linked to the cond node
  */
-static int handle_switch_cond(ir_node *cond) {
+static int handle_switch_cond(ir_node *cond)
+{
        ir_node *sel = get_Cond_selector(cond);
 
        ir_node *proj1 = get_irn_link(cond);
@@ -624,9 +635,9 @@ static int handle_switch_cond(ir_node *cond) {
 
        if (proj2 == NULL) {
                /* this Cond has only one Proj: must be the defProj */
-               assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
+               assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
                /* convert it into a Jmp */
-               jmp = new_r_Jmp(current_ir_graph, blk);
+               jmp = new_r_Jmp(blk);
                exchange(proj1, jmp);
                return 1;
        } else if (get_irn_link(proj2) == NULL) {
@@ -637,12 +648,12 @@ static int handle_switch_cond(ir_node *cond) {
                if (tv != tarval_bad) {
                        /* we have a constant switch */
                        long num     = get_tarval_long(tv);
-                       long def_num = get_Cond_defaultProj(cond);
+                       long def_num = get_Cond_default_proj(cond);
 
                        if (def_num == get_Proj_proj(proj1)) {
                                /* first one is the defProj */
                                if (num == get_Proj_proj(proj2)) {
-                                       jmp = new_r_Jmp(current_ir_graph, blk);
+                                       jmp = new_r_Jmp(blk);
                                        exchange(proj2, jmp);
                                        exchange(proj1, new_Bad());
                                        return 1;
@@ -650,7 +661,7 @@ static int handle_switch_cond(ir_node *cond) {
                        } else if (def_num == get_Proj_proj(proj2)) {
                                /* second one is the defProj */
                                if (num == get_Proj_proj(proj1)) {
-                                       jmp = new_r_Jmp(current_ir_graph, blk);
+                                       jmp = new_r_Jmp(blk);
                                        exchange(proj1, jmp);
                                        exchange(proj2, new_Bad());
                                        return 1;
@@ -658,12 +669,12 @@ static int handle_switch_cond(ir_node *cond) {
                        } else {
                                /* neither: strange, Cond was not optimized so far */
                                if (num == get_Proj_proj(proj1)) {
-                                       jmp = new_r_Jmp(current_ir_graph, blk);
+                                       jmp = new_r_Jmp(blk);
                                        exchange(proj1, jmp);
                                        exchange(proj2, new_Bad());
                                        return 1;
                                } else if (num == get_Proj_proj(proj2)) {
-                                       jmp = new_r_Jmp(current_ir_graph, blk);
+                                       jmp = new_r_Jmp(blk);
                                        exchange(proj2, jmp);
                                        exchange(proj1, new_Bad());
                                        return 1;
@@ -690,8 +701,9 @@ static int handle_switch_cond(ir_node *cond) {
  * We use the mark flag to mark removable blocks in the first
  * phase.
  */
-void optimize_cf(ir_graph *irg) {
-       int i, j, n;
+void optimize_cf(ir_graph *irg)
+{
+       int i, j, n, changed;
        ir_node **in = NULL;
        ir_node *cond, *end = get_irg_end(irg);
        ir_graph *rem = current_ir_graph;
@@ -710,7 +722,7 @@ void optimize_cf(ir_graph *irg) {
        edges_deactivate(irg);
 
        /* we use the mark flag to mark removable blocks */
-       set_using_block_mark(irg);
+       ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
 restart:
        env.changed    = 0;
        env.phis_moved = 0;
@@ -721,29 +733,34 @@ restart:
        irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
 
        /* fix the keep-alives */
+       changed = 0;
        for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
                ir_node *ka = get_End_keepalive(end, i);
 
                if (is_Block(ka)) {
                        /* do NOT keep dead blocks */
-                       if (get_Block_dom_depth(ka) < 0) {
+                       if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
                                set_End_keepalive(end, i, new_Bad());
-                               env.changed = 1;
+                               changed = 1;
+                       }
+               } else {
+                       ir_node *block = get_nodes_block(ka);
+
+                       if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
+                               /* do NOT keep nodes in dead blocks */
+                               set_End_keepalive(end, i, new_Bad());
+                               changed = 1;
                        }
-               } else if (is_Block_dead(get_nodes_block(ka)) ||
-                          get_Block_dom_depth(get_nodes_block(ka)) < 0) {
-                       /* do NOT keep nodes in dead blocks */
-                       set_End_keepalive(end, i, new_Bad());
-                       env.changed = 1;
                }
        }
+       env.changed |= changed;
 
-       set_using_irn_link(irg);
+       ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
 
        env.list = plist_new();
        irg_walk(end, merge_blocks, collect_nodes, &env);
 
-       clear_using_irn_link(irg);
+       ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
 
        if (env.changed) {
                /* Handle graph state if was changed. */
@@ -751,6 +768,7 @@ restart:
                set_irg_doms_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);
                set_irg_loopinfo_inconsistent(irg);
+               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
                env.changed = 0;
        }
 
@@ -770,51 +788,39 @@ restart:
        /* Optimize the standard code. */
        env.changed = 0;
        assure_doms(irg);
-       irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, &env);
-
-       /* Walk all keep alives, optimize them if block, add to new in-array
-          for end if useful. */
-       n  = get_End_n_keepalives(end);
-       if (n > 0)
-               NEW_ARR_A(ir_node *, in, n);
+       irg_block_walk_graph(irg, optimize_blocks, remove_simple_blocks, &env);
 
        /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
+       ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
        inc_irg_visited(irg);
-       set_using_irn_visited(irg);
 
-       /* fix the keep alive */
-       for (i = j = 0; i < n; i++) {
+       /* fix the keep-alives again */
+       changed = 0;
+       for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
                ir_node *ka = get_End_keepalive(end, i);
 
-               if (irn_not_visited(ka)) {
-                       ir_op *op = get_irn_op(ka);
-
-                       if ((op == op_Block) && Block_not_block_visited(ka)) {
-                               /* irg_block_walk() will increase the block visited flag, but we must visit only
-                                  these blocks that are not visited yet, so decrease it first. */
-                               set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
-                               irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
-                               mark_irn_visited(ka);
-                               in[j++] = ka;
-                       } else if (op == op_Phi) {
-                               mark_irn_visited(ka);
-                               /* don't keep alive dead blocks */
-                               if (! is_Block_dead(get_nodes_block(ka)))
-                                       in[j++] = ka;
-                       } else if (is_op_keep(op)) {
-                               mark_irn_visited(ka);
-                               if (! is_Block_dead(get_nodes_block(ka)))
-                                       in[j++] = ka;
+               if (is_Block(ka)) {
+                       /* do NOT keep dead blocks */
+                       if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
+                               set_End_keepalive(end, i, new_Bad());
+                               changed = 1;
+                       }
+               } else {
+                       ir_node *block = get_nodes_block(ka);
+
+                       if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
+                               /* do NOT keep nodes in dead blocks */
+                               set_End_keepalive(end, i, new_Bad());
+                               changed = 1;
                        }
                }
        }
-       if (j != n) {
-               set_End_keepalives(end, j, in);
-               env.changed = 1;
-       }
+       env.changed |= changed;
 
-       clear_using_block_mark(irg);
-       clear_using_irn_visited(irg);
+       remove_End_Bads_and_doublets(end);
+
+
+       ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
 
        if (env.phis_moved) {
                /* Bad: when we moved Phi's, we might produce dead Phi nodes
@@ -823,6 +829,7 @@ restart:
                 */
                n = get_End_n_keepalives(end);
                if (n > 0) {
+                       NEW_ARR_A(ir_node *, in, n);
                        if (env.changed) {
                                /* Handle graph state if was changed. */
                                set_irg_outs_inconsistent(irg);
@@ -861,6 +868,7 @@ restart:
                set_irg_doms_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);
                set_irg_loopinfo_inconsistent(irg);
+               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
        }
 
 
@@ -876,3 +884,9 @@ restart:
 
        current_ir_graph = rem;
 }
+
+/* Creates an ir_graph pass for optimize_cf. */
+ir_graph_pass_t *optimize_cf_pass(const char *name)
+{
+       return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
+}  /* optimize_cf_pass */