Add pass constructor; C99 feature removed.
diff --git a/ir/opt/cfopt.c b/ir/opt/cfopt.c
index 64c60c1..e2a658a 100644
--- a/ir/opt/cfopt.c
+++ b/ir/opt/cfopt.c
@@ -50,6 +50,7 @@
 
 #include "irflag_t.h"
 #include "firmstat.h"
+#include "irpass.h"
 
 #include "iropt_dbg.h"
 
@@ -85,7 +86,8 @@
  * Note that the simple case where the Block has only these two
  * predecessors is already handled in equivalent_node_Block().
  */
-static int remove_senseless_conds(ir_node *bl) {
+static int remove_senseless_conds(ir_node *bl)
+{
        int i, j;
        int n = get_Block_n_cfgpreds(bl);
        int changed = 0;
@@ -102,7 +104,7 @@ static int remove_senseless_conds(ir_node *bl) {
                                ir_node *cond_j = skip_Proj(pred_j);
 
                                if (cond_j == cond_i) {
-                                       ir_node *jmp = new_r_Jmp(current_ir_graph, get_nodes_block(cond_i));
+                                       ir_node *jmp = new_r_Jmp(get_nodes_block(cond_i));
                                        set_irn_n(bl, i, jmp);
                                        set_irn_n(bl, j, new_Bad());
 
@@ -130,7 +132,8 @@ typedef struct _merge_env {
  * Therefore we also optimize at control flow operations, depending
  * how we first reach the Block.
  */
-static void merge_blocks(ir_node *node, void *ctx) {
+static void merge_blocks(ir_node *node, void *ctx)
+{
        int i;
        ir_node *new_block;
        merge_env *env = ctx;
@@ -198,7 +201,8 @@ static void merge_blocks(ir_node *node, void *ctx) {
  *
  * Must be run in the post walker.
  */
-static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
+static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
+{
        int i;
        int *changed = env;
 
@@ -224,8 +228,11 @@ static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
 
        *changed |= remove_senseless_conds(block);
 
-       /* clear the block mark of all blocks */
-       set_Block_removable(block);
+       /* clear the block mark of all non labeled blocks */
+       if (has_Block_entity(block))
+               set_Block_non_removable(block);
+       else
+               set_Block_removable(block);
 }
 
 /**
@@ -235,13 +242,14 @@ static void remove_unreachable_blocks_and_conds(ir_node *block, void *env) {
  * Links all Proj nodes to their predecessors.
  * Collects all switch-Conds in a list.
  */
-static void collect_nodes(ir_node *n, void *ctx) {
+static void collect_nodes(ir_node *n, void *ctx)
+{
        ir_opcode code = get_irn_opcode(n);
        merge_env *env = ctx;
 
        if (code == iro_Block) {
                /* mark the block as non-removable if it is labeled */
-               if (has_Block_label(n))
+               if (has_Block_entity(n))
                        set_Block_non_removable(n);
        } else {
                ir_node *b = get_nodes_block(n);
@@ -270,7 +278,8 @@ static void collect_nodes(ir_node *n, void *ctx) {
 }
 
 /** Returns true if pred is predecessor of block. */
-static int is_pred_of(ir_node *pred, ir_node *b) {
+static int is_pred_of(ir_node *pred, ir_node *b)
+{
        int i;
 
        for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
@@ -308,7 +317,8 @@ static int is_pred_of(ir_node *pred, ir_node *b) {
  *  To perform the test for pos, we must regard predecessors before pos
  *  as already removed.
  **/
-static int test_whether_dispensable(ir_node *b, int pos) {
+static int test_whether_dispensable(ir_node *b, int pos)
+{
        int i, j, n_preds = 1;
        ir_node *pred = get_Block_cfgpred_block(b, pos);
 
@@ -407,7 +417,8 @@ non_dispensable:
  * @@@ It is negotiable whether we should do this ... a copy of the Phi might
  * end up in the loop when removing the Phis.
  */
-static void optimize_blocks(ir_node *b, void *ctx) {
+static void optimize_blocks(ir_node *b, void *ctx)
+{
        int i, j, k, n, max_preds, n_preds, p_preds = -1;
        ir_node *pred, *phi, *next;
        ir_node **in;
@@ -559,7 +570,7 @@ static void optimize_blocks(ir_node *b, void *ctx) {
                        /* case 1: Do nothing */
                } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                        /* case 2: It's an empty block and not yet visited. */
-                       assert(get_Block_n_cfgpreds(b) > 1);
+                       assert(get_Block_n_cfgpreds(b) > 1 || has_Block_entity(b));
                        /* Else it should be optimized by equivalent_node. */
                        for (j = 0; j < get_Block_n_cfgpreds(pred); j++) {
                                ir_node *pred_X = get_Block_cfgpred(pred, j);
@@ -589,7 +600,8 @@ static void optimize_blocks(ir_node *b, void *ctx) {
  * Block walker: optimize all blocks using the default optimizations.
  * This removes Blocks with only a Jmp predecessor.
  */
-static void remove_simple_blocks(ir_node *block, void *ctx) {
+static void remove_simple_blocks(ir_node *block, void *ctx)
+{
        ir_node *new_blk = equivalent_node(block);
        merge_env *env = ctx;
 
@@ -611,7 +623,8 @@ static void remove_simple_blocks(ir_node *block, void *ctx) {
  *
  * Expects all Proj's linked to the cond node
  */
-static int handle_switch_cond(ir_node *cond) {
+static int handle_switch_cond(ir_node *cond)
+{
        ir_node *sel = get_Cond_selector(cond);
 
        ir_node *proj1 = get_irn_link(cond);
@@ -622,9 +635,9 @@ static int handle_switch_cond(ir_node *cond) {
 
        if (proj2 == NULL) {
                /* this Cond has only one Proj: must be the defProj */
-               assert(get_Cond_defaultProj(cond) == get_Proj_proj(proj1));
+               assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
                /* convert it into a Jmp */
-               jmp = new_r_Jmp(current_ir_graph, blk);
+               jmp = new_r_Jmp(blk);
                exchange(proj1, jmp);
                return 1;
        } else if (get_irn_link(proj2) == NULL) {
@@ -635,12 +648,12 @@ static int handle_switch_cond(ir_node *cond) {
                if (tv != tarval_bad) {
                        /* we have a constant switch */
                        long num     = get_tarval_long(tv);
-                       long def_num = get_Cond_defaultProj(cond);
+                       long def_num = get_Cond_default_proj(cond);
 
                        if (def_num == get_Proj_proj(proj1)) {
                                /* first one is the defProj */
                                if (num == get_Proj_proj(proj2)) {
-                                       jmp = new_r_Jmp(current_ir_graph, blk);
+                                       jmp = new_r_Jmp(blk);
                                        exchange(proj2, jmp);
                                        exchange(proj1, new_Bad());
                                        return 1;
@@ -648,7 +661,7 @@ static int handle_switch_cond(ir_node *cond) {
                        } else if (def_num == get_Proj_proj(proj2)) {
                                /* second one is the defProj */
                                if (num == get_Proj_proj(proj1)) {
-                                       jmp = new_r_Jmp(current_ir_graph, blk);
+                                       jmp = new_r_Jmp(blk);
                                        exchange(proj1, jmp);
                                        exchange(proj2, new_Bad());
                                        return 1;
@@ -656,12 +669,12 @@ static int handle_switch_cond(ir_node *cond) {
                        } else {
                                /* neither: strange, Cond was not optimized so far */
                                if (num == get_Proj_proj(proj1)) {
-                                       jmp = new_r_Jmp(current_ir_graph, blk);
+                                       jmp = new_r_Jmp(blk);
                                        exchange(proj1, jmp);
                                        exchange(proj2, new_Bad());
                                        return 1;
                                } else if (num == get_Proj_proj(proj2)) {
-                                       jmp = new_r_Jmp(current_ir_graph, blk);
+                                       jmp = new_r_Jmp(blk);
                                        exchange(proj2, jmp);
                                        exchange(proj1, new_Bad());
                                        return 1;
@@ -688,8 +701,9 @@ static int handle_switch_cond(ir_node *cond) {
  * We use the mark flag to mark removable blocks in the first
  * phase.
  */
-void optimize_cf(ir_graph *irg) {
-       int i, j, n;
+void optimize_cf(ir_graph *irg)
+{
+       int i, j, n, changed;
        ir_node **in = NULL;
        ir_node *cond, *end = get_irg_end(irg);
        ir_graph *rem = current_ir_graph;
@@ -719,6 +733,7 @@ restart:
        irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
 
        /* fix the keep-alives */
+       changed = 0;
        for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
                ir_node *ka = get_End_keepalive(end, i);
 
@@ -726,15 +741,19 @@ restart:
                        /* do NOT keep dead blocks */
                        if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
                                set_End_keepalive(end, i, new_Bad());
-                               env.changed = 1;
+                               changed = 1;
+                       }
+               } else {
+                       ir_node *block = get_nodes_block(ka);
+
+                       if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
+                               /* do NOT keep nodes in dead blocks */
+                               set_End_keepalive(end, i, new_Bad());
+                               changed = 1;
                        }
-               } else if (is_Block_dead(get_nodes_block(ka)) ||
-                          get_Block_dom_depth(get_nodes_block(ka)) < 0) {
-                       /* do NOT keep nodes in dead blocks */
-                       set_End_keepalive(end, i, new_Bad());
-                       env.changed = 1;
                }
        }
+       env.changed |= changed;
 
        ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
 
@@ -749,6 +768,7 @@ restart:
                set_irg_doms_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);
                set_irg_loopinfo_inconsistent(irg);
+               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
                env.changed = 0;
        }
 
@@ -768,44 +788,37 @@ restart:
        /* Optimize the standard code. */
        env.changed = 0;
        assure_doms(irg);
-       irg_block_walk(get_irg_end_block(irg), optimize_blocks, remove_simple_blocks, &env);
-
-       /* Walk all keep alives, optimize them if block, add to new in-array
-          for end if useful. */
-       n  = get_End_n_keepalives(end);
-       if (n > 0)
-               NEW_ARR_A(ir_node *, in, n);
+       irg_block_walk_graph(irg, optimize_blocks, remove_simple_blocks, &env);
 
        /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
        ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
        inc_irg_visited(irg);
 
-       /* fix the keep alive */
-       for (i = j = 0; i < n; i++) {
+       /* fix the keep-alives again */
+       changed = 0;
+       for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
                ir_node *ka = get_End_keepalive(end, i);
 
-               if (!irn_visited(ka)) {
-                       if (is_Block(ka)) {
-                               if (!Block_block_visited(ka)) {
-                                       /* irg_block_walk() will increase the block visited flag, but we must visit only
-                                          these blocks that are not visited yet, so decrease it first. */
-                                       set_irg_block_visited(irg, get_irg_block_visited(irg) - 1);
-                                       irg_block_walk(ka, optimize_blocks, remove_simple_blocks, &env.changed);
-                                       mark_irn_visited(ka);
-                                       in[j++] = ka;
-                               }
-                       } else {
-                               mark_irn_visited(ka);
-                               /* don't keep alive dead blocks */
-                               if (!is_Bad(ka) && !is_Block_dead(get_nodes_block(ka)))
-                                       in[j++] = ka;
+               if (is_Block(ka)) {
+                       /* do NOT keep dead blocks */
+                       if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
+                               set_End_keepalive(end, i, new_Bad());
+                               changed = 1;
+                       }
+               } else {
+                       ir_node *block = get_nodes_block(ka);
+
+                       if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
+                               /* do NOT keep nodes in dead blocks */
+                               set_End_keepalive(end, i, new_Bad());
+                               changed = 1;
                        }
                }
        }
-       if (j != n) {
-               set_End_keepalives(end, j, in);
-               env.changed = 1;
-       }
+       env.changed |= changed;
+
+       remove_End_Bads_and_doublets(end);
+
 
        ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
 
@@ -816,6 +829,7 @@ restart:
                 */
                n = get_End_n_keepalives(end);
                if (n > 0) {
+                       NEW_ARR_A(ir_node *, in, n);
                        if (env.changed) {
                                /* Handle graph state if was changed. */
                                set_irg_outs_inconsistent(irg);
@@ -854,6 +868,7 @@ restart:
                set_irg_doms_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);
                set_irg_loopinfo_inconsistent(irg);
+               set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
        }
 
 
@@ -869,3 +884,9 @@ restart:
 
        current_ir_graph = rem;
 }
+
+/* Creates an ir_graph pass for optimize_cf. */
+ir_graph_pass_t *optimize_cf_pass(const char *name)
+{
+       return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
+}  /* optimize_cf_pass */
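
For reference, a minimal usage sketch of the new constructor. Only optimize_cf(), optimize_cf_pass() and def_graph_pass() come from the patch above; the umbrella header path and the run_graph_pass() driver call are assumptions standing in for whatever pass infrastructure the surrounding project provides.

#include <libfirm/firm.h>	/* assumed install path of the umbrella header */

/* Old style: run the control flow optimization directly on one graph. */
static void run_directly(ir_graph *irg)
{
	optimize_cf(irg);
}

/* New style: wrap the optimization into an ir_graph_pass_t so that a pass
 * manager can schedule it.  run_graph_pass() below is a hypothetical driver
 * call, not something this patch provides. */
static void run_as_pass(ir_graph *irg)
{
	ir_graph_pass_t *pass = optimize_cf_pass(NULL);	/* NULL picks the default name "optimize_cf" */

	/* run_graph_pass(pass, irg); */	/* hypothetical: hand the pass to the driver */
	(void)pass;
	(void)irg;
}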