Simplify handling of unreachable code
author     Matthias Braun <matze@braunis.de>
           Thu, 21 Apr 2011 13:58:53 +0000 (15:58 +0200)
committer  Matthias Braun <matze@braunis.de>
           Thu, 28 Apr 2011 10:45:12 +0000 (12:45 +0200)
See http://www.libfirm.org/Unreachable_Code for details

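In short: the explicit dead-block marker (set_Block_dead / is_Block_dead) is removed. An unreachable block is now simply exchanged with the graph's Bad node, and phases that may temporarily leave Bad block inputs behind announce this via the new IR_GRAPH_STATE_BAD_BLOCK graph state bit. A minimal sketch of the new idiom, using only functions that appear in the patch below; the callback name is illustrative and the guard mirrors kill_dead_blocks() in ir/ir/irgopt.c:

    /* Illustrative walker callback: what used to be set_Block_dead(block)
     * becomes an exchange with the graph's Bad node. */
    static void replace_unreachable_block(ir_node *block, void *env)
    {
        (void) env;
        /* no dominance info means the block is unreachable */
        if (get_Block_dom_depth(block) < 0) {
            ir_graph *irg = get_irn_irg(block);
            exchange(block, get_irg_bad(irg));
        }
        /* consumers that used to test is_Block_dead(pred_block) now just
         * test is_Bad() on the control-flow predecessor instead */
    }
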
18 files changed:
include/libfirm/irgraph.h
include/libfirm/irnode.h
ir/ana/irdom.c
ir/ir/ircons.c
ir/ir/irdump.c
ir/ir/irgopt.c
ir/ir/irnode.c
ir/ir/irnode_t.h
ir/ir/iropt.c
ir/ir/irtypes.h
ir/ir/irverify.c
ir/opt/cfopt.c
ir/opt/code_placement.c
ir/opt/combo.c
ir/opt/jumpthreading.c
ir/opt/ldstopt.c
ir/opt/reassoc.c
scripts/gen_ir.py

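Passes bracket the phases in which Bad block inputs may appear with the new graph state bit; optimize_graph_df() in ir/ir/irgopt.c below sets it before running local optimisations and clears it after the End keep-alives have been cleaned up. A sketch of that protocol, with run_local_opts() as a placeholder for the real work:

    /* Hypothetical pass skeleton mirroring optimize_graph_df() below. */
    static void run_pass_with_bad_blocks(ir_graph *irg)
    {
        /* nodes may temporarily get Bad as their block input */
        set_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);

        run_local_opts(irg);  /* placeholder for the actual optimisation */

        /* invariant "every block input is a Block" holds again */
        clear_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
    }
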
index 97ecbc0..c261d42 100644
@@ -520,6 +520,7 @@ typedef enum {
        IR_GRAPH_STATE_KEEP_MUX      = 1U << 0,  /**< should perform no further optimisations on Mux nodes */
        IR_GRAPH_STATE_ARCH_DEP      = 1U << 1,  /**< should not construct more nodes which irarch potentially breaks down */
        IR_GRAPH_STATE_BCONV_ALLOWED = 1U << 2,  /**< Conv(mode_b) to Iu is allowed as set command */
+       IR_GRAPH_STATE_BAD_BLOCK     = 1U << 3,  /**< a node may have Bad in its block input */
 } ir_graph_state_t;
 ENUM_BITSET(ir_graph_state_t)
 
index 1f657e4..99cc27d 100644
@@ -298,13 +298,6 @@ FIRM_API void set_Block_matured(ir_node *block, int matured);
 FIRM_API ir_visited_t get_Block_block_visited(const ir_node *block);
 FIRM_API void set_Block_block_visited(ir_node *block, ir_visited_t visit);
 
-/**
- * Marks a block as dead but do not replace it with a Bad node.
- * Dead blocks are removed in the con
- */
-FIRM_API ir_node *set_Block_dead(ir_node *block);
-FIRM_API int is_Block_dead(const ir_node *block);
-
 /* For this current_ir_graph must be set. */
 FIRM_API void mark_Block_block_visited(ir_node *node);
 FIRM_API int Block_block_visited(const ir_node *node);
index 677ddf0..7db7234 100644
@@ -471,9 +471,6 @@ static void count_and_init_blocks_pdom(ir_node *bl, void *env)
 {
        int *n_blocks = (int *) env;
 
-       if (is_Block_dead(bl))
-               return;
-
        (*n_blocks) ++;
 
        memset(get_pdom_info(bl), 0, sizeof(ir_dom_info));
@@ -517,9 +514,6 @@ static void init_tmp_dom_info(ir_node *bl, tmp_dom_info *parent,
        tmp_dom_info *tdi;
        int i;
 
-       if (is_Block_dead(bl))
-               return;
-
        assert(is_Block(bl));
        if (Block_block_visited(bl))
          return;
@@ -559,9 +553,6 @@ static void init_tmp_pdom_info(ir_node *bl, tmp_dom_info *parent,
        tmp_dom_info *tdi;
        int i;
 
-       if (is_Block_dead(bl))
-               return;
-
        assert(is_Block(bl));
        if (get_irg_block_visited(current_ir_graph) == get_Block_block_visited(bl))
          return;
@@ -640,8 +631,6 @@ inline static void dom_link(tmp_dom_info *v, tmp_dom_info *w)
 static void count_and_init_blocks_dom(ir_node *bl, void *env)
 {
        int *n_blocks = (int *) env;
-       if (is_Block_dead(bl))
-               return;
 
        (*n_blocks) ++;
 
@@ -715,7 +704,7 @@ void compute_doms(ir_graph *irg)
                                ir_node *pred = get_irn_n(end, j);
                                tmp_dom_info *u;
 
-                               if (!is_Block(pred) || get_Block_dom_pre_num(pred) == -1 || is_Block_dead(pred))
+                               if (!is_Block(pred) || get_Block_dom_pre_num(pred) == -1)
                                        continue;   /* control-dead */
 
                                u = dom_eval (&tdi_list[get_Block_dom_pre_num(pred)]);
index 070d5c0..f38994b 100644
@@ -501,7 +501,6 @@ ir_node *new_rd_immBlock(dbg_info *dbgi, ir_graph *irg)
        res = new_ir_node(dbgi, irg, NULL, op_Block, mode_BB, -1, NULL);
 
        set_Block_matured(res, 0);
-       res->attr.block.is_dead     = 0;
        res->attr.block.irg.irg     = irg;
        res->attr.block.backedge    = NULL;
        res->attr.block.in_cg       = NULL;
index d1c5eb6..f27bb1d 100644
@@ -700,8 +700,6 @@ void dump_node_opcode(FILE *F, ir_node *n)
                fprintf(F, "%s", get_irn_opname(n));
                break;
        case iro_Block:
-               if (is_Block_dead(n))
-                       fputs("Dead ", F);
                if (n == get_irg_start_block(get_irn_irg(n)))
                        fputs("Start ", F);
                if (n == get_irg_end_block(get_irn_irg(n)))
@@ -1098,10 +1096,7 @@ static void dump_node_vcgattr(FILE *F, ir_node *node, ir_node *local, int bad)
                print_vcg_color(F, ird_color_error);
                break;
        case iro_Block:
-               if (is_Block_dead(n))
-                       print_vcg_color(F, ird_color_dead_block_background);
-               else
-                       print_vcg_color(F, ird_color_block_background);
+               print_vcg_color(F, ird_color_block_background);
                break;
        case iro_Phi:
                print_vcg_color(F, ird_color_phi);
@@ -1559,8 +1554,6 @@ static void dump_whole_block(FILE *F, ir_node *block)
        /* colorize blocks */
        if (! get_Block_matured(block))
                color = ird_color_block_background;
-       if (is_Block_dead(block))
-               color = ird_color_dead_block_background;
 
        fprintf(F, "\" status:clustered ");
        print_vcg_color(F, color);
index 9c06123..499269e 100644
@@ -112,7 +112,8 @@ static void kill_dead_blocks(ir_node *block, void *env)
                 * Note that the new dominance code correctly handles
                 * the End block, i.e. it is always reachable from Start
                 */
-               set_Block_dead(block);
+               ir_graph *irg = get_irn_irg(block);
+               exchange(block, get_irg_bad(irg));
        }
 }
 
@@ -122,9 +123,6 @@ void local_optimize_graph(ir_graph *irg)
        ir_graph *rem = current_ir_graph;
        current_ir_graph = irg;
 
-       if (get_irg_dom_state(irg) == dom_consistent)
-               irg_block_walk_graph(irg, NULL, kill_dead_blocks, NULL);
-
        do_local_optimize(get_irg_end(irg));
 
        current_ir_graph = rem;
@@ -186,6 +184,7 @@ int optimize_graph_df(ir_graph *irg)
 
        if (get_opt_global_cse())
                set_irg_pinned(irg, op_pin_state_floats);
+       set_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
 
        /* Clean the value_table in irg for the CSE. */
        new_identities(irg);
@@ -226,6 +225,8 @@ int optimize_graph_df(ir_graph *irg)
        end  = get_irg_end(irg);
        remove_End_Bads_and_doublets(end);
 
+       clear_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK);
+
        current_ir_graph = rem;
        return changed;
 }
index b9c72ba..08acb59 100644
@@ -586,16 +586,6 @@ int (Block_block_visited)(const ir_node *node)
        return _Block_block_visited(node);
 }
 
-ir_node *(set_Block_dead)(ir_node *block)
-{
-       return _set_Block_dead(block);
-}
-
-int (is_Block_dead)(const ir_node *block)
-{
-       return _is_Block_dead(block);
-}
-
 ir_extblk *get_Block_extbb(const ir_node *block)
 {
        ir_extblk *res;
index aa70bbd..ed0a3c9 100644
@@ -420,29 +420,6 @@ static inline int _Block_block_visited(const ir_node *node)
        return node->attr.block.block_visited >= get_irg_block_visited(irg);
 }
 
-static inline ir_node *_set_Block_dead(ir_node *block)
-{
-       assert(_get_irn_op(block) == op_Block);
-       block->attr.block.dom.dom_depth = -1;
-       block->attr.block.is_dead = 1;
-       return block;
-}
-
-static inline int _is_Block_dead(const ir_node *block)
-{
-       ir_op *op = _get_irn_op(block);
-
-       /* we can have Bad, Anchor and Block nodes as block input */
-       if (op == op_Bad) {
-               return 1;
-       } else if (op == op_Anchor) {
-               return 0;
-       } else {
-               assert(op == op_Block);
-               return block->attr.block.is_dead;
-       }
-}
-
 static inline ir_graph *_get_Block_irg(const ir_node *block)
 {
        assert(is_Block(block));
@@ -575,6 +552,7 @@ static inline ir_node *_get_Phi_next(const ir_node *phi)
 /** Add a Phi node to the list of Block Phi's. */
 static inline void _add_Block_phi(ir_node *block, ir_node *phi)
 {
+       assert(_is_Block(block));
        _set_Phi_next(phi, _get_Block_phis(block));
        _set_Block_phis(block, phi);
 }
@@ -640,8 +618,6 @@ void init_irnode(void);
 #define set_Block_block_visited(node, visit)  _set_Block_block_visited(node, visit)
 #define mark_Block_block_visited(node)        _mark_Block_block_visited(node)
 #define Block_block_visited(node)             _Block_block_visited(node)
-#define set_Block_dead(block)                 _set_Block_dead(block)
-#define is_Block_dead(block)                  _is_Block_dead(block)
 #define get_Block_irg(block)                  _get_Block_irg(block)
 #define get_Const_tarval(node)                _get_Const_tarval(node)
 #define is_Const_null(node)                   _is_Const_null(node)
index f6b38c8..ac017a3 100644
@@ -672,10 +672,6 @@ static ir_op_ops *firm_set_default_computed_value(ir_opcode code, ir_op_ops *ops
  *
  * If all predecessors of a block are bad or lies in a dead
  * block, the current block is dead as well.
- *
- * Note, that blocks are NEVER turned into Bad's, instead
- * the dead_block flag is set. So, never test for is_Bad(block),
- * always use is_dead_Block(block).
  */
 static ir_node *equivalent_node_Block(ir_node *n)
 {
@@ -684,7 +680,7 @@ static ir_node *equivalent_node_Block(ir_node *n)
        ir_graph *irg;
 
        /* don't optimize dead or labeled blocks */
-       if (is_Block_dead(n) || has_Block_entity(n))
+       if (has_Block_entity(n))
                return n;
 
        n_preds = get_Block_n_cfgpreds(n);
@@ -696,31 +692,14 @@ static ir_node *equivalent_node_Block(ir_node *n)
        irg = get_irn_irg(n);
 
        /* Straightening: a single entry Block following a single exit Block
-          can be merged, if it is not the Start block. */
-       /* !!! Beware, all Phi-nodes of n must have been optimized away.
-          This should be true, as the block is matured before optimize is called.
-          But what about Phi-cycles with the Phi0/Id that could not be resolved?
-          Remaining Phi nodes are just Ids. */
+        * can be merged. */
        if (n_preds == 1) {
-               ir_node *pred = skip_Proj(get_Block_cfgpred(n, 0));
+               ir_node *pred = get_Block_cfgpred(n, 0);
 
                if (is_Jmp(pred)) {
-                       ir_node *predblock = get_nodes_block(pred);
-                       if (predblock == oldn) {
-                               /* Jmp jumps into the block it is in -- deal self cycle. */
-                               n = set_Block_dead(n);
-                               DBG_OPT_DEAD_BLOCK(oldn, n);
-                       } else {
-                               n = predblock;
-                               DBG_OPT_STG(oldn, n);
-                       }
-               } else if (is_Cond(pred)) {
-                       ir_node *predblock = get_nodes_block(pred);
-                       if (predblock == oldn) {
-                               /* Jmp jumps into the block it is in -- deal self cycle. */
-                               n = set_Block_dead(n);
-                               DBG_OPT_DEAD_BLOCK(oldn, n);
-                       }
+                       ir_node *pred_block = get_nodes_block(pred);
+                       DBG_OPT_STG(n, pred_block);
+                       return pred_block;
                }
        } else if (n_preds == 2) {
                /* Test whether Cond jumps twice to this block
@@ -735,64 +714,33 @@ static ir_node *equivalent_node_Block(ir_node *n)
 
                    if (cond == get_Proj_pred(b) && is_Cond(cond) &&
                        get_irn_mode(get_Cond_selector(cond)) == mode_b) {
-                               /* Also a single entry Block following a single exit Block.  Phis have
-                                  twice the same operand and will be optimized away. */
+                               /* Also a single entry Block following a single exit Block.
+                                * Phis have twice the same operand and will be optimized away.
+                                */
                                n = get_nodes_block(cond);
                                DBG_OPT_IFSIM1(oldn, a, b, n);
                        }
                }
-       } else if (get_opt_unreachable_code() &&
-                  (n != get_irg_start_block(irg)) &&
-                  (n != get_irg_end_block(irg))) {
+       } else if (is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) {
                int i;
+               int n_cfgpreds = get_Block_n_cfgpreds(n);
 
-               /* If all inputs are dead, this block is dead too, except if it is
-                  the start or end block.  This is one step of unreachable code
-                  elimination */
-               for (i = get_Block_n_cfgpreds(n) - 1; i >= 0; --i) {
+               for (i = 0; i < n_cfgpreds; ++i) {
                        ir_node *pred = get_Block_cfgpred(n, i);
-                       ir_node *pred_blk;
-
-                       if (is_Bad(pred)) continue;
-                       pred_blk = get_nodes_block(skip_Proj(pred));
-
-                       if (is_Block_dead(pred_blk)) continue;
-
-                       if (pred_blk != n) {
-                               /* really found a living input */
+                       if (!is_Bad(pred))
                                break;
-                       }
                }
-               if (i < 0) {
-                       n = set_Block_dead(n);
-                       DBG_OPT_DEAD_BLOCK(oldn, n);
+               /* only bad unreachable inputs? It's unreachable code (unless it is the
+                * start or end block) */
+               if (i >= n_cfgpreds && n != get_irg_start_block(irg)
+                   && n != get_irg_end_block(irg)) {
+                   return get_irg_bad(irg);
                }
        }
 
        return n;
 }  /* equivalent_node_Block */
 
-/**
- * Returns a equivalent node for a Jmp, a Bad :-)
- * Of course this only happens if the Block of the Jmp is dead.
- */
-static ir_node *equivalent_node_Jmp(ir_node *n)
-{
-       ir_node *oldn = n;
-
-       /* unreachable code elimination */
-       if (is_Block_dead(get_nodes_block(n))) {
-               ir_graph *irg = get_irn_irg(n);
-               n = get_irg_bad(irg);
-               DBG_OPT_DEAD_BLOCK(oldn, n);
-       }
-       return n;
-}  /* equivalent_node_Jmp */
-
-/** Raise is handled in the same way as Jmp. */
-#define equivalent_node_Raise   equivalent_node_Jmp
-
-
 /* We do not evaluate Cond here as we replace it by a new node, a Jmp.
    See transform_node_Proj_Cond(). */
 
@@ -1309,31 +1257,18 @@ static ir_node *equivalent_node_Phi(ir_node *n)
        n_preds = get_Phi_n_preds(n);
 
        block = get_nodes_block(n);
-       /* Control dead */
-       if (is_Block_dead(block)) {
-               ir_graph *irg = get_irn_irg(n);
-               return get_irg_bad(irg);
-       }
 
-       if (n_preds == 0) return n;           /* Phi of dead Region without predecessors. */
+       /* Phi of dead Region without predecessors. */
+       if (n_preds == 0)
+               return n;
 
        /* Find first non-self-referencing input */
        for (i = 0; i < n_preds; ++i) {
                first_val = get_Phi_pred(n, i);
-               if (   (first_val != n)                            /* not self pointer */
-#if 0
-                   /* BEWARE: when the if is changed to 1, Phis will ignore their Bad
-                    * predecessors. Then, Phi nodes in unreachable code might be removed,
-                    * causing nodes pointing to themselev (Adds for instance).
-                    * This is really bad and causes endless recursion on several
-                    * code pathes, so we do NOT optimize such code.
-                    * This is not that bad as it sounds, optimize_cf() removes bad control flow
-                    * (and bad Phi predecessors), so live code is optimized later.
-                    */
-                       && (! is_Bad(get_Block_cfgpred(block, i)))
-#endif
-                  ) {        /* value not dead */
-                       break;          /* then found first value. */
+               /* not self pointer */
+               if (first_val != n) {
+                       /* then found first value. */
+                       break;
                }
        }
 
@@ -1347,13 +1282,7 @@ static ir_node *equivalent_node_Phi(ir_node *n)
        are non-self-referencing */
        while (++i < n_preds) {
                ir_node *scnd_val = get_Phi_pred(n, i);
-               if (   (scnd_val != n)
-                   && (scnd_val != first_val)
-#if 0
-                   /* see above */
-                   && (! is_Bad(get_Block_cfgpred(block, i)))
-#endif
-                       ) {
+               if (scnd_val != n && scnd_val != first_val) {
                        break;
                }
        }
@@ -1598,14 +1527,6 @@ static ir_node *equivalent_node_Proj_Store(ir_node *proj)
 static ir_node *equivalent_node_Proj(ir_node *proj)
 {
        ir_node *n = get_Proj_pred(proj);
-
-       if (get_irn_mode(proj) == mode_X) {
-               if (is_Block_dead(get_nodes_block(n))) {
-                       /* Remove dead control flow -- early gigo(). */
-                       ir_graph *irg = get_irn_irg(proj);
-                       return get_irg_bad(irg);
-               }
-       }
        if (n->op->ops.equivalent_node_Proj)
                return n->op->ops.equivalent_node_Proj(proj);
        return proj;
@@ -1785,8 +1706,6 @@ static ir_op_ops *firm_set_default_equivalent_node(ir_opcode code, ir_op_ops *op
 
        switch (code) {
        CASE(Block);
-       CASE(Jmp);
-       CASE(Raise);
        CASE(Eor);
        CASE(Add);
        CASE(Shl);
@@ -4566,14 +4485,24 @@ static ir_node *transform_node_Proj(ir_node *proj)
        return proj;
 }  /* transform_node_Proj */
 
-/**
- * Move Confirms down through Phi nodes.
- */
 static ir_node *transform_node_Phi(ir_node *phi)
 {
-       int i, n;
-       ir_mode *mode = get_irn_mode(phi);
+       int       n     = get_irn_arity(phi);
+       ir_mode  *mode  = get_irn_mode(phi);
+       ir_node  *block = get_nodes_block(phi);
+       ir_graph *irg   = get_irn_irg(phi);
+       ir_node  *bad   = get_irg_bad(irg);
+       int       i;
 
+       /* Set phi-operands for bad-block inputs to bad */
+       for (i = 0; i < n; ++i) {
+               ir_node *pred = get_Block_cfgpred(block, i);
+               if (!is_Bad(pred))
+                       continue;
+               set_irn_n(phi, i, bad);
+       }
+
+       /* Move Confirms down through Phi nodes. */
        if (mode_is_reference(mode)) {
                n = get_irn_arity(phi);
 
@@ -5385,14 +5314,7 @@ static ir_node *transform_node_End(ir_node *n)
 
        for (i = j = 0; i < n_keepalives; ++i) {
                ir_node *ka = get_End_keepalive(n, i);
-               if (is_Block(ka)) {
-                       if (! is_Block_dead(ka)) {
-                               in[j++] = ka;
-                       }
-                       continue;
-               } else if (is_irn_pinned_in_irg(ka) && is_Block_dead(get_nodes_block(ka))) {
-                       continue;
-               } else if (is_Bad(ka)) {
+               if (is_Bad(ka)) {
                        /* no need to keep Bad */
                        continue;
                }
@@ -5714,7 +5636,7 @@ static ir_node *transform_node_Load(ir_node *n)
                        ir_node  *block = get_nodes_block(n);
                        ir_node  *jmp   = new_r_Jmp(block);
                        ir_graph *irg   = get_irn_irg(n);
-                       ir_node  *bad   = new_r_Bad(irg);
+                       ir_node  *bad   = get_irg_bad(irg);
                        ir_mode  *mode  = get_Load_mode(n);
                        ir_node  *res   = new_r_Proj(pred_load, mode, pn_Load_res);
                        ir_node  *in[pn_Load_max] = { mem, jmp, bad, res };
@@ -5735,7 +5657,7 @@ static ir_node *transform_node_Load(ir_node *n)
                        ir_node  *block = get_nodes_block(n);
                        ir_node  *jmp   = new_r_Jmp(block);
                        ir_graph *irg   = get_irn_irg(n);
-                       ir_node  *bad   = new_r_Bad(irg);
+                       ir_node  *bad   = get_irg_bad(irg);
                        ir_node  *res   = value;
                        ir_node  *in[pn_Load_max] = { mem, jmp, bad, res };
                        ir_node  *tuple = new_r_Tuple(block, ARRAY_SIZE(in), in);
@@ -6350,91 +6272,37 @@ void visit_all_identities(ir_graph *irg, irg_walk_func visit, void *env)
  */
 static ir_node *gigo(ir_node *node)
 {
-       int i, irn_arity;
        ir_op *op = get_irn_op(node);
 
-       /* remove garbage blocks by looking at control flow that leaves the block
-          and replacing the control flow by Bad. */
-       if (get_irn_mode(node) == mode_X) {
-               ir_node  *block = get_nodes_block(skip_Proj(node));
-               ir_graph *irg   = get_irn_irg(block);
-
-               /* Don't optimize nodes in immature blocks. */
-               if (!get_Block_matured(block))
-                       return node;
-               /* Don't optimize End, may have Bads. */
-               if (op == op_End) return node;
-
-               if (is_Block(block)) {
-                       if (is_Block_dead(block)) {
-                               /* control flow from dead block is dead */
-                               return get_irg_bad(irg);
-                       }
-
-                       for (i = get_irn_arity(block) - 1; i >= 0; --i) {
-                               if (!is_Bad(get_irn_n(block, i)))
-                                       break;
-                       }
-                       if (i < 0) {
-                               ir_graph *irg = get_irn_irg(block);
-                               /* the start block is never dead */
-                               if (block != get_irg_start_block(irg)
-                                       && block != get_irg_end_block(irg)) {
-                                       /*
-                                        * Do NOT kill control flow without setting
-                                        * the block to dead of bad things can happen:
-                                        * We get a Block that is not reachable be irg_block_walk()
-                                        * but can be found by irg_walk()!
-                                        */
-                                       set_Block_dead(block);
-                                       return get_irg_bad(irg);
-                               }
-                       }
-               }
+       /* Nodes in bad blocks are bad.
+        * Beware: we can only read the block of a non-floating node. */
+       if (op != op_Block && is_irn_pinned_in_irg(node)
+           && is_Bad(get_nodes_block(node))) {
+           ir_graph *irg = get_irn_irg(node);
+               return get_irg_bad(irg);
        }
 
        /* Blocks, Phis and Tuples may have dead inputs, e.g., if one of the
           blocks predecessors is dead. */
-       if (op != op_Block && op != op_Phi && op != op_Tuple && op != op_Anchor) {
+       if (op != op_Block && op != op_Phi && op != op_Tuple && op != op_Anchor
+                       && op != op_Sync && op != op_End) {
                ir_graph *irg = get_irn_irg(node);
-               irn_arity = get_irn_arity(node);
-
-               /*
-                * Beware: we can only read the block of a non-floating node.
-                */
-               if (is_irn_pinned_in_irg(node) &&
-                       is_Block_dead(get_nodes_block(skip_Proj(node))))
-                       return get_irg_bad(irg);
+               int irn_arity = get_irn_arity(node);
+               int i;
 
                for (i = 0; i < irn_arity; i++) {
                        ir_node *pred = get_irn_n(node, i);
 
-                       if (is_Bad(pred))
-                               return get_irg_bad(irg);
-#if 0
-                       /* Propagating Unknowns here seems to be a bad idea, because
-                          sometimes we need a node as a input and did not want that
-                          it kills its user.
-                          However, it might be useful to move this into a later phase
-                          (if you think that optimizing such code is useful). */
-                       if (is_Unknown(pred) && mode_is_data(get_irn_mode(node)))
-                               return new_r_Unknown(irg, get_irn_mode(node));
-#endif
-               }
-       }
-#if 0
-       /* With this code we violate the agreement that local_optimize
-          only leaves Bads in Block, Phi and Tuple nodes. */
-       /* If Block has only Bads as predecessors it's garbage. */
-       /* If Phi has only Bads as predecessors it's garbage. */
-       if ((op == op_Block && get_Block_matured(node)) || op == op_Phi)  {
-               irn_arity = get_irn_arity(node);
-               for (i = 0; i < irn_arity; i++) {
-                       if (!is_Bad(get_irn_n(node, i))) break;
+                       if (is_Bad(pred)) {
+                               /* be careful not to kill cfopts too early or we might violate
+                                * the 1 cfop per block property */
+                               if (!is_cfop(node)
+                                               || is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK))
+                                       return get_irg_bad(irg);
+                       }
                }
-               if (i == irn_arity) node = get_irg_bad(irg);
        }
-#endif
+
        return node;
 }  /* gigo */
 
index cd3bfcf..23e2cc0 100644
@@ -148,7 +148,6 @@ typedef struct block_attr {
        ir_visited_t block_visited; /**< For the walker that walks over all blocks. */
        /* Attributes private to construction: */
        unsigned is_matured:1;      /**< If set, all in-nodes of the block are fixed. */
-       unsigned is_dead:1;         /**< If set, the block is dead (and could be replace by a Bad. */
        unsigned marked:1;          /**< Can be set/unset to temporary mark a block. */
        ir_node **graph_arr;        /**< An array to store all parameters. */
        /* Attributes holding analyses information */
index 983b7e7..fa278fa 100644
@@ -1711,20 +1711,23 @@ static int check_dominance_for_node(ir_node *use)
                ir_node *bl = get_nodes_block(use);
 
                for (i = get_irn_arity(use) - 1; i >= 0; --i) {
-                       ir_node *def    = get_irn_n(use, i);
-                       ir_node *def_bl = get_nodes_block(def);
-                       ir_node *use_bl = bl;
+                       ir_node  *def    = get_irn_n(use, i);
+                       ir_node  *def_bl = get_nodes_block(def);
+                       ir_node  *use_bl = bl;
                        ir_graph *irg;
 
-                       /* ignore dead definition blocks, will be removed */
-                       if (is_Block_dead(def_bl) || get_Block_dom_depth(def_bl) == -1)
+                       /* we have no dominance relation for unreachable blocks, so we can't
+                        * check the dominance property there */
+                       if (!is_Block(def_bl) || get_Block_dom_depth(def_bl) == -1)
                                continue;
 
-                       if (is_Phi(use))
+                       if (is_Phi(use)) {
+                               if (is_Bad(def))
+                                       continue;
                                use_bl = get_Block_cfgpred_block(bl, i);
+                       }
 
-                       /* ignore dead use blocks, will be removed */
-                       if (is_Block_dead(use_bl) || get_Block_dom_depth(use_bl) == -1)
+                       if (!is_Block(use_bl) || get_Block_dom_depth(use_bl) == -1)
                                continue;
 
                        irg = get_irn_irg(use);
@@ -1744,7 +1747,6 @@ static int check_dominance_for_node(ir_node *use)
 /* Tests the modes of n and its predecessors. */
 int irn_verify_irg(ir_node *n, ir_graph *irg)
 {
-       int i;
        ir_op *op;
 
        if (!get_node_verification_mode())
@@ -1775,15 +1777,6 @@ int irn_verify_irg(ir_node *n, ir_graph *irg)
 
        op = get_irn_op(n);
 
-       /* We don't want to test nodes whose predecessors are Bad,
-          as we would have to special case that for each operation. */
-       if (op != op_Phi && op != op_Block) {
-               for (i = get_irn_arity(n) - 1; i >= 0; --i) {
-                       if (is_Bad(get_irn_n(n, i)))
-                               return 1;
-               }
-       }
-
        if (_get_op_pinned(op) >= op_pin_state_exc_pinned) {
                op_pin_state state = get_irn_pinned(n);
                ASSERT_AND_RET_DBG(
@@ -1791,6 +1784,21 @@ int irn_verify_irg(ir_node *n, ir_graph *irg)
                        state == op_pin_state_pinned,
                        "invalid pin state", 0,
                        ir_printf("node %+F", n));
+       } else if (!is_Block(n) && is_irn_pinned_in_irg(n)
+                  && !is_irg_state(irg, IR_GRAPH_STATE_BAD_BLOCK)) {
+               ASSERT_AND_RET_DBG(is_Block(get_nodes_block(n)) || is_Anchor(n),
+                               "block input is not a block", 0,
+                               ir_printf("node %+F", n));
+       }
+
+       /* We don't want to test nodes whose predecessors are Bad,
+          as we would have to special case that for each operation. */
+       if (op != op_Phi && op != op_Block) {
+               int i;
+               for (i = get_irn_arity(n) - 1; i >= 0; --i) {
+                       if (is_Bad(get_irn_n(n, i)))
+                               return 1;
+               }
        }
 
        if (op->ops.verify_node)
index d01d1a7..53f6aa3 100644
  * @brief   Control flow optimizations.
  * @author  Goetz Lindenmaier, Michael Beck, Sebastian Hack
  * @version $Id$
+ *
+ * Removes Bad control flow predecessors and empty blocks.  A block is empty
+ * if it contains only a Jmp node. Blocks can only be removed if they are not
+ * needed for the semantics of Phi nodes. Further, we NEVER remove labeled
+ * blocks (even if we could move the label).
  */
 #include "config.h"
 
 #include "iroptimize.h"
 
 #include <assert.h>
+#include <stdbool.h>
 
-#include "plist.h"
 #include "xmalloc.h"
 #include "irnode_t.h"
 #include "irgraph_t.h"
 
 #include "iropt_dbg.h"
 
-/*------------------------------------------------------------------*/
-/* Control flow optimization.                                       */
-/*                                                                  */
-/* Removes Bad control flow predecessors and empty blocks.  A block */
-/* is empty if it contains only a Jmp node.                         */
-/* Blocks can only be removed if they are not needed for the        */
-/* semantics of Phi nodes.                                          */
-/* Further, we NEVER remove labeled blocks (even if we could move   */
-/* the label.                                                       */
-/*------------------------------------------------------------------*/
-
-#define set_Block_removable(block)      set_Block_mark(block, 1)
-#define set_Block_non_removable(block)  set_Block_mark(block, 0)
-#define is_Block_removable(block)       (get_Block_mark(block) != 0)
-
-/**
- * Replace binary Conds that jumps twice into the same block
- * by a simple Jmp.
- * E.g.
- * @verbatim
- *               Cond                     Jmp  Bad
- *             /       \                   |   /
- *       ProjX True   ProjX False  ==>     |  /
- *             \       /                   | /
- *               Block                    Block
- * @endverbatim
- *
- * Such pattern are the result of if-conversion.
- *
- * Note that the simple case that Block has only these two
- * predecessors are already handled in equivalent_node_Block().
- */
-static int remove_senseless_conds(ir_node *bl)
-{
-       int i, j;
-       int n = get_Block_n_cfgpreds(bl);
-       int changed = 0;
-
-       for (i = 0; i < n; ++i) {
-               ir_node *pred_i = get_Block_cfgpred(bl, i);
-               ir_node *cond_i = skip_Proj(pred_i);
-
-               /* binary Cond */
-               if (is_Cond(cond_i) && get_irn_mode(get_Cond_selector(cond_i)) == mode_b) {
-
-                       for (j = i + 1; j < n; ++j) {
-                               ir_node *pred_j = get_Block_cfgpred(bl, j);
-                               ir_node *cond_j = skip_Proj(pred_j);
-
-                               if (cond_j == cond_i) {
-                                       ir_graph *irg = get_irn_irg(bl);
-                                       ir_node  *jmp = new_r_Jmp(get_nodes_block(cond_i));
-                                       set_irn_n(bl, i, jmp);
-                                       set_irn_n(bl, j, new_r_Bad(irg));
-
-                                       DBG_OPT_IFSIM2(cond_i, jmp);
-                                       changed = 1;
-                                       break;
-                               }
-                       }
-               }
-       }
-       return changed;
-}
-
 /** An environment for merge_blocks and collect nodes. */
 typedef struct merge_env {
-       int changed;    /**< Set if the graph was changed. */
-       int phis_moved; /**< Set if Phi nodes were moved. */
-       plist_t *list;  /**< Helper list for all found Switch Conds. */
+       bool      changed;      /**< Set if the graph was changed. */
+       bool      phis_moved;   /**< Set if Phi nodes were moved. */
+       ir_node **switch_conds; /**< Helper list for all found Switch Conds. */
 } merge_env;
 
-/**
- * Removes Tuples from Block control flow predecessors.
- * Optimizes blocks with equivalent_node().  This is tricky,
- * as we want to avoid nodes that have as block predecessor Bads.
- * Therefore we also optimize at control flow operations, depending
- * how we first reach the Block.
- */
-static void merge_blocks(ir_node *node, void *ctx)
+static void set_Block_removable(ir_node *block, bool removable)
 {
-       int i;
-       ir_node *new_block;
-       merge_env *env = (merge_env*)ctx;
-
-       /* clear the link field for ALL nodes first */
-       set_irn_link(node, NULL);
-
-       if (is_Block(node)) {
-               /* Remove Tuples */
-               for (i = get_Block_n_cfgpreds(node) - 1; i >= 0; --i) {
-                       ir_node *pred = get_Block_cfgpred(node, i);
-                       ir_node *skipped = skip_Tuple(pred);
-                       if (pred != skipped) {
-                               set_Block_cfgpred(node, i, skipped);
-                               env->changed = 1;
-                       }
-               }
-
-               /* see below */
-               new_block = equivalent_node(node);
-               if (new_block != node && ! is_Block_dead(new_block)) {
-                       exchange(node, new_block);
-                       env->changed = 1;
-               }
-
-       } else if (get_opt_optimize() && (get_irn_mode(node) == mode_X)) {
-               /* We will soon visit a block.  Optimize it before visiting! */
-               ir_node *b = get_nodes_block(skip_Proj(node));
-
-               if (!is_Block_dead(b)) {
-                       new_block = equivalent_node(b);
-
-                       while (!irn_visited(b) && !is_Block_dead(new_block) && new_block != b) {
-                               /* We would have to run gigo() if new is bad, so we
-                                  promote it directly below. Nevertheless, we sometimes reach a block
-                                  the first time through a dataflow node.  In this case we optimized the
-                                  block as such and have to promote the Bad here. */
-                               exchange(b, new_block);
-                               env->changed = 1;
-                               b = new_block;
-                               new_block = equivalent_node(b);
-                       }
-
-                       /* normally, we would create a Bad block here, but this must be
-                        * prevented, so just set its cf to Bad.
-                        */
-                       if (is_Block_dead(new_block)) {
-                               ir_graph *irg = get_irn_irg(node);
-                               exchange(node, new_r_Bad(irg));
-                               env->changed = 1;
-                       }
-               }
-       }
+       set_Block_mark(block, removable);
 }
 
-/**
- * Block walker removing control flow from dead block by
- * inspecting dominance info.
- * Do not replace blocks by Bad.  This optimization shall
- * ensure, that all Bad control flow predecessors are
- * removed, and no new other Bads are introduced.
- * Further removed useless Conds and clear the mark of all blocks.
- *
- * Must be run in the post walker.
- */
-static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
+static bool is_Block_removable(ir_node *block)
 {
-       int i;
-       int *changed = (int*)env;
-
-       /* Check block predecessors and turn control flow into bad.
-          Beware of Tuple, kill them. */
-       for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
-               ir_node *pred_X  = get_Block_cfgpred(block, i);
-               ir_node *skipped = skip_Tuple(pred_X);
-
-               if (! is_Bad(skipped)) {
-                       ir_node *pred_bl = get_nodes_block(skip_Proj(skipped));
-
-                       if (is_Block_dead(pred_bl) || (get_Block_dom_depth(pred_bl) < 0)) {
-                               ir_graph *irg = get_irn_irg(block);
-                               set_Block_dead(pred_bl);
-                               exchange(pred_X, new_r_Bad(irg));
-                               *changed = 1;
-                       } else if (skipped != pred_X) {
-                               set_Block_cfgpred(block, i, skipped);
-                               *changed = 1;
-                       }
-               }
-       }
-
-       *changed |= remove_senseless_conds(block);
+       return get_Block_mark(block);
+}
 
-       /* clear the block mark of all non labeled blocks */
-       if (has_Block_entity(block))
-               set_Block_non_removable(block);
-       else
-               set_Block_removable(block);
+static void clear_link(ir_node *node, void *ctx)
+{
+       (void) ctx;
+       set_irn_link(node, NULL);
 }
 
 /**
@@ -244,50 +91,45 @@ static void remove_unreachable_blocks_and_conds(ir_node *block, void *env)
  */
 static void collect_nodes(ir_node *n, void *ctx)
 {
-       unsigned   code = get_irn_opcode(n);
-       merge_env *env  = (merge_env*)ctx;
-
-       if (code == iro_Block) {
-               /* mark the block as non-removable if it is labeled */
-               if (has_Block_entity(n))
-                       set_Block_non_removable(n);
-       } else {
-               ir_node *b = get_nodes_block(n);
-
-               if (code == iro_Phi && get_irn_arity(n) > 0) {
-                       /* Collect Phi nodes to compact ins along with block's ins. */
-                       set_irn_link(n, get_irn_link(b));
-                       set_irn_link(b, n);
-               } else if (code != iro_Jmp && !is_Bad(b)) {  /* Check for non-empty block. */
-                       set_Block_non_removable(b);
-
-                       if (code == iro_Proj) {               /* link Proj nodes */
-                               ir_node *pred = get_Proj_pred(n);
-
-                               set_irn_link(n, get_irn_link(pred));
-                               set_irn_link(pred, n);
-                       } else if (code == iro_Cond) {
-                               ir_node *sel = get_Cond_selector(n);
-                               if (mode_is_int(get_irn_mode(sel))) {
-                                       /* found a switch-Cond, collect */
-                                       plist_insert_back(env->list, n);
-                               }
+       merge_env *env = (merge_env*)ctx;
+
+       if (is_Phi(n)) {
+               /* Collect Phi nodes to compact ins along with block's ins. */
+               ir_node *block = get_nodes_block(n);
+               set_irn_link(n, get_irn_link(block));
+               set_irn_link(block, n);
+       } else if (is_Block(n)) {
+               return;
+       } else if (!is_Jmp(n)) {  /* Check for non-empty block. */
+               ir_node *block = get_nodes_block(n);
+               set_Block_removable(block, false);
+
+               if (is_Proj(n)) {
+                       /* link Proj nodes */
+                       ir_node *pred = get_Proj_pred(n);
+                       set_irn_link(n, get_irn_link(pred));
+                       set_irn_link(pred, n);
+               } else if (is_Cond(n)) {
+                       ir_node *sel = get_Cond_selector(n);
+                       if (get_irn_mode(sel) != mode_b) {
+                               /* found a switch-Cond, collect */
+                               ARR_APP1(ir_node*, env->switch_conds, n);
                        }
                }
        }
 }
 
 /** Returns true if pred is predecessor of block. */
-static int is_pred_of(ir_node *pred, ir_node *b)
+static bool is_pred_of(ir_node *pred, ir_node *b)
 {
        int i;
 
        for (i = get_Block_n_cfgpreds(b) - 1; i >= 0; --i) {
                ir_node *b_pred = get_Block_cfgpred_block(b, i);
                if (b_pred == pred)
-                       return 1;
+                       return true;
        }
-       return 0;
+       return false;
 }
 
 /** Test whether we can optimize away pred block pos of b.
@@ -317,13 +159,13 @@ static int is_pred_of(ir_node *pred, ir_node *b)
  *  To perform the test for pos, we must regard predecessors before pos
  *  as already removed.
  **/
-static int test_whether_dispensable(ir_node *b, int pos)
+static unsigned test_whether_dispensable(ir_node *b, int pos)
 {
        int i, j, n_preds = 1;
        ir_node *pred = get_Block_cfgpred_block(b, pos);
 
        /* Bad blocks will be optimized away, so we don't need space for them */
-       if (is_Block_dead(pred))
+       if (is_Bad(pred))
                return 0;
 
        if (is_Block_removable(pred)) {
@@ -336,7 +178,7 @@ static int test_whether_dispensable(ir_node *b, int pos)
                           Handle all pred blocks with preds < pos as if they were already removed. */
                        for (i = 0; i < pos; i++) {
                                ir_node *b_pred = get_Block_cfgpred_block(b, i);
-                               if (! is_Block_dead(b_pred) && is_Block_removable(b_pred)) {
+                               if (! is_Bad(b_pred) && is_Block_removable(b_pred)) {
                                        for (j = get_Block_n_cfgpreds(b_pred) - 1; j >= 0; --j) {
                                                ir_node *b_pred_pred = get_Block_cfgpred_block(b_pred, j);
                                                if (is_pred_of(b_pred_pred, pred))
@@ -360,7 +202,7 @@ static int test_whether_dispensable(ir_node *b, int pos)
        return n_preds;
 
 non_dispensable:
-       set_Block_non_removable(pred);
+       set_Block_removable(pred, false);
        return 1;
 }
 
@@ -374,20 +216,22 @@ non_dispensable:
  * for all nodes, not regarding whether there is a possibility for optimization.
  *
  * For each predecessor p of a Block b there are three cases:
- *  -1. The predecessor p is a Bad node:  just skip it.  The in array of b shrinks by one.
- *  -2. The predecessor p is empty.  Remove p.  All predecessors of p are now
- *      predecessors of b.
- *  -3. The predecessor p is a block containing useful code.  Just keep p as is.
+ *  - The predecessor p is a Bad node: just skip it. The in array of b shrinks
+ *    by one.
+ *  - The predecessor p is empty. Remove p. All predecessors of p are now
+ *    predecessors of b.
+ *  - The predecessor p is a block containing useful code. Just keep p as is.
  *
  * For Phi nodes f we have to check the conditions at the Block of f.
  * For cases 1 and 3 we proceed as for Blocks.  For case 2 we can have two
  * cases:
- *  -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.  In this
- *      case we proceed as for blocks. We remove pred_f.  All
- *      predecessors of pred_f now are predecessors of f.
- *  -2b: The old predecessor of f is NOT in the block removed. It might be a Phi, too.
- *      We have to replicate f for each predecessor of the removed block. Or, with
- *      other words, the removed predecessor block has exactly one predecessor.
+ *  -2a: The old predecessor of the Phi f is a Phi pred_f IN THE BLOCK REMOVED.
+ *       In this case we proceed as for blocks. We remove pred_f.  All
+ *       predecessors of pred_f now are predecessors of f.
+ *  -2b: The old predecessor of f is NOT in the block removed. It might be a Phi
+ *       too. We have to replicate f for each predecessor of the removed block.
+ *       Or, with other words, the removed predecessor block has exactly one
+ *       predecessor.
  *
  * Further there is a special case for self referencing blocks:
  * @verbatim
@@ -408,8 +252,6 @@ non_dispensable:
  * If there is a Phi in pred_b, but we remove pred_b, we have to generate a
  * Phi in loop_b, that has the ins of the Phi in pred_b and a self referencing
  * backedge.
- * @@@ It is negotiable whether we should do this ... there might end up a copy
- * from the Phi in the loop when removing the Phis.
  */
 static void optimize_blocks(ir_node *b, void *ctx)
 {
@@ -436,15 +278,15 @@ static void optimize_blocks(ir_node *b, void *ctx)
                for (i = 0, n = get_Block_n_cfgpreds(b); i < n; ++i) {
                        pred = get_Block_cfgpred_block(b, i);
 
-                       if (is_Block_dead(pred)) {
+                       if (is_Bad(pred)) {
                                /* case Phi 1: Do nothing */
                        } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                                /* case Phi 2: It's an empty block and not yet visited. */
                                ir_node *phi_pred = get_Phi_pred(phi, i);
 
                                for (j = 0, k = get_Block_n_cfgpreds(pred); j < k; j++) {
-                                       /* because of breaking loops, not all predecessors are Bad-clean,
-                                        * so we must check this here again */
+                                       /* because of breaking loops, not all predecessors are
+                                        * Bad-clean, so we must check this here again */
                                        if (! is_Bad(get_Block_cfgpred(pred, j))) {
                                                if (get_nodes_block(phi_pred) == pred) {
                                                        /* case Phi 2a: */
@@ -470,15 +312,18 @@ static void optimize_blocks(ir_node *b, void *ctx)
                        exchange(phi, in[0]);
                else
                        set_irn_in(phi, p_preds, in);
-               env->changed = 1;
+               env->changed = true;
        }
 
        /*- This happens only if merge between loop backedge and single loop entry.
-           Moreover, it is only needed if predb is the direct dominator of b, else there can be no uses
-           of the Phi's in predb ... -*/
+           Moreover, it is only needed if predb is the direct dominator of b,
+           else there can be no uses of the Phi's in predb ... -*/
        for (k = 0, n = get_Block_n_cfgpreds(b); k < n; ++k) {
                ir_node *predb = get_nodes_block(get_Block_cfgpred(b, k));
 
+               if (is_Bad(predb))
+                       continue;
+
                if (is_Block_removable(predb) && !Block_block_visited(predb)) {
                        ir_node *next_phi;
 
@@ -492,20 +337,20 @@ static void optimize_blocks(ir_node *b, void *ctx)
                                if (get_Block_idom(b) != predb) {
                                        /* predb is not the dominator. There can't be uses of pred's Phi nodes, kill them .*/
                                        ir_graph *irg = get_irn_irg(b);
-                                       exchange(phi, new_r_Bad(irg));
+                                       exchange(phi, get_irg_bad(irg));
                                } else {
                                        /* predb is the direct dominator of b. There might be uses of the Phi nodes from
                                           predb in further block, so move this phi from the predecessor into the block b */
                                        set_nodes_block(phi, b);
                                        set_irn_link(phi, get_irn_link(b));
                                        set_irn_link(b, phi);
-                                       env->phis_moved = 1;
+                                       env->phis_moved = true;
 
                                        /* first, copy all 0..k-1 predecessors */
                                        for (i = 0; i < k; i++) {
                                                pred = get_Block_cfgpred_block(b, i);
 
-                                               if (is_Block_dead(pred)) {
+                                               if (is_Bad(pred)) {
                                                        /* Do nothing */
                                                } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                                                        /* It's an empty block and not yet visited. */
@@ -529,7 +374,7 @@ static void optimize_blocks(ir_node *b, void *ctx)
                                        for (i = k+1; i < get_Block_n_cfgpreds(b); i++) {
                                                pred = get_Block_cfgpred_block(b, i);
 
-                                               if (is_Block_dead(pred)) {
+                                               if (is_Bad(pred)) {
                                                        /* Do nothing */
                                                } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                                                        /* It's an empty block and not yet visited. */
@@ -547,7 +392,7 @@ static void optimize_blocks(ir_node *b, void *ctx)
                                                exchange(phi, in[0]);
                                        else
                                                set_irn_in(phi, q_preds, in);
-                                       env->changed = 1;
+                                       env->changed = true;
 
                                        assert(q_preds <= max_preds);
                                        // assert(p_preds == q_preds && "Wrong Phi Fix");
@@ -561,7 +406,7 @@ static void optimize_blocks(ir_node *b, void *ctx)
        for (i = 0; i < get_Block_n_cfgpreds(b); i++) {
                pred = get_Block_cfgpred_block(b, i);
 
-               if (is_Block_dead(pred)) {
+               if (is_Bad(pred)) {
                        /* case 1: Do nothing */
                } else if (is_Block_removable(pred) && !Block_block_visited(pred)) {
                        /* case 2: It's an empty block and not yet visited. */
@@ -576,7 +421,7 @@ static void optimize_blocks(ir_node *b, void *ctx)
                                        in[n_preds++] = pred_X;
                        }
                        /* Remove block as it might be kept alive. */
-                       exchange(pred, b/*new_r_Bad(irg)*/);
+                       exchange(pred, b/*get_irg_bad(irg)*/);
                } else {
                        /* case 3: */
                        in[n_preds++] = get_Block_cfgpred(b, i);
@@ -585,7 +430,7 @@ static void optimize_blocks(ir_node *b, void *ctx)
        assert(n_preds <= max_preds);
 
        set_irn_in(b, n_preds, in);
-       env->changed = 1;
+       env->changed = true;
 
        assert(get_irn_link(b) == NULL || p_preds == -1 || (n_preds == p_preds && "Wrong Phi Fix"));
        xfree(in);
@@ -593,7 +438,7 @@ static void optimize_blocks(ir_node *b, void *ctx)
 
 /**
  * Block walker: optimize all blocks using the default optimizations.
- * This removes Blocks that with only a Jmp predecessor.
+ * This removes Blocks with only a Jmp predecessor.
  */
 static void remove_simple_blocks(ir_node *block, void *ctx)
 {
@@ -602,42 +447,37 @@ static void remove_simple_blocks(ir_node *block, void *ctx)
 
        if (new_blk != block) {
                exchange(block, new_blk);
-               env->changed = 1;
+               env->changed = true;
        }
 }
 
 /**
- * Handle pre-optimized table switch Cond's.
- * During iropt, all Projs from a switch-Cond are already removed except
- * the defProj and maybe the taken one.
- * The defProj cannot be removed WITHOUT looking backwards, so we do this here.
+ * Optimize table-switch Conds.
  *
  * @param cond the switch-Cond
- *
- * @return non-zero if a switch-Cond was optimized
- *
- * Expects all Proj's linked to the cond node
+ * @return true if the switch-Cond was optimized
  */
-static int handle_switch_cond(ir_node *cond)
+static bool handle_switch_cond(ir_node *cond)
 {
-       ir_node *sel = get_Cond_selector(cond);
-
+       ir_node *sel   = get_Cond_selector(cond);
        ir_node *proj1 = (ir_node*)get_irn_link(cond);
        ir_node *proj2 = (ir_node*)get_irn_link(proj1);
-       ir_node *jmp, *blk;
-
-       blk = get_nodes_block(cond);
+       ir_node *blk   = get_nodes_block(cond);
 
+       /* exactly 1 Proj on the Cond node: must be the defaultProj */
        if (proj2 == NULL) {
-               /* this Cond has only one Proj: must be the defProj */
+               ir_node *jmp = new_r_Jmp(blk);
                assert(get_Cond_default_proj(cond) == get_Proj_proj(proj1));
                /* convert it into a Jmp */
-               jmp = new_r_Jmp(blk);
                exchange(proj1, jmp);
-               return 1;
-       } else if (get_irn_link(proj2) == NULL) {
-               /* We have two Proj's here. Check if the Cond has
-                  a constant argument */
+               return true;
+       }
+
+       /* handle Cond nodes with constant argument. In this case the localopt rules
+        * should have killed all obviously impossible cases.
+        * So the only case left to handle here is 1 defaultProj + 1case
+        * (this one case should be the one taken) */
+       if (get_irn_link(proj2) == NULL) {
                ir_tarval *tv = value_of(sel);
 
                if (tv != tarval_bad) {
@@ -645,40 +485,41 @@ static int handle_switch_cond(ir_node *cond)
                        long      num     = get_tarval_long(tv);
                        long      def_num = get_Cond_default_proj(cond);
                        ir_graph *irg     = get_irn_irg(cond);
+                       ir_node  *bad     = get_irg_bad(irg);
 
                        if (def_num == get_Proj_proj(proj1)) {
                                /* first one is the defProj */
                                if (num == get_Proj_proj(proj2)) {
-                                       jmp = new_r_Jmp(blk);
+                                       ir_node *jmp = new_r_Jmp(blk);
                                        exchange(proj2, jmp);
-                                       exchange(proj1, new_r_Bad(irg));
-                                       return 1;
+                                       exchange(proj1, bad);
+                                       return true;
                                }
                        } else if (def_num == get_Proj_proj(proj2)) {
                                /* second one is the defProj */
                                if (num == get_Proj_proj(proj1)) {
-                                       jmp = new_r_Jmp(blk);
+                                       ir_node *jmp = new_r_Jmp(blk);
                                        exchange(proj1, jmp);
-                                       exchange(proj2, new_r_Bad(irg));
-                                       return 1;
+                                       exchange(proj2, bad);
+                                       return true;
                                }
                        } else {
                                /* neither: strange, Cond was not optimized so far */
                                if (num == get_Proj_proj(proj1)) {
-                                       jmp = new_r_Jmp(blk);
+                                       ir_node *jmp = new_r_Jmp(blk);
                                        exchange(proj1, jmp);
-                                       exchange(proj2, new_r_Bad(irg));
-                                       return 1;
+                                       exchange(proj2, bad);
+                                       return true;
                                } else if (num == get_Proj_proj(proj2)) {
-                                       jmp = new_r_Jmp(blk);
+                                       ir_node *jmp = new_r_Jmp(blk);
                                        exchange(proj2, jmp);
-                                       exchange(proj1, new_r_Bad(irg));
-                                       return 1;
+                                       exchange(proj1, bad);
+                                       return true;
                                }
                        }
                }
        }
-       return 0;
+       return false;
 }
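
The Proj chain read at the top of handle_switch_cond() is assumed to have been built by the preceding walk: every Proj of a switch-Cond is linked onto its Cond through the node link field, which clear_link resets beforehand. A minimal sketch of such a collection step, not the actual collect_nodes of this file and with an illustrative name only:

static void link_switch_projs(ir_node *node, void *env)
{
	(void) env;
	if (is_Proj(node)) {
		ir_node *pred = get_Proj_pred(node);
		/* only switch-Conds (selector mode != mode_b) are chained */
		if (is_Cond(pred) && get_irn_mode(get_Cond_selector(pred)) != mode_b) {
			/* prepend this Proj: cond -> proj -> previously linked Projs */
			set_irn_link(node, get_irn_link(pred));
			set_irn_link(pred, node);
		}
	}
}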
 
 /* Optimizations of the control flow that also require changes of Phi nodes.
@@ -691,18 +532,17 @@ static int handle_switch_cond(ir_node *cond)
  * computations, i.e., these blocks might be removed.
  *
  * The second pass performs the optimizations intended by this algorithm.
- * It walks only over block nodes and adapts these and the Phi nodes in these blocks,
- * which it finds in a linked list computed by the first pass.
+ * It walks only over block nodes and adapts these and the Phi nodes in these
+ * blocks, which it finds in a linked list computed by the first pass.
  *
- * We use the mark flag to mark removable blocks in the first
- * phase.
+ * We use the mark flag to mark removable blocks in the first phase.
  */
 void optimize_cf(ir_graph *irg)
 {
-       int i, j, n, changed;
+       int i, j, n;
        ir_node **in = NULL;
-       ir_node *cond, *end = get_irg_end(irg);
-       plist_element_t *el;
+       ir_node *end = get_irg_end(irg);
+       ir_node *new_end;
        merge_env env;
 
        assert(get_irg_phase_state(irg) != phase_building);
@@ -711,49 +551,28 @@ void optimize_cf(ir_graph *irg)
        assert(get_irg_pinned(irg) != op_pin_state_floats &&
               "Control flow optimization need a pinned graph");
 
-       /* FIXME: control flow opt destroys block edges. So edges are deactivated here. Fix the edges! */
+       /* FIXME: control flow opt destroys block edges. So edges are deactivated
+        * here. Fix the edges! */
        edges_deactivate(irg);
 
        /* we use the mark flag to mark removable blocks */
-       ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK);
+       ir_reserve_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
 restart:
-       env.changed    = 0;
-       env.phis_moved = 0;
+       env.changed    = false;
+       env.phis_moved = false;
 
-       /* ALWAYS kill unreachable control flow. Backend cannot handle it anyway.
-          Use dominator info to kill blocks. Also optimize useless Conds. */
        assure_doms(irg);
-       irg_block_walk_graph(irg, NULL, remove_unreachable_blocks_and_conds, &env.changed);
-
-       /* fix the keep-alives */
-       changed = 0;
-       for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
-               ir_node *ka = get_End_keepalive(end, i);
-
-               if (is_Block(ka)) {
-                       /* do NOT keep dead blocks */
-                       if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
-                               set_End_keepalive(end, i, new_r_Bad(irg));
-                               changed = 1;
-                       }
-               } else {
-                       ir_node *block = get_nodes_block(ka);
-
-                       if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
-                               /* do NOT keep nodes in dead blocks */
-                               set_End_keepalive(end, i, new_r_Bad(irg));
-                               changed = 1;
-                       }
-               }
-       }
-       env.changed |= changed;
-
-       ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
 
-       env.list = plist_new();
-       irg_walk(end, merge_blocks, collect_nodes, &env);
+       env.switch_conds = NEW_ARR_F(ir_node*, 0);
+       irg_walk(end, clear_link, collect_nodes, &env);
 
-       ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
+       /* handle all collected switch-Conds */
+       n = ARR_LEN(env.switch_conds);
+       for (i = 0; i < n; ++i) {
+               ir_node *cond = env.switch_conds[i];
+               env.changed |= handle_switch_cond(cond);
+       }
+       DEL_ARR_F(env.switch_conds);
 
        if (env.changed) {
                /* Handle graph state if was changed. */
@@ -762,58 +581,24 @@ restart:
                set_irg_extblk_inconsistent(irg);
                set_irg_loopinfo_inconsistent(irg);
                set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
-               env.changed = 0;
-       }
 
-       /* handle all collected switch-Conds */
-       foreach_plist(env.list, el) {
-               cond = (ir_node*)plist_element_get_value(el);
-               env.changed |= handle_switch_cond(cond);
-       }
-       plist_free(env.list);
-
-       if (env.changed) {
                /* The Cond optimization might generate unreachable code, so restart if
                   it happens. */
                goto restart;
        }
 
        /* Optimize the standard code. */
-       env.changed = 0;
        assure_doms(irg);
        irg_block_walk_graph(irg, optimize_blocks, remove_simple_blocks, &env);
 
-       /* in rare cases a node may be kept alive more than once, use the visited flag to detect this */
-       ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
-       inc_irg_visited(irg);
-
-       /* fix the keep-alives again */
-       changed = 0;
-       for (i = 0, n = get_End_n_keepalives(end); i < n; ++i) {
-               ir_node *ka = get_End_keepalive(end, i);
-
-               if (is_Block(ka)) {
-                       /* do NOT keep dead blocks */
-                       if (is_Block_dead(ka) || get_Block_dom_depth(ka) < 0) {
-                               set_End_keepalive(end, i, new_r_Bad(irg));
-                               changed = 1;
-                       }
-               } else {
-                       ir_node *block = get_nodes_block(ka);
-
-                       if (is_Bad(block) || is_Block_dead(block) || get_Block_dom_depth(block) < 0) {
-                               /* do NOT keep nodes in dead blocks */
-                               set_End_keepalive(end, i, new_r_Bad(irg));
-                               changed = 1;
-                       }
-               }
+       new_end = optimize_in_place(end);
+       if (new_end != end) {
+               set_irg_end(irg, new_end);
+               end = new_end;
        }
-       env.changed |= changed;
-
        remove_End_Bads_and_doublets(end);
 
-
-       ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_VISITED);
+       ir_free_resources(irg, IR_RESOURCE_BLOCK_MARK | IR_RESOURCE_IRN_LINK);
 
        if (env.phis_moved) {
                /* Bad: when we moved Phi's, we might produce dead Phi nodes
@@ -850,7 +635,7 @@ restart:
                        }
                        if (j != n) {
                                set_End_keepalives(end, j, in);
-                               env.changed = 1;
+                               env.changed = true;
                        }
                }
        }
@@ -864,7 +649,6 @@ restart:
                set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
        }
 
-
        /* the verifier doesn't work yet with floating nodes */
        if (get_irg_pinned(irg) == op_pin_state_pinned) {
                /* after optimize_cf(), only Bad data flow may remain. */
@@ -879,4 +663,4 @@ restart:
 ir_graph_pass_t *optimize_cf_pass(const char *name)
 {
        return def_graph_pass(name ? name : "optimize_cf", optimize_cf);
-}  /* optimize_cf_pass */
+}
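
For completeness, a hedged sketch of how a client might drive this optimization over a whole program, either directly or through the pass wrapper returned above (iteration helpers from irprog.h assumed; the function name is illustrative):

#include "iroptimize.h"
#include "irprog.h"

static void run_cfopt_everywhere(void)
{
	int i, n = get_irp_n_irgs();
	for (i = 0; i < n; ++i) {
		ir_graph *irg = get_irp_irg(i);
		optimize_cf(irg);  /* the control flow optimization defined above */
	}
}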
index 75bb384..5ca8d40 100644 (file)
 #include "irgopt.h"
 #include "irpass.h"
 
-/**
- * Returns non-zero, is a block is not reachable from Start.
- *
- * @param block  the block to test
- */
 static int is_Block_unreachable(ir_node *block)
 {
-       return is_Block_dead(block) || get_Block_dom_depth(block) < 0;
+       return get_Block_dom_depth(block) < 0;
 }
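
is_Block_unreachable() now relies solely on dominance information: blocks not reachable from Start keep a dom depth of -1. Callers therefore have to make sure dominance is computed first; a minimal usage sketch with illustrative names:

static void skip_unreachable(ir_node *block, void *env)
{
	(void) env;
	if (is_Block_unreachable(block))
		return;  /* block is not reachable from Start, nothing to place here */
	/* ... do the actual work for reachable blocks ... */
}

static void example_driver(ir_graph *irg)
{
	assure_doms(irg);  /* dom depth is -1 for unreachable blocks */
	irg_block_walk_graph(irg, NULL, skip_unreachable, NULL);
}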
 
 /**
@@ -125,8 +120,7 @@ static void place_floats_early(ir_node *n, waitq *worklist)
                           place_early() has already been finished on them.
                           We do not have any unfinished inputs! */
                        pred_block = get_nodes_block(pred);
-                       if ((!is_Block_dead(pred_block)) &&
-                               (get_Block_dom_depth(pred_block) > depth)) {
+                       if (get_Block_dom_depth(pred_block) > depth) {
                                b = pred_block;
                                depth = get_Block_dom_depth(pred_block);
                        }
@@ -262,12 +256,9 @@ static ir_node *calc_dom_dca(ir_node *dca, ir_node *block)
 {
        assert(block != NULL);
 
-       /* we do not want to place nodes in dead blocks */
-       if (is_Block_dead(block))
-               return dca;
-
        /* We found a first legal placement. */
-       if (!dca) return block;
+       if (!dca)
+               return block;
 
        /* Find a placement that is dominates both, dca and block. */
        while (get_Block_dom_depth(block) > get_Block_dom_depth(dca))
@@ -302,6 +293,8 @@ static ir_node *consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *produ
                for (i = 0;  i < arity; i++) {
                        if (get_Phi_pred(consumer, i) == producer) {
                                ir_node *new_block = get_Block_cfgpred_block(phi_block, i);
+                               if (is_Bad(new_block))
+                                       continue;
 
                                if (!is_Block_unreachable(new_block))
                                        dca = calc_dom_dca(dca, new_block);
@@ -435,6 +428,7 @@ static void place_floats_late(ir_node *n, pdeq *worklist)
        if (!is_Block(n) &&
            (!is_cfop(n)) &&
            (get_irn_mode(n) != mode_X)) {
+           ir_op *op;
                /* Remember the early_blk placement of this block to move it
                   out of loop no further than the early_blk placement. */
                early_blk = get_nodes_block(n);
@@ -459,27 +453,25 @@ static void place_floats_late(ir_node *n, pdeq *worklist)
                                place_floats_late(succ, worklist);
                }
 
-               if (! is_Block_dead(early_blk)) {
-                       /* do only move things that where not dead */
-                       ir_op *op = get_irn_op(n);
-
-                       /* We have to determine the final block of this node... except for
-                          constants and Projs */
-                       if ((get_irn_pinned(n) == op_pin_state_floats) &&
-                           (op != op_Const)    &&
-                           (op != op_SymConst) &&
-                           (op != op_Proj))
-                       {
-                               /* deepest common ancestor in the dominator tree of all nodes'
-                                  blocks depending on us; our final placement has to dominate
-                                  DCA. */
-                               ir_node *dca = get_deepest_common_dom_ancestor(n, NULL);
-                               if (dca != NULL) {
-                                       set_nodes_block(n, dca);
-                                       move_out_of_loops(n, early_blk);
-                                       if (get_irn_mode(n) == mode_T) {
-                                               set_projs_block(n, get_nodes_block(n));
-                                       }
+               /* only move things that were not dead */
+               op = get_irn_op(n);
+
+               /* We have to determine the final block of this node... except for
+                  constants and Projs */
+               if ((get_irn_pinned(n) == op_pin_state_floats) &&
+                       (op != op_Const)    &&
+                       (op != op_SymConst) &&
+                       (op != op_Proj))
+               {
+                       /* deepest common ancestor in the dominator tree of all nodes'
+                          blocks depending on us; our final placement has to dominate
+                          DCA. */
+                       ir_node *dca = get_deepest_common_dom_ancestor(n, NULL);
+                       if (dca != NULL) {
+                               set_nodes_block(n, dca);
+                               move_out_of_loops(n, early_blk);
+                               if (get_irn_mode(n) == mode_T) {
+                                       set_projs_block(n, get_nodes_block(n));
                                }
                        }
                }
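
The late placement above boils down to: for a floating node that is not a Const, SymConst or Proj, find the deepest block that dominates the blocks of all users, then pull it out of loops no further than its early placement. A simplified sketch of the user-DCA computation (not the get_deepest_common_dom_ancestor of this file; it assumes out edges have been computed via assure_irg_outs and ignores the Phi special case handled by consumer_dom_dca above):

static ir_node *dca_of_users(ir_node *node)
{
	ir_node *dca = NULL;
	int      i, n = get_irn_n_outs(node);

	for (i = 0; i < n; ++i) {
		ir_node *user  = get_irn_out(node, i);
		ir_node *block = get_nodes_block(user);

		if (is_Bad(block) || get_Block_dom_depth(block) < 0)
			continue;  /* ignore users in unreachable blocks */
		dca = calc_dom_dca(dca, block);  /* NULL dca: first legal placement */
	}
	return dca;  /* NULL if the node has no (reachable) users */
}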
index 94c116b..d1e3e0e 100644 (file)
@@ -780,7 +780,8 @@ static void init_block_phis(ir_node *irn, void *ctx)
        (void) ctx;
 
        if (is_Phi(irn)) {
-               add_Block_phi(get_nodes_block(irn), irn);
+               ir_node *block = get_nodes_block(irn);
+               add_Block_phi(block, irn);
        }
 }  /* init_block_phis */
 
@@ -3001,18 +3002,21 @@ static void apply_cf(ir_node *block, void *ctx)
                        ir_node *pred = get_Block_cfgpred(block, i);
 
                        if (! is_Bad(pred)) {
-                               node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
-
-                               if (pred_bl->flagged == 0) {
-                                       pred_bl->flagged = 3;
-
-                                       if (pred_bl->type.tv == tarval_reachable) {
-                                               /*
-                                                * We will remove an edge from block to its pred.
-                                                * This might leave the pred block as an endless loop
-                                                */
-                                               if (! is_backedge(block, i))
-                                                       keep_alive(pred_bl->node);
+                               ir_node *pred_block = get_nodes_block(skip_Proj(pred));
+                               if (!is_Bad(pred_block)) {
+                                       node_t *pred_bl = get_irn_node(pred_block);
+
+                                       if (pred_bl->flagged == 0) {
+                                               pred_bl->flagged = 3;
+
+                                               if (pred_bl->type.tv == tarval_reachable) {
+                                                       /*
+                                                        * We will remove an edge from block to its pred.
+                                                        * This might leave the pred block as an endless loop
+                                                        */
+                                                       if (! is_backedge(block, i))
+                                                               keep_alive(pred_bl->node);
+                                               }
                                        }
                                }
                        }
@@ -3022,7 +3026,9 @@ static void apply_cf(ir_node *block, void *ctx)
                   finds out the opposite :-) */
                if (block != get_irg_end_block(current_ir_graph)) {
                        /* mark dead blocks */
-                       set_Block_dead(block);
+                       //set_Block_dead(block);
+                       //ir_graph *irg = get_irn_irg(block);
+                       //exchange(block, get_irg_bad(irg));
                        DB((dbg, LEVEL_1, "Removing dead %+F\n", block));
                } else {
                        /* the endblock is unreachable */
@@ -3057,18 +3063,21 @@ static void apply_cf(ir_node *block, void *ctx)
                } else {
                        DB((dbg, LEVEL_1, "Removing dead input %d from %+F (%+F)\n", i, block, pred));
                        if (! is_Bad(pred)) {
-                               node_t *pred_bl = get_irn_node(get_nodes_block(skip_Proj(pred)));
-
-                               if (pred_bl->flagged == 0) {
-                                       pred_bl->flagged = 3;
-
-                                       if (pred_bl->type.tv == tarval_reachable) {
-                                               /*
-                                                * We will remove an edge from block to its pred.
-                                                * This might leave the pred block as an endless loop
-                                                */
-                                               if (! is_backedge(block, i))
-                                                       keep_alive(pred_bl->node);
+                               ir_node *pred_block = get_nodes_block(skip_Proj(pred));
+                               if (!is_Bad(pred_block)) {
+                                       node_t *pred_bl = get_irn_node(pred_block);
+
+                                       if (!is_Bad(pred_bl->node) && pred_bl->flagged == 0) {
+                                               pred_bl->flagged = 3;
+
+                                               if (pred_bl->type.tv == tarval_reachable) {
+                                                       /*
+                                                        * We will remove an edge from block to its pred.
+                                                        * This might leave the pred block as an endless loop
+                                                        */
+                                                       if (! is_backedge(block, i))
+                                                               keep_alive(pred_bl->node);
+                                               }
                                        }
                                }
                        }
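
Both hunks above guard the same idiom: before an incoming control flow edge of a reachable predecessor is dropped, that predecessor is kept alive, because removing the edge might turn it into an endless loop with no remaining path to End. Stripped of combo's lattice bookkeeping, the idiom looks roughly like this (sketch with an illustrative name):

static void keep_pred_if_it_may_become_endless(ir_node *block, int pos)
{
	ir_node *pred = get_Block_cfgpred(block, pos);
	ir_node *pred_block;

	if (is_Bad(pred))
		return;
	pred_block = get_nodes_block(skip_Proj(pred));
	if (is_Bad(pred_block))
		return;
	/* removing the edge block -> pred_block may leave pred_block as an
	 * endless loop; keep it alive via End unless the edge is a backedge */
	if (!is_backedge(block, pos))
		keep_alive(pred_block);
}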
@@ -3397,13 +3406,22 @@ static void apply_end(ir_node *end, environment_t *env)
 
        /* fix the keep alive */
        for (i = j = 0; i < n; i++) {
-               ir_node *ka   = get_End_keepalive(end, i);
-               node_t  *node = get_irn_node(ka);
+               ir_node *ka = get_End_keepalive(end, i);
+               ir_node *block;
+               node_t  *node;
 
-               if (! is_Block(ka))
-                       node = get_irn_node(get_nodes_block(ka));
+               if (is_Bad(ka))
+                       continue;
+               if (!is_Block(ka)) {
+                       block = get_nodes_block(ka);
+                       if (is_Bad(block))
+                               continue;
+               } else {
+                       block = ka;
+               }
 
-               if (node->type.tv != tarval_unreachable && !is_Bad(ka))
+               node = get_irn_node(block);
+               if (node->type.tv != tarval_unreachable)
                        in[j++] = ka;
        }
        if (j != n) {
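
apply_end() compacts the keep-alive list in place: surviving entries are gathered into in[] and written back with set_End_keepalives() when anything was dropped. The same pattern, reduced to the reachability test used elsewhere in this change (illustrative helper name; ALLOCAN and up-to-date dominance information assumed):

static void drop_unreachable_keepalives(ir_graph *irg)
{
	ir_node  *end = get_irg_end(irg);
	int       n   = get_End_n_keepalives(end);
	ir_node **in  = ALLOCAN(ir_node*, n);
	int       i, j = 0;

	for (i = 0; i < n; ++i) {
		ir_node *ka = get_End_keepalive(end, i);
		ir_node *block;

		if (is_Bad(ka))
			continue;  /* drop Bad keep-alives */
		block = is_Block(ka) ? ka : get_nodes_block(ka);
		if (is_Bad(block) || get_Block_dom_depth(block) < 0)
			continue;  /* drop keep-alives of unreachable code */
		in[j++] = ka;
	}
	if (j != n)
		set_End_keepalives(end, j, in);
}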
index fb0d5c1..0f519a9 100644 (file)
@@ -83,8 +83,8 @@ static ir_node *search_def_and_create_phis(ir_node *block, ir_mode *mode,
        ir_node **in;
        ir_node *dummy;
 
-       /* This is needed because we create bads sometimes */
-       if (is_Block_dead(block)) {
+       /* the block may be a Bad node here; in that case return Bad as the value */
+       if (is_Bad(block)) {
                ir_graph *irg = get_irn_irg(block);
                return new_r_Bad(irg);
        }
@@ -762,10 +762,6 @@ void opt_jumpthreading(ir_graph* irg)
                set_irg_extblk_inconsistent(irg);
                set_irg_loopinfo_inconsistent(irg);
                set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
-
-               /* Dead code might be created. Optimize it away as it is dangerous
-                * to call optimize_df() an dead code. */
-               optimize_cf(irg);
        }
 }
 
index 5770288..9705517 100644 (file)
@@ -1509,10 +1509,6 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
 
        block = get_nodes_block(store);
 
-       /* abort on dead blocks */
-       if (is_Block_dead(block))
-               return 0;
-
        /* check if the block is post dominated by Phi-block
           and has no exception exit */
        bl_info = (block_info_t*)get_irn_link(block);
@@ -1548,10 +1544,7 @@ static unsigned optimize_phi(ir_node *phi, walk_env_t *wenv)
                if (exc != info->exc_block)
                        return 0;
 
-               /* abort on dead blocks */
                block = get_nodes_block(pred);
-               if (is_Block_dead(block))
-                       return 0;
 
                /* check if the block is post dominated by Phi-block
                   and has no exception exit. Note that block must be different from
index be6230c..ac9febd 100644 (file)
@@ -597,14 +597,6 @@ static void wq_walker(ir_node *n, void *env)
 
        set_irn_link(n, NULL);
        if (!is_Block(n)) {
-               ir_node *blk = get_nodes_block(n);
-
-               if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
-                       /* We are in a dead block, do not optimize or we may fall into an endless
-                          loop. We check this here instead of requiring that all dead blocks are removed
-                          which or cf_opt do not guarantee yet. */
-                       return;
-               }
                waitq_put(wenv->wq, n);
                set_irn_link(n, wenv->wq);
        }
@@ -623,13 +615,6 @@ static void do_reassociation(walker_t *wenv)
                set_irn_link(n, NULL);
 
                blk = get_nodes_block(n);
-               if (is_Block_dead(blk) || get_Block_dom_depth(blk) < 0) {
-                       /* We are in a dead block, do not optimize or we may fall into an endless
-                          loop. We check this here instead of requiring that all dead blocks are removed
-                          which or cf_opt do not guarantee yet. */
-                       continue;
-               }
-
 
                hook_reassociate(1);
 
index 7ab1731..89e5541 100755 (executable)
@@ -59,7 +59,7 @@ def format_irgassign(node):
        if hasattr(node, "knownBlock"):
                return ""
        else:
-               return "ir_graph *irg = get_Block_irg(block);\n"
+               return "ir_graph *irg = get_irn_irg(block);\n"
 
 def format_curblock(node):
        if hasattr(node, "knownBlock"):
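
The generator now emits get_irn_irg(block) instead of get_Block_irg(block), presumably because the block input of a node under construction may meanwhile be a Bad node rather than a proper Block, and get_irn_irg() does not insist on a Block operand. A sketch of the effect in C (the helper name is made up; only the irg lookup line is what format_irgassign actually emits):

/* illustrative only: look up the graph from a block input that may be Bad */
static ir_graph *irg_for_block_input(ir_node *block)
{
	/* works for a real Block as well as for a Bad node standing in for one */
	return get_irn_irg(block);
}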