Better fix for the MacroBlock header
[libfirm] / ir / ir / irgopt.c
index 4a6790f..18eaccd 100644 (file)
@@ -371,12 +371,23 @@ static void copy_preds(ir_node *n, void *env) {
        nn = get_new_node(n);
 
        if (is_Block(n)) {
+               /* copy the macro block header */
+               ir_node *mbh = get_Block_MacroBlock(n);
+
+               if (mbh == n) {
+                       /* this block is a macroblock header */
+                       set_irn_n(nn, -1, nn);
+               } else {
+                       /* get the macro block header */
+                       set_irn_n(nn, -1, get_new_node(mbh));
+               }
+
                /* Don't copy Bad nodes. */
                j = 0;
                irn_arity = get_irn_arity(n);
                for (i = 0; i < irn_arity; i++) {
                        if (! is_Bad(get_irn_n(n, i))) {
-                               set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+                               set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
                                /*if (is_backedge(n, i)) set_backedge(nn, j);*/
                                j++;
                        }
@@ -401,11 +412,11 @@ static void copy_preds(ir_node *n, void *env) {
                                exchange(nn, old);
                        }
                }
-       } else if (get_irn_op(n) == op_Phi) {
+       } else if (is_Phi(n)) {
                /* Don't copy node if corresponding predecessor in block is Bad.
                   The Block itself should not be Bad. */
                block = get_nodes_block(n);
-               set_nodes_block(nn, get_new_node(block));
+               set_irn_n(nn, -1, get_new_node(block));
                j = 0;
                irn_arity = get_irn_arity(n);
                for (i = 0; i < irn_arity; i++) {
@@ -648,7 +659,7 @@ dead_node_elimination(ir_graph *irg) {
 
                /* Free memory from old unoptimized obstack */
                obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
-               xfree (graveyard_obst);           /* ... then free it.           */
+               xfree(graveyard_obst);            /* ... then free it.           */
 
                /* inform statistics that the run is over */
                hook_dead_node_elim(irg, 0);
@@ -1208,6 +1219,8 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        } else {
                set_Tuple_pred(call, pn_Call_T_result, new_Bad());
        }
+       /* handle the regular call */
+       set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
 
        /* For now, we cannot inline calls with value_base */
        set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
@@ -1228,7 +1241,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        ir_node *ret, *irn;
                        ret = get_irn_n(end_bl, i);
                        irn = skip_Proj(ret);
-                       if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
+                       if (is_fragile_op(irn) || is_Raise(irn)) {
                                cf_pred[n_exc] = ret;
                                ++n_exc;
                        }
@@ -1258,7 +1271,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        set_Tuple_pred(call, pn_Call_X_except, new_Bad());
                        set_Tuple_pred(call, pn_Call_M_except, new_Bad());
                }
-               set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
        } else {
                ir_node *main_end_bl;
                int main_end_bl_arity;
@@ -1284,7 +1296,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_regular, new_Bad());
                set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
                set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
                free(end_preds);
@@ -1748,16 +1759,9 @@ place_floats_early(ir_node *n, waitq *worklist) {
        assert(irn_not_visited(n));
        mark_irn_visited(n);
 
-#ifndef CAN_PLACE_PROJS
-       while (is_Proj(n)) {
-               n = get_Proj_pred(n);
-               mark_irn_visited(n);
-       }
-#endif
-
        /* Place floating nodes. */
        if (get_irn_pinned(n) == op_pin_state_floats) {
-               ir_node *curr_block = get_nodes_block(n);
+               ir_node *curr_block = get_irn_n(n, -1);
                int in_dead_block   = is_Block_unreachable(curr_block);
                int depth           = 0;
                ir_node *b          = NULL;   /* The block to place this node in */
@@ -1789,7 +1793,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                                 */
                                if (! in_dead_block) {
                                        if (get_irn_pinned(pred) == op_pin_state_floats &&
-                                               is_Block_unreachable(get_nodes_block(pred)))
+                                               is_Block_unreachable(get_irn_n(pred, -1)))
                                                set_nodes_block(pred, curr_block);
                                }
                                place_floats_early(pred, worklist);
@@ -1805,14 +1809,14 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        /* Because all loops contain at least one op_pin_state_pinned node, now all
                           our inputs are either op_pin_state_pinned or place_early() has already
                           been finished on them.  We do not have any unfinished inputs!  */
-                       pred_block = get_nodes_block(pred);
+                       pred_block = get_irn_n(pred, -1);
                        if ((!is_Block_dead(pred_block)) &&
                                (get_Block_dom_depth(pred_block) > depth)) {
                                b = pred_block;
                                depth = get_Block_dom_depth(pred_block);
                        }
                        /* Avoid that the node is placed in the Start block */
-                       if ((depth == 1) && (get_Block_dom_depth(get_nodes_block(n)) > 1)
+                       if ((depth == 1) && (get_Block_dom_depth(get_irn_n(n, -1)) > 1)
                                && get_irg_phase_state(current_ir_graph) != phase_backend) {
                                b = get_Block_cfg_out(get_irg_start_block(current_ir_graph), 0);
                                assert(b != get_irg_start_block(current_ir_graph));
@@ -1851,14 +1855,14 @@ place_floats_early(ir_node *n, waitq *worklist) {
                }
        } else if (is_Phi(n)) {
                ir_node *pred;
-               ir_node *curr_block = get_nodes_block(n);
+               ir_node *curr_block = get_irn_n(n, -1);
                int in_dead_block   = is_Block_unreachable(curr_block);
 
                /*
                 * Phi nodes: move nodes from dead blocks into the effective use
                 * of the Phi-input if the Phi is not in a bad block.
                 */
-               pred = get_nodes_block(n);
+               pred = get_irn_n(n, -1);
                if (irn_not_visited(pred))
                        waitq_put(worklist, pred);
 
@@ -1868,7 +1872,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        if (irn_not_visited(pred)) {
                                if (! in_dead_block &&
                                        get_irn_pinned(pred) == op_pin_state_floats &&
-                                       is_Block_unreachable(get_nodes_block(pred))) {
+                                       is_Block_unreachable(get_irn_n(pred, -1))) {
                                        set_nodes_block(pred, get_Block_cfgpred_block(curr_block, i));
                                }
                                waitq_put(worklist, pred);
@@ -1876,13 +1880,13 @@ place_floats_early(ir_node *n, waitq *worklist) {
                }
        } else {
                ir_node *pred;
-               ir_node *curr_block = get_nodes_block(n);
+               ir_node *curr_block = get_irn_n(n, -1);
                int in_dead_block   = is_Block_unreachable(curr_block);
 
                /*
                 * All other nodes: move nodes from dead blocks into the same block.
                 */
-               pred = get_nodes_block(n);
+               pred = get_irn_n(n, -1);
                if (irn_not_visited(pred))
                        waitq_put(worklist, pred);
 
@@ -1892,7 +1896,7 @@ place_floats_early(ir_node *n, waitq *worklist) {
                        if (irn_not_visited(pred)) {
                                if (! in_dead_block &&
                                        get_irn_pinned(pred) == op_pin_state_floats &&
-                                       is_Block_unreachable(get_nodes_block(pred))) {
+                                       is_Block_unreachable(get_irn_n(pred, -1))) {
                                        set_nodes_block(pred, curr_block);
                                }
                                waitq_put(worklist, pred);
@@ -1982,7 +1986,7 @@ consumer_dom_dca(ir_node *dca, ir_node *consumer, ir_node *producer) {
                }
 
                if (! block)
-                       block = get_nodes_block(producer);
+                       block = get_irn_n(producer, -1);
        } else {
                assert(is_no_Block(consumer));
                block = get_nodes_block(consumer);
@@ -2056,7 +2060,7 @@ static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca)
                        dca = get_deepest_common_ancestor(succ, dca);
                } else {
                        /* ignore if succ is in dead code */
-                       succ_blk = get_nodes_block(succ);
+                       succ_blk = get_irn_n(succ, -1);
                        if (is_Block_unreachable(succ_blk))
                                continue;
                        dca = consumer_dom_dca(dca, succ, node);
@@ -2066,7 +2070,6 @@ static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca)
        return dca;
 }
 
-#ifdef CAN_PLACE_PROJS
 static void set_projs_block(ir_node *node, ir_node *block)
 {
        int i;
@@ -2082,7 +2085,6 @@ static void set_projs_block(ir_node *node, ir_node *block)
                set_nodes_block(succ, block);
        }
 }
-#endif
 
 /**
  * Find the latest legal block for N and place N into the
@@ -2110,7 +2112,7 @@ static void place_floats_late(ir_node *n, pdeq *worklist) {
            (get_irn_mode(n) != mode_X)) {
                /* Remember the early_blk placement of this block to move it
                   out of loop no further than the early_blk placement. */
-               early_blk = get_nodes_block(n);
+               early_blk = get_irn_n(n, -1);
 
                /*
                 * BEWARE: Here we also get code, that is live, but
@@ -2150,11 +2152,9 @@ static void place_floats_late(ir_node *n, pdeq *worklist) {
                                if (dca != NULL) {
                                        set_nodes_block(n, dca);
                                        move_out_of_loops(n, early_blk);
-#ifdef CAN_PLACE_PROJS
                                        if(get_irn_mode(n) == mode_T) {
                                                set_projs_block(n, get_nodes_block(n));
                                        }
-#endif
                                }
                        }
                }
@@ -2225,6 +2225,10 @@ void place_code(ir_graph *irg) {
        current_ir_graph = rem;
 }
 
+typedef struct cf_env {
+       char changed;       /**< flag indicates that the cf graphs has changed. */
+} cf_env;
+
 /**
  * Called by walker of remove_critical_cf_edges().
  *
@@ -2232,12 +2236,12 @@ void place_code(ir_graph *irg) {
  * predecessors and a block of multiple successors.
  *
  * @param n   IR node
- * @param env Environment of walker. The changed field.
+ * @param env Environment of walker.
  */
 static void walk_critical_cf_edges(ir_node *n, void *env) {
        int arity, i;
        ir_node *pre, *block, *jmp;
-       int *changed = env;
+       cf_env *cenv = env;
        ir_graph *irg = get_irn_irg(n);
 
        /* Block has multiple predecessors */
@@ -2251,26 +2255,34 @@ static void walk_critical_cf_edges(ir_node *n, void *env) {
 
                        pre = get_irn_n(n, i);
                        cfop = get_irn_op(skip_Proj(pre));
-                       /* Predecessor has multiple successors. Insert new control flow edge but
-                          ignore exception edges. */
-                       if (! is_op_fragile(cfop) && is_op_forking(cfop)) {
+
+                       if (is_op_fragile(cfop)) {
+                               if (cfop != op_Raise)
+                                       goto insert;
+                               continue;
+                       }
+                       if (is_op_forking(cfop)) {
                               /* Predecessor has multiple successors. Insert a new control flow edge. */
+insert:
                                /* set predecessor of new block */
                                block = new_r_Block(irg, 1, &pre);
                                /* insert new jmp node to new block */
                                jmp = new_r_Jmp(irg, block);
                                /* set successor of new block */
                                set_irn_n(n, i, jmp);
-                               *changed = 1;
+                               cenv->changed = 1;
                        } /* predecessor has multiple successors */
                } /* for all predecessors */
        } /* n is a multi-entry block */
 }
 
 void remove_critical_cf_edges(ir_graph *irg) {
-       int changed = 0;
+       cf_env env;
+
+       env.changed = 0;
 
-       irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &changed);
-       if (changed) {
+       irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &env);
+       if (env.changed) {
                /* control flow changed */
                set_irg_outs_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);