Better fix for the MacroBlock header
[libfirm] / ir / ir / irgopt.c
index a006f8e..18eaccd 100644 (file)
@@ -34,9 +34,9 @@
 #include "irgraph_t.h"
 #include "irprog_t.h"
 
-#include "ircons.h"
+#include "iroptimize.h"
+#include "ircons_t.h"
 #include "iropt_t.h"
-#include "cfopt.h"
 #include "irgopt.h"
 #include "irgmod.h"
 #include "irgwalk.h"
  */
 static void optimize_in_place_wrapper (ir_node *n, void *env) {
        ir_node *optimized = optimize_in_place_2(n);
-       if (optimized != n) exchange (n, optimized);
+       (void) env;
+
+       if (optimized != n) {
+               exchange (n, optimized);
+       }
 }
 
 /**
@@ -111,6 +115,8 @@ void local_optimize_node(ir_node *n) {
  * Block-Walker: uses dominance depth to mark dead blocks.
  */
 static void kill_dead_blocks(ir_node *block, void *env) {
+       (void) env;
+
        if (get_Block_dom_depth(block) < 0) {
                /*
                 * Note that the new dominance code correctly handles
@@ -357,20 +363,31 @@ static void copy_node(ir_node *n, void *env) {
  * Copies new predecessors of old node to new node remembered in link.
  * Spare the Bad predecessors of Phi and Block nodes.
  */
-void
-copy_preds(ir_node *n, void *env) {
+static void copy_preds(ir_node *n, void *env) {
        ir_node *nn, *block;
        int i, j, irn_arity;
+       (void) env;
 
        nn = get_new_node(n);
 
        if (is_Block(n)) {
+               /* copy the macro block header */
+               ir_node *mbh = get_Block_MacroBlock(n);
+
+               if (mbh == n) {
+                       /* this block is a macroblock header */
+                       set_irn_n(nn, -1, nn);
+               } else {
+                       /* get the macro block header */
+                       set_irn_n(nn, -1, get_new_node(mbh));
+               }
+
                /* Don't copy Bad nodes. */
                j = 0;
                irn_arity = get_irn_arity(n);
                for (i = 0; i < irn_arity; i++) {
                        if (! is_Bad(get_irn_n(n, i))) {
-                               set_irn_n (nn, j, get_new_node(get_irn_n(n, i)));
+                               set_irn_n(nn, j, get_new_node(get_irn_n(n, i)));
                                /*if (is_backedge(n, i)) set_backedge(nn, j);*/
                                j++;
                        }
@@ -395,7 +412,7 @@ copy_preds(ir_node *n, void *env) {
                                exchange(nn, old);
                        }
                }
-       } else if (get_irn_op(n) == op_Phi) {
+       } else if (is_Phi(n)) {
                /* Don't copy node if corresponding predecessor in block is Bad.
                   The Block itself should not be Bad. */
                block = get_nodes_block(n);
@@ -490,8 +507,8 @@ static void copy_graph(ir_graph *irg, int copy_node_nr) {
        /* Note: from yet, the visited flag of the graph is equal to vfl + 1 */
 
        /* visit the anchors as well */
-       for (i = anchor_max - 1; i >= 0; --i) {
-               ir_node *n = irg->anchors[i];
+       for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+               ir_node *n = get_irg_anchor(irg, i);
 
                if (n && (get_irn_visited(n) <= vfl)) {
                        set_irg_visited(irg, vfl);
@@ -548,7 +565,7 @@ static void copy_graph(ir_graph *irg, int copy_node_nr) {
 static void
 copy_graph_env(int copy_node_nr) {
        ir_graph *irg = current_ir_graph;
-       ir_node *old_end, *n;
+       ir_node *old_end, *new_anchor;
        int i;
 
        /* remove end_except and end_reg nodes */
@@ -559,9 +576,10 @@ copy_graph_env(int copy_node_nr) {
        /* Not all nodes remembered in irg might be reachable
           from the end node.  Assure their link is set to NULL, so that
           we can test whether new nodes have been computed. */
-       for (i = anchor_max - 1; i >= 0; --i) {
-               if (irg->anchors[i])
-                       set_new_node(irg->anchors[i], NULL);
+       for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+               ir_node *n = get_irg_anchor(irg, i);
+               if (n != NULL)
+                       set_new_node(n, NULL);
        }
        /* we use the block walk flag for removing Bads from Blocks ins. */
        inc_irg_block_visited(irg);
@@ -569,14 +587,20 @@ copy_graph_env(int copy_node_nr) {
        /* copy the graph */
        copy_graph(irg, copy_node_nr);
 
-       /* fix the fields in irg */
-       old_end = get_irg_end(irg);
-       for (i = anchor_max - 1; i >= 0; --i) {
-               n = irg->anchors[i];
+       /* fix the anchor */
+       old_end    = get_irg_end(irg);
+       new_anchor = new_Anchor(irg);
+
+       for (i = get_irg_n_anchors(irg) - 1; i >= 0; --i) {
+               ir_node *n = get_irg_anchor(irg, i);
                if (n)
-                       irg->anchors[i] = get_new_node(n);
+                       set_irn_n(new_anchor, i, get_new_node(n));
        }
        free_End(old_end);
+       irg->anchor = new_anchor;
+
+       /* ensure the new anchor is placed in the endblock */
+       set_irn_n(new_anchor, -1, get_irg_end_block(irg));
 }
 
 /**
@@ -635,7 +659,7 @@ dead_node_elimination(ir_graph *irg) {
 
                /* Free memory from old unoptimized obstack */
                obstack_free(graveyard_obst, 0);  /* First empty the obstack ... */
-               xfree (graveyard_obst);           /* ... then free it.           */
+               xfree(graveyard_obst);            /* ... then free it.           */
 
                /* inform statistics that the run is over */
                hook_dead_node_elim(irg, 0);
@@ -655,6 +679,7 @@ dead_node_elimination(ir_graph *irg) {
 static void relink_bad_block_predecessors(ir_node *n, void *env) {
        ir_node **new_in, *irn;
        int i, new_irn_n, old_irn_arity, new_irn_arity = 0;
+       (void) env;
 
        /* if link field of block is NULL, look for bad predecessors otherwise
           this is already done */
@@ -775,6 +800,7 @@ typedef struct _survive_dce_list_t {
 
 static void dead_node_hook(void *context, ir_graph *irg, int start) {
        survive_dce_t *sd = context;
+       (void) irg;
 
        /* Create a new map before the dead node elimination is performed. */
        if (start) {
@@ -793,6 +819,7 @@ static void dead_node_hook(void *context, ir_graph *irg, int start) {
 static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_node *nw) {
        survive_dce_t *sd = context;
        survive_dce_list_t *list = pmap_get(sd->places, old);
+       (void) irg;
 
        /* If the node is to be patched back, write the new address to all registered locations. */
        if (list) {
@@ -943,6 +970,12 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
        return res;
 }
 
+enum exc_mode {
+          exc_handler    = 0, /**< There is a handler. */
+          exc_to_end     = 1, /**< Branches to End. */
+          exc_no_handler = 2  /**< Exception handling not represented. */
+};
+
 /* Inlines a method at the given call site. */
 int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node *pre_call;
@@ -953,7 +986,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node **cf_pred;
        ir_node *ret, *phi;
        int arity, n_ret, n_exc, n_res, i, j, rem_opt, irn_arity;
-       int exc_handling;
+       enum exc_mode exc_handling;
        ir_type *called_frame;
        irg_inline_property prop = get_irg_inline_property(called_graph);
 
@@ -1013,13 +1046,13 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        {
                ir_node *proj, *Mproj = NULL, *Xproj = NULL;
                for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
-                       assert(is_Proj(proj));
-                       if (get_Proj_proj(proj) == pn_Call_X_except) Xproj = proj;
-                       if (get_Proj_proj(proj) == pn_Call_M_except) Mproj = proj;
+                       long proj_nr = get_Proj_proj(proj);
+                       if (proj_nr == pn_Call_X_except) Xproj = proj;
+                       if (proj_nr == pn_Call_M_except) Mproj = proj;
                }
-               if      (Mproj) { assert(Xproj); exc_handling = 0; } /*  Mproj           */
-               else if (Xproj) {                exc_handling = 1; } /* !Mproj &&  Xproj   */
-               else            {                exc_handling = 2; } /* !Mproj && !Xproj   */
+               if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
+               else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
+               else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
        }
 
        /* --
@@ -1085,7 +1118,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        /* -- Performing dead node elimination inlines the graph -- */
        /* Copies the nodes to the obstack of current_ir_graph. Updates links to new
           entities. */
-       /* @@@ endless loops are not copied!! -- they should be, I think... */
        irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds,
                 get_irg_frame_type(called_graph));
 
@@ -1121,8 +1153,11 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 
        /* -- archive keepalives -- */
        irn_arity = get_irn_arity(end);
-       for (i = 0; i < irn_arity; i++)
-               add_End_keepalive(get_irg_end(current_ir_graph), get_irn_n(end, i));
+       for (i = 0; i < irn_arity; i++) {
+               ir_node *ka = get_End_keepalive(end, i);
+               if (! is_Bad(ka))
+                       add_End_keepalive(get_irg_end(current_ir_graph), ka);
+       }
 
        /* The new end node will die.  We need not free as the in array is on the obstack:
           copy_node() only generated 'D' arrays. */
@@ -1141,7 +1176,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 
        /* -- Build a Tuple for all results of the method.
           Add Phi node if there was more than one Return.  -- */
-       turn_into_tuple(post_call, 4); /* FIXME: is th 4 corrct here ? */
+       turn_into_tuple(post_call, pn_Call_max);
        /* First the Memory-Phi */
        n_ret = 0;
        for (i = 0; i < arity; i++) {
@@ -1184,6 +1219,12 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        } else {
                set_Tuple_pred(call, pn_Call_T_result, new_Bad());
        }
+       /* handle the regular call */
+       set_Tuple_pred(call, pn_Call_X_regular, new_Jmp());
+
+       /* For now, we cannot inline calls with value_base */
+       set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
+
        /* Finally the exception control flow.
           We have two (three) possible situations:
           First if the Call branches to an exception handler: We need to add a Phi node to
@@ -1194,14 +1235,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           Second the Call branches to End, the exception is not handled.  Just
           add all inlined exception branches to the End node.
           Third: there is no Exception edge at all. Handle as case two. */
-       if (exc_handling == 0) {
+       if (exc_handling == exc_handler) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
-                       ir_node *ret;
+                       ir_node *ret, *irn;
                        ret = get_irn_n(end_bl, i);
-                       if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+                       irn = skip_Proj(ret);
+                       if (is_fragile_op(irn) || is_Raise(irn)) {
                                cf_pred[n_exc] = ret;
-                               n_exc++;
+                               ++n_exc;
                        }
                }
                if (n_exc > 0) {
@@ -1238,23 +1280,24 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
                        ir_node *ret = get_irn_n(end_bl, i);
+                       ir_node *irn = skip_Proj(ret);
 
-                       if (is_fragile_op(skip_Proj(ret)) || (get_irn_op(skip_Proj(ret)) == op_Raise)) {
+                       if (is_fragile_op(irn) || (get_irn_op(irn) == op_Raise)) {
                                cf_pred[n_exc] = ret;
                                n_exc++;
                        }
                }
                main_end_bl = get_irg_end_block(current_ir_graph);
                main_end_bl_arity = get_irn_arity(main_end_bl);
-               end_preds =  xmalloc ((n_exc + main_end_bl_arity) * sizeof(*end_preds));
+               end_preds =  xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
 
                for (i = 0; i < main_end_bl_arity; ++i)
                        end_preds[i] = get_irn_n(main_end_bl, i);
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-               set_Tuple_pred(call, pn_Call_M_except, new_Bad());
+               set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
+               set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
                free(end_preds);
        }
        free(res_pred);
@@ -1995,6 +2038,54 @@ static void move_out_of_loops(ir_node *n, ir_node *early) {
        }
 }
 
+/* deepest common ancestor in the dominator tree of all nodes'
+   blocks depending on us; our final placement has to dominate DCA. */
+static ir_node *get_deepest_common_ancestor(ir_node *node, ir_node *dca)
+{
+       int i;
+
+       for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
+               ir_node *succ = get_irn_out(node, i);
+               ir_node *succ_blk;
+
+               if (is_End(succ)) {
+                       /*
+                        * This consumer is the End node, a keep alive edge.
+                        * This is not a real consumer, so we ignore it
+                        */
+                       continue;
+               }
+
+               if(is_Proj(succ)) {
+                       dca = get_deepest_common_ancestor(succ, dca);
+               } else {
+                       /* ignore if succ is in dead code */
+                       succ_blk = get_irn_n(succ, -1);
+                       if (is_Block_unreachable(succ_blk))
+                               continue;
+                       dca = consumer_dom_dca(dca, succ, node);
+               }
+       }
+
+       return dca;
+}
+
+static void set_projs_block(ir_node *node, ir_node *block)
+{
+       int i;
+
+       for (i = get_irn_n_outs(node) - 1; i >= 0; --i) {
+               ir_node *succ = get_irn_out(node, i);
+
+               assert(is_Proj(succ));
+
+               if(get_irn_mode(succ) == mode_T) {
+                       set_projs_block(succ, block);
+               }
+               set_nodes_block(succ, block);
+       }
+}
+
 /**
  * Find the latest legal block for N and place N into the
  * `optimal' Block between the latest and earliest legal block.
@@ -2054,31 +2145,16 @@ static void place_floats_late(ir_node *n, pdeq *worklist) {
                            (op != op_SymConst) &&
                            (op != op_Proj))
                        {
-                               ir_node *dca = NULL;  /* deepest common ancestor in the
-                                                                                dominator tree of all nodes'
-                                                                                blocks depending on us; our final
-                                                                                placement has to dominate DCA. */
-                               for (i = get_irn_n_outs(n) - 1; i >= 0; --i) {
-                                       ir_node *succ = get_irn_out(n, i);
-                                       ir_node *succ_blk;
-
-                                       if (get_irn_op(succ) == op_End) {
-                                               /*
-                                                * This consumer is the End node, a keep alive edge.
-                                                * This is not a real consumer, so we ignore it
-                                                */
-                                               continue;
-                                       }
-
-                                       /* ignore if succ is in dead code */
-                                       succ_blk = get_irn_n(succ, -1);
-                                       if (is_Block_unreachable(succ_blk))
-                                               continue;
-                                       dca = consumer_dom_dca(dca, succ, n);
-                               }
-                               if (dca) {
+                               /* deepest common ancestor in the dominator tree of all nodes'
+                                  blocks depending on us; our final placement has to dominate
+                                  DCA. */
+                               ir_node *dca = get_deepest_common_ancestor(n, NULL);
+                               if (dca != NULL) {
                                        set_nodes_block(n, dca);
                                        move_out_of_loops(n, early_blk);
+                                       if(get_irn_mode(n) == mode_T) {
+                                               set_projs_block(n, get_nodes_block(n));
+                                       }
                                }
                        }
                }
@@ -2149,6 +2225,10 @@ void place_code(ir_graph *irg) {
        current_ir_graph = rem;
 }
 
+typedef struct cf_env {
+       char changed;       /**< flag indicating that the cf graph has changed. */
+} cf_env;
+
 /**
  * Called by walker of remove_critical_cf_edges().
  *
@@ -2156,12 +2236,12 @@ void place_code(ir_graph *irg) {
  * predecessors and a block of multiple successors.
  *
  * @param n   IR node
- * @param env Environment of walker. The changed field.
+ * @param env Environment of walker.
  */
 static void walk_critical_cf_edges(ir_node *n, void *env) {
        int arity, i;
        ir_node *pre, *block, *jmp;
-       int *changed = env;
+       cf_env *cenv = env;
        ir_graph *irg = get_irn_irg(n);
 
        /* Block has multiple predecessors */
@@ -2175,26 +2255,34 @@ static void walk_critical_cf_edges(ir_node *n, void *env) {
 
                        pre = get_irn_n(n, i);
                        cfop = get_irn_op(skip_Proj(pre));
-                       /* Predecessor has multiple successors. Insert new control flow edge but
-                          ignore exception edges. */
-                       if (! is_op_fragile(cfop) && is_op_forking(cfop)) {
+
+                       if (is_op_fragile(cfop)) {
+                               if (cfop != op_Raise)
+                                       goto insert;
+                               continue;
+                       }
+                       if (is_op_forking(cfop)) {
+                               /* Predecessor has multiple successors. Insert new control flow edges. */
+insert:
                                /* set predecessor of new block */
                                block = new_r_Block(irg, 1, &pre);
                                /* insert new jmp node to new block */
                                jmp = new_r_Jmp(irg, block);
                                /* set successor of new block */
                                set_irn_n(n, i, jmp);
-                               *changed = 1;
+                               cenv->changed = 1;
                        } /* predecessor has multiple successors */
                } /* for all predecessors */
        } /* n is a multi-entry block */
 }
 
 void remove_critical_cf_edges(ir_graph *irg) {
-       int changed = 0;
+       cf_env env;
+
+       env.changed = 0;
 
-       irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &changed);
-       if (changed) {
+       irg_block_walk_graph(irg, NULL, walk_critical_cf_edges, &env);
+       if (env.changed) {
                /* control flow changed */
                set_irg_outs_inconsistent(irg);
                set_irg_extblk_inconsistent(irg);