really remove type_id
diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index fdba0f9..f2753aa 100644
--- a/ir/opt/opt_inline.c
+++ b/ir/opt/opt_inline.c
@@ -50,7 +50,7 @@
 #include "irouts.h"
 #include "irloop_t.h"
 #include "irbackedge_t.h"
-#include "opt_inline_t.h"
+#include "opt_init.h"
 #include "cgana.h"
 #include "trouts.h"
 #include "error.h"
@@ -61,6 +61,7 @@
 #include "irhooks.h"
 #include "irtools.h"
 #include "iropt_dbg.h"
+#include "irpass_t.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
@@ -164,15 +165,10 @@ static void copy_node(ir_node *n, void *env) {
        }
        copy_node_attr(n, nn);
 
-#ifdef DEBUG_libfirm
-       {
-               int copy_node_nr = env != NULL;
-               if (copy_node_nr) {
-                       /* for easier debugging, we want to copy the node numbers too */
-                       nn->node_nr = n->node_nr;
-               }
+       if (env != NULL) {
+               /* for easier debugging, we want to copy the node numbers too */
+               nn->node_nr = n->node_nr;
        }
-#endif
 
        set_new_node(n, nn);
        hook_dead_node_elim_subst(current_ir_graph, n, nn);
@@ -221,7 +217,7 @@ static void copy_preds(ir_node *n, void *env) {
                   in array contained Bads.  Now it's possible.
                   We don't call optimize_in_place as it requires
                   that the fields in ir_graph are set properly. */
-               if (!has_Block_label(nn) &&
+               if (!has_Block_entity(nn) &&
                    get_opt_control_flow_straightening() &&
                    get_Block_n_cfgpreds(nn) == 1 &&
                    is_Jmp(get_Block_cfgpred(nn, 0))) {
@@ -496,6 +492,10 @@ void dead_node_elimination(ir_graph *irg) {
 #endif
 }
 
+ir_graph_pass_t *dead_node_elimination_pass(const char *name) {
+       return def_graph_pass(name ? name : "dce", dead_node_elimination);
+}
+
 /**
  * Relink bad predecessors of a block and store the old in array to the
  * link field. This function is called by relink_bad_predecessors().
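The dead_node_elimination_pass() constructor added above is the first of a
series of pass constructors introduced by this commit.  Since
dead_node_elimination() takes nothing but the graph, def_graph_pass() can wrap
it directly; the same one-liner works for any void(*)(ir_graph*) optimization.
A hypothetical sketch (my_opt/my_opt_pass are placeholders, not libfirm API):

    /* Placeholder optimization with the required signature. */
    static void my_opt(ir_graph *irg);

    /* Expose it as a pass, falling back to a default name. */
    ir_graph_pass_t *my_opt_pass(const char *name) {
        return def_graph_pass(name ? name : "my_opt", my_opt);
    }

Optimizations that need extra parameters instead use the context-struct plus
wrapper pattern shown further down for inline_small_irgs_pass() and friends.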
@@ -703,7 +703,7 @@ void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
        if (*place != NULL) {
                ir_node *irn      = *place;
                survive_dce_list_t *curr = pmap_get(sd->places, irn);
-               survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
+               survive_dce_list_t *nw   = OALLOC(&sd->obst, survive_dce_list_t);
 
                nw->next  = curr;
                nw->place = place;
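Several hunks in this commit also replace raw obstack_alloc(&obst, sizeof(...))
calls with the OALLOC() convenience macro.  A minimal sketch of what such a
macro presumably expands to (the real libfirm definition may differ in its
casting details):

    /* Assumed definition, for illustration only. */
    #define OALLOC(obst, type)  ((type *)obstack_alloc((obst), sizeof(type)))

    /* So the allocation above is equivalent to
     *   (survive_dce_list_t *)obstack_alloc(&sd->obst, sizeof(survive_dce_list_t));
     * but names the element type exactly once. */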
@@ -731,14 +731,15 @@ static void copy_node_inline(ir_node *n, void *env) {
 
        copy_node(n, NULL);
        if (is_Sel(n)) {
-               nn = get_new_node (n);
+               nn = get_new_node(n);
                assert(is_Sel(nn));
+               /* use copied entities from the new frame */
                if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
                        set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
                }
        } else if (is_Block(n)) {
-               nn = get_new_node (n);
-               nn->attr.block.irg = current_ir_graph;
+               nn = get_new_node(n);
+               nn->attr.block.irg.irg = current_ir_graph;
        }
 }
 
@@ -836,9 +837,8 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
 }
 
 enum exc_mode {
-       exc_handler    = 0, /**< There is a handler. */
-       exc_to_end     = 1, /**< Branches to End. */
-       exc_no_handler = 2  /**< Exception handling not represented. */
+       exc_handler,    /**< There is a handler. */
+       exc_no_handler  /**< Exception handling not represented. */
 };
 
 /* Inlines a method at the given call site. */
@@ -852,6 +852,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node             **args_in;
        ir_node             *ret, *phi;
        int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
+       int                 n_mem_phi;
        enum exc_mode       exc_handling;
        ir_type             *called_frame, *curr_frame, *mtp, *ctp;
        ir_entity           *ent;
@@ -866,18 +867,22 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 
        mtp = get_entity_type(ent);
        ctp = get_Call_type(call);
-       if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
+       n_params = get_method_n_params(mtp);
+       n_res    = get_method_n_ress(mtp);
+       if (n_params > get_method_n_params(ctp)) {
                /* this is a bad feature of C: without a prototype, we can
                 * call a function with fewer parameters than needed. Currently
                 * we don't support this, although we could use Unknown then. */
                return 0;
        }
+       if (n_res != get_method_n_ress(ctp)) {
+               return 0;
+       }
 
        /* Argh, compiling C has some bad consequences:
         * It is implementation dependent what happens in that case.
         * We support inlining, if the bitsize of the types matches AND
         * the same arithmetic is used. */
-       n_params = get_method_n_params(mtp);
        for (i = n_params - 1; i >= 0; --i) {
                ir_type *param_tp = get_method_param_type(mtp, i);
                ir_type *arg_tp   = get_method_param_type(ctp, i);
@@ -895,6 +900,22 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        /* otherwise we can simply "reinterpret" the bits */
                }
        }
+       for (i = n_res - 1; i >= 0; --i) {
+               ir_type *decl_res_tp = get_method_res_type(mtp, i);
+               ir_type *used_res_tp = get_method_res_type(ctp, i);
+
+               if (decl_res_tp != used_res_tp) {
+                       ir_mode *decl_mode = get_type_mode(decl_res_tp);
+                       ir_mode *used_mode = get_type_mode(used_res_tp);
+                       if (decl_mode == NULL || used_mode == NULL)
+                               return 0;
+                       if (get_mode_size_bits(decl_mode) != get_mode_size_bits(used_mode))
+                               return 0;
+                       if (get_mode_arithmetic(decl_mode) != get_mode_arithmetic(used_mode))
+                               return 0;
+                       /* otherwise we can "reinterpret" the bits */
+               }
+       }
 
        irg = get_irn_irg(call);
 
@@ -943,18 +964,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           for the Call node, or do we branch directly to End on an exception?
           exc_handling:
           0 There is a handler.
-          1 Branches to End.
           2 Exception handling not represented in Firm. -- */
        {
-               ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+               ir_node *Xproj = NULL;
+               ir_node *proj;
                for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
                        long proj_nr = get_Proj_proj(proj);
                        if (proj_nr == pn_Call_X_except) Xproj = proj;
-                       if (proj_nr == pn_Call_M_except) Mproj = proj;
                }
-               if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
-               else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
-               else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
+               exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
        }
 
        /* create the argument tuple */
@@ -967,7 +985,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                ir_mode *mode     = get_type_mode(param_tp);
 
                if (mode != get_irn_mode(arg)) {
-                       arg = new_r_Conv(irg, block, arg, mode);
+                       arg = new_r_Conv(block, arg, mode);
                }
                args_in[i] = arg;
        }
@@ -1092,7 +1110,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                ir_node *ret;
                ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
-                       cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
+                       cf_pred[n_ret] = new_r_Jmp(get_nodes_block(ret));
                        n_ret++;
                }
        }
@@ -1102,16 +1120,24 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           Add Phi node if there was more than one Return.  -- */
        turn_into_tuple(post_call, pn_Call_max);
        /* First the Memory-Phi */
-       n_ret = 0;
+       n_mem_phi = 0;
        for (i = 0; i < arity; i++) {
                ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
-                       cf_pred[n_ret] = get_Return_mem(ret);
-                       n_ret++;
+                       cf_pred[n_mem_phi++] = get_Return_mem(ret);
+               }
+               /* memory output for some exceptions is directly connected to End */
+               if (is_Call(ret)) {
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
+               } else if (is_fragile_op(ret)) {
+                       /* We rely on all cfops having the memory output at the same position. */
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
+               } else if (is_Raise(ret)) {
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
                }
        }
-       phi = new_Phi(n_ret, cf_pred, mode_M);
-       set_Tuple_pred(call, pn_Call_M_regular, phi);
+       phi = new_Phi(n_mem_phi, cf_pred, mode_M);
+       set_Tuple_pred(call, pn_Call_M, phi);
        /* Conserve Phi-list for further inlinings -- but might be optimized */
        if (get_nodes_block(phi) == post_bl) {
                set_irn_link(phi, get_irn_link(post_bl));
@@ -1120,11 +1146,18 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        /* Now the real results */
        if (n_res > 0) {
                for (j = 0; j < n_res; j++) {
+                       ir_type *res_type = get_method_res_type(ctp, j);
+                       ir_mode *res_mode = get_type_mode(res_type);
                        n_ret = 0;
                        for (i = 0; i < arity; i++) {
                                ret = get_Block_cfgpred(end_bl, i);
                                if (is_Return(ret)) {
-                                       cf_pred[n_ret] = get_Return_res(ret, j);
+                                       ir_node *res = get_Return_res(ret, j);
+                                       if (get_irn_mode(res) != res_mode) {
+                                               ir_node *block = get_nodes_block(res);
+                                               res = new_r_Conv(block, res, res_mode);
+                                       }
+                                       cf_pred[n_ret] = res;
                                        n_ret++;
                                }
                        }
@@ -1150,15 +1183,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
 
        /* Finally the exception control flow.
-          We have two (three) possible situations:
-          First if the Call branches to an exception handler: We need to add a Phi node to
+          We have two possible situations:
+          First if the Call branches to an exception handler:
+          We need to add a Phi node to
           collect the memory containing the exception objects.  Further we need
           to add another block to get a correct representation of this Phi.  To
           this block we add a Jmp that resolves into the X output of the Call
           when the Call is turned into a tuple.
-          Second the Call branches to End, the exception is not handled.  Just
-          add all inlined exception branches to the End node.
-          Third: there is no Exception edge at all. Handle as case two. */
+          Second: There is no exception edge. Just add all inlined exception
+          branches to the End node.
+        */
        if (exc_handling == exc_handler) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
@@ -1173,29 +1207,9 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                if (n_exc > 0) {
                        ir_node *block = new_Block(n_exc, cf_pred);
                        set_cur_block(block);
-
                        set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
-                       /* The Phi for the memories with the exception objects */
-                       n_exc = 0;
-                       for (i = 0; i < arity; i++) {
-                               ir_node *ret;
-                               ret = skip_Proj(get_Block_cfgpred(end_bl, i));
-                               if (is_Call(ret)) {
-                                       cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
-                                       n_exc++;
-                               } else if (is_fragile_op(ret)) {
-                                       /* We rely that all cfops have the memory output at the same position. */
-                                       cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
-                                       n_exc++;
-                               } else if (is_Raise(ret)) {
-                                       cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
-                                       n_exc++;
-                               }
-                       }
-                       set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
                } else {
                        set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-                       set_Tuple_pred(call, pn_Call_M_except, new_Bad());
                }
        } else {
                ir_node *main_end_bl;
@@ -1222,8 +1236,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
-               set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
+               set_Tuple_pred(call, pn_Call_X_except, new_Bad());
                free(end_preds);
        }
        free(res_pred);
@@ -1290,7 +1303,7 @@ static void collect_calls(ir_node *call, void *env) {
                if (called_irg != NULL) {
                        /* The Call node calls a locally defined method.  Remember to inline. */
                        inline_env_t *ienv  = env;
-                       call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
+                       call_entry   *entry = OALLOC(&ienv->obst, call_entry);
                        entry->call       = call;
                        entry->callee     = called_irg;
                        entry->loop_depth = 0;
@@ -1355,6 +1368,31 @@ void inline_small_irgs(ir_graph *irg, int size) {
        current_ir_graph = rem;
 }
 
+struct inline_small_irgs_pass_t {
+       ir_graph_pass_t pass;
+       int            size;
+};
+
+/**
+ * Wrapper to run inline_small_irgs() as a pass.
+ */
+static int inline_small_irgs_wrapper(ir_graph *irg, void *context) {
+       struct inline_small_irgs_pass_t *pass = context;
+
+       inline_small_irgs(irg, pass->size);
+       return 0;
+}
+
+/* create a pass for inline_small_irgs() */
+ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size) {
+       struct inline_small_irgs_pass_t *pass =
+               XMALLOCZ(struct inline_small_irgs_pass_t);
+
+       pass->size = size;
+       return def_graph_pass_constructor(
+               &pass->pass, name ? name : "inline_small_irgs", inline_small_irgs_wrapper);
+}
+
 /**
  * Environment for inlining irgs.
  */
@@ -1369,7 +1407,6 @@ typedef struct {
        unsigned  n_callers;         /**< Number of known graphs that call this graph. */
        unsigned  n_callers_orig;    /**< for statistics */
        unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
-       unsigned  local_vars:1;      /**< Set, if an inlined function got the address of a local variable. */
        unsigned  recursive:1;       /**< Set, if this function is self recursive. */
 } inline_irg_env;
 
@@ -1377,7 +1414,7 @@ typedef struct {
  * Allocate a new environment for inlining.
  */
 static inline_irg_env *alloc_inline_irg_env(void) {
-       inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
+       inline_irg_env *env    = OALLOC(&temp_obst, inline_irg_env);
        INIT_LIST_HEAD(&env->calls);
        env->local_weights     = NULL;
        env->n_nodes           = -2; /* do not count Start, End */
@@ -1388,7 +1425,6 @@ static inline_irg_env *alloc_inline_irg_env(void) {
        env->n_callers         = 0;
        env->n_callers_orig    = 0;
        env->got_inline        = 0;
-       env->local_vars        = 0;
        env->recursive         = 0;
        return env;
 }
@@ -1450,7 +1486,7 @@ static void collect_calls2(ir_node *call, void *ctx) {
                        x->recursive = 1;
 
                /* link it in the list of possible inlinable entries */
-               entry = obstack_alloc(&temp_obst, sizeof(*entry));
+               entry = OALLOC(&temp_obst, call_entry);
                entry->call       = call;
                entry->callee     = callee;
                entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
@@ -1490,7 +1526,7 @@ inline static int is_smaller(ir_graph *callee, unsigned size) {
  */
 static call_entry *duplicate_call_entry(const call_entry *entry,
                                         ir_node *new_call, int loop_depth_delta) {
-       call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
+       call_entry *nentry = OALLOC(&temp_obst, call_entry);
        nentry->call       = new_call;
        nentry->callee     = entry->callee;
        nentry->benefice   = entry->benefice;
@@ -1776,6 +1812,45 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        current_ir_graph = rem;
 }
 
+struct inline_leave_functions_pass_t {
+       ir_prog_pass_t pass;
+       unsigned       maxsize;
+       unsigned       leavesize;
+       unsigned       size;
+       int            ignore_runtime;
+};
+
+/**
+ * Wrapper to run inline_leave_functions() as an ir_prog pass.
+ */
+static int inline_leave_functions_wrapper(ir_prog *irp, void *context) {
+       struct inline_leave_functions_pass_t *pass = context;
+
+       (void)irp;
+       inline_leave_functions(
+               pass->maxsize, pass->leavesize,
+               pass->size, pass->ignore_runtime);
+       return 0;
+}
+
+/* create a pass for inline_leave_functions() */
+ir_prog_pass_t *inline_leave_functions_pass(
+       const char *name, unsigned maxsize, unsigned leavesize,
+       unsigned size, int ignore_runtime) {
+       struct inline_leave_functions_pass_t *pass =
+               XMALLOCZ(struct inline_leave_functions_pass_t);
+
+       pass->maxsize        = maxsize;
+       pass->leavesize      = leavesize;
+       pass->size           = size;
+       pass->ignore_runtime = ignore_runtime;
+
+       return def_prog_pass_constructor(
+               &pass->pass,
+               name ? name : "inline_leave_functions",
+               inline_leave_functions_wrapper);
+}
+
 /**
  * Calculate the parameter weights for transmitting the address of a local variable.
  */
@@ -2207,8 +2282,6 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 
                /* callee was inlined. Append its call list. */
                env->got_inline = 1;
-               if (curr_call->local_adr)
-                       env->local_vars = 1;
                --env->n_call_nodes;
 
                /* we just generate a bunch of new calls */
@@ -2245,7 +2318,9 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
  * Heuristic inliner. Calculates a benefice value for every call and inlines
  * those calls with a value higher than the threshold.
  */
-void inline_functions(unsigned maxsize, int inline_threshold) {
+void inline_functions(unsigned maxsize, int inline_threshold,
+                      opt_ptr after_inline_opt)
+{
        inline_irg_env   *env;
        int              i, n_irgs;
        ir_graph         *rem;
@@ -2291,21 +2366,9 @@ void inline_functions(unsigned maxsize, int inline_threshold) {
                ir_graph *irg = irgs[i];
 
                env = get_irg_link(irg);
-               if (env->got_inline) {
+               if (env->got_inline && after_inline_opt != NULL) {
                        /* this irg got calls inlined: optimize it */
-                       if (get_opt_combo()) {
-                               if (env->local_vars) {
-                                       scalar_replacement_opt(irg);
-                               }
-                               combo(irg);
-                       } else {
-                               if (env->local_vars) {
-                                       if (scalar_replacement_opt(irg)) {
-                                               optimize_graph_df(irg);
-                                       }
-                               }
-                               optimize_cf(irg);
-                       }
+                       after_inline_opt(irg);
                }
                if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
                        DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
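With the local_vars flag gone, inline_functions() no longer hard-codes the
post-inline cleanup; the caller supplies an opt_ptr callback instead and
decides on its own whether scalar replacement is worthwhile.  A sketch of a
callback that approximates the removed behaviour, assuming opt_ptr is a plain
void (*)(ir_graph *) as the call site suggests (my_after_inline_opt is a
placeholder name):

    /* Roughly reproduces the removed in-place logic; since the per-graph
     * local_vars information is no longer tracked here, scalar replacement
     * is simply attempted unconditionally. */
    static void my_after_inline_opt(ir_graph *irg) {
        if (get_opt_combo()) {
            scalar_replacement_opt(irg);
            combo(irg);
        } else {
            if (scalar_replacement_opt(irg))
                optimize_graph_df(irg);
            optimize_cf(irg);
        }
    }

It is then handed to the inliner as the new third argument, e.g.
inline_functions(maxsize, inline_threshold, my_after_inline_opt).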
@@ -2331,6 +2394,41 @@ void inline_functions(unsigned maxsize, int inline_threshold) {
        current_ir_graph = rem;
 }
 
+struct inline_functions_pass_t {
+       ir_prog_pass_t pass;
+       unsigned       maxsize;
+       int            inline_threshold;
+       opt_ptr        after_inline_opt;
+};
+
+/**
+ * Wrapper to run inline_functions() as an ir_prog pass.
+ */
+static int inline_functions_wrapper(ir_prog *irp, void *context) {
+       struct inline_functions_pass_t *pass = context;
+
+       (void)irp;
+       inline_functions(pass->maxsize, pass->inline_threshold,
+                        pass->after_inline_opt);
+       return 0;
+}
+
+/* create an ir_prog pass for inline_functions */
+ir_prog_pass_t *inline_functions_pass(
+         const char *name, unsigned maxsize, int inline_threshold,
+         opt_ptr after_inline_opt) {
+       struct inline_functions_pass_t *pass =
+               XMALLOCZ(struct inline_functions_pass_t);
+
+       pass->maxsize          = maxsize;
+       pass->inline_threshold = inline_threshold;
+       pass->after_inline_opt = after_inline_opt;
+
+       return def_prog_pass_constructor(
+               &pass->pass, name ? name : "inline_functions",
+               inline_functions_wrapper);
+}
+
 void firm_init_inline(void) {
        FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
 }
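Taken together, the commit exposes the existing inliners and dead node
elimination through four pass constructors that a driver can hand to its pass
manager.  A purely illustrative sequence (the numeric arguments are placeholder
tuning values, and the surrounding pass-manager plumbing is omitted):

    ir_graph_pass_t *dce   = dead_node_elimination_pass(NULL);   /* defaults to "dce" */
    ir_graph_pass_t *small = inline_small_irgs_pass(NULL, 35);
    ir_prog_pass_t  *leave = inline_leave_functions_pass(NULL, 750, 16, 35, 0);
    ir_prog_pass_t  *inl   = inline_functions_pass(NULL, 750, 0,
                                                   my_after_inline_opt /* see sketch above */);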