reenable floating of const functions (don't care about obscure endless loop cases...
[libfirm] / ir / opt / opt_inline.c
index 315632d..ffa5fbf 100644 (file)
@@ -60,6 +60,7 @@
 #include "irflag_t.h"
 #include "irhooks.h"
 #include "irtools.h"
+#include "iropt_dbg.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
@@ -759,6 +760,7 @@ static void copy_preds_inline(ir_node *n, void *env) {
 
                n = identify_remember(current_ir_graph->value_table, nn);
                if (nn != n) {
+                       DBG_OPT_CSE(nn, n);
                        exchange(nn, n);
                }
        }
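
This hunk is what the new #include in the first hunk is for: identify_remember() value-numbers the freshly copied node nn against the graph's value table and returns an already-existing equivalent node if there is one, and the DBG_OPT_CSE() macro from iropt_dbg.h reports that merge to the debug hooks before exchange() retires the copy. A minimal sketch of the contract, assuming nn was just copied into current_ir_graph:

    ir_node *n = identify_remember(current_ir_graph->value_table, nn);
    if (n != nn) {
            /* an equivalent node already existed: report and reuse it */
            DBG_OPT_CSE(nn, n);  /* fires the CSE debug/statistics hook */
            exchange(nn, n);     /* redirect all edges from nn to n     */
    }
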
@@ -1061,7 +1063,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        /* -- Precompute some values -- */
        end_bl = get_new_node(get_irg_end_block(called_graph));
        end = get_new_node(get_irg_end(called_graph));
-       arity = get_irn_arity(end_bl);    /* arity = n_exc + n_ret  */
+       arity = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
        n_res = get_method_n_ress(get_Call_type(call));
 
        res_pred = xmalloc(n_res * sizeof(*res_pred));
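
This replacement, and the matching ones in every hunk below, swaps the generic node accessors for the Block-specific ones. On a Block the two pairs are interchangeable, but get_Block_n_cfgpreds()/get_Block_cfgpred() assert that their argument really is a Block and make explicit that the predecessors being walked are control-flow edges. The resulting iteration pattern, sketched with the names used above:

    int i, arity = get_Block_n_cfgpreds(end_bl);      /* was get_irn_arity() */
    for (i = 0; i < arity; i++) {
            ir_node *pred = get_Block_cfgpred(end_bl, i); /* was get_irn_n() */
            if (is_Return(pred)) {
                    /* one Return (or exception edge) reaching the end block */
            }
    }
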
@@ -1084,7 +1086,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        n_ret = 0;
        for (i = 0; i < arity; i++) {
                ir_node *ret;
-               ret = get_irn_n(end_bl, i);
+               ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
                        cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
                        n_ret++;
@@ -1098,7 +1100,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        /* First the Memory-Phi */
        n_ret = 0;
        for (i = 0; i < arity; i++) {
-               ret = get_irn_n(end_bl, i);
+               ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
                        cf_pred[n_ret] = get_Return_mem(ret);
                        n_ret++;
@@ -1116,7 +1118,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                for (j = 0; j < n_res; j++) {
                        n_ret = 0;
                        for (i = 0; i < arity; i++) {
-                               ret = get_irn_n(end_bl, i);
+                               ret = get_Block_cfgpred(end_bl, i);
                                if (is_Return(ret)) {
                                        cf_pred[n_ret] = get_Return_res(ret, j);
                                        n_ret++;
@@ -1157,7 +1159,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
                        ir_node *ret, *irn;
-                       ret = get_irn_n(end_bl, i);
+                       ret = get_Block_cfgpred(end_bl, i);
                        irn = skip_Proj(ret);
                        if (is_fragile_op(irn) || is_Raise(irn)) {
                                cf_pred[n_exc] = ret;
@@ -1171,7 +1173,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        n_exc = 0;
                        for (i = 0; i < arity; i++) {
                                ir_node *ret;
-                               ret = skip_Proj(get_irn_n(end_bl, i));
+                               ret = skip_Proj(get_Block_cfgpred(end_bl, i));
                                if (is_Call(ret)) {
                                        cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
                                        n_exc++;
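
The literal 3 in the new Proj selects the Call's exception-memory result. Assuming this revision still uses the classic pn_Call numbering (pn_Call_M_regular = 0, pn_Call_X_except = 1, pn_Call_T_result = 2, pn_Call_M_except = 3), the named constant would say the same thing more readably:

    cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M,
                                pn_Call_M_except); /* assumed == 3 here */
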
@@ -1197,7 +1199,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                /* assert(exc_handling == 1 || no exceptions. ) */
                n_exc = 0;
                for (i = 0; i < arity; i++) {
-                       ir_node *ret = get_irn_n(end_bl, i);
+                       ir_node *ret = get_Block_cfgpred(end_bl, i);
                        ir_node *irn = skip_Proj(ret);
 
                        if (is_fragile_op(irn) || is_Raise(irn)) {
@@ -1962,11 +1964,13 @@ static int calc_inline_benefice(ir_node *call, ir_graph *callee, unsigned *local
        }
 
        callee_env = get_irg_link(callee);
-       if (get_entity_visibility(ent) == visibility_local &&
-           callee_env->n_callers_orig == 1 &&
-           callee != current_ir_graph) {
+       if (callee_env->n_callers == 1 && callee != current_ir_graph) {
                /* we are the only caller, give big bonus */
-               weight += 5000;
+               if (get_entity_visibility(ent) == visibility_local) {
+                       weight += 5000;
+               } else {
+                       weight += 200;
+               }
        }
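
Two things change in the bonus logic: the test now uses the live caller count n_callers instead of n_callers_orig, and the reward is split by visibility, so the comment's "big bonus" only applies to the local case. A local single-caller callee keeps the full +5000 (after inlining its body can disappear entirely), while an externally visible single-caller callee, whose body must be kept anyway, now gets a modest +200 instead of nothing. The real code additionally excludes self-recursion via callee != current_ir_graph. A self-contained toy illustration (all numbers except the two bonuses are invented):

    #include <stdio.h>

    /* hypothetical stand-in for the bonus step of calc_inline_benefice() */
    static int add_caller_bonus(int weight, int n_callers, int is_local)
    {
            if (n_callers == 1)
                    weight += is_local ? 5000 : 200;
            return weight;
    }

    int main(void)
    {
            printf("%d\n", add_caller_bonus(300, 1, 1)); /* 5300: local callee  */
            printf("%d\n", add_caller_bonus(300, 1, 0)); /*  500: global callee */
            return 0;
    }
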
 
        /* do not inline big functions */
@@ -2005,6 +2009,36 @@ static int calc_inline_benefice(ir_node *call, ir_graph *callee, unsigned *local
        return weight;
 }
 
+static ir_graph **irgs;
+static int      last_irg;
+
+static void callgraph_walker(ir_graph *irg, void *data)
+{
+       (void) data;
+       irgs[last_irg++] = irg;
+}
+
+static ir_graph **create_irg_list(void)
+{
+       ir_entity **free_methods;
+       int       arr_len;
+       int       n_irgs = get_irp_n_irgs();
+
+       cgana(&arr_len, &free_methods);
+       xfree(free_methods);
+
+       compute_callgraph();
+
+       last_irg = 0;
+       irgs     = xmalloc(n_irgs * sizeof(*irgs));
+       memset(irgs, 0, n_irgs * sizeof(*irgs));
+
+       callgraph_walk(NULL, callgraph_walker, NULL);
+       assert(n_irgs == last_irg);
+
+       return irgs;
+}
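
create_irg_list() builds the worklist that drives the rest of the patch: cgana() is run only for its side effect of resolving call targets (the free-methods array it returns is discarded immediately), and callgraph_walk() with a post-walker then fills irgs[] in callgraph post-order, so callees appear before their callers. How the list is meant to be consumed, sketched with a hypothetical for_each_irg_callee_first()/process() pair (inline_functions() below is the real consumer):

    void for_each_irg_callee_first(void (*process)(ir_graph *irg))
    {
            int        i, n = get_irp_n_irgs();
            ir_graph **order = create_irg_list();

            for (i = 0; i < n; i++)
                    process(order[i]); /* callees precede their callers */

            xfree(order);
    }
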
+
 /**
  * Heuristic inliner. Calculates a benefice value for every call and inlines
  * those calls with a value higher than the threshold.
@@ -2019,23 +2053,26 @@ void inline_functions(int maxsize, int inline_threshold) {
        const call_entry *centry;
        pmap             *copied_graphs;
        pmap_entry       *pm_entry;
+       ir_graph         **irgs;
 
        rem = current_ir_graph;
        obstack_init(&temp_obst);
 
+       irgs = create_irg_list();
+
        /* a map for the copied graphs, used to inline recursive calls */
        copied_graphs = pmap_create();
 
        /* extend all irgs by a temporary data structure for inlining. */
        n_irgs = get_irp_n_irgs();
        for (i = 0; i < n_irgs; ++i)
-               set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
+               set_irg_link(irgs[i], alloc_inline_irg_env());
 
        /* Precompute information in temporary data structure. */
        wenv.ignore_runtime = 0;
        wenv.ignore_callers = 0;
        for (i = 0; i < n_irgs; ++i) {
-               ir_graph *irg = get_irp_irg(i);
+               ir_graph *irg = irgs[i];
 
                assert(get_irg_phase_state(irg) != phase_building);
                free_callee_info(irg);
@@ -2050,7 +2087,7 @@ void inline_functions(int maxsize, int inline_threshold) {
        for (i = 0; i < n_irgs; ++i) {
                int      phiproj_computed = 0;
                ir_node  *call;
-               ir_graph *irg = get_irp_irg(i);
+               ir_graph *irg = irgs[i];
 
                current_ir_graph = irg;
                env = get_irg_link(irg);
@@ -2164,6 +2201,12 @@ void inline_functions(int maxsize, int inline_threshold) {
                        curr_call = curr_call->next;
                }
 
+       }
+
+       for (i = 0; i < n_irgs; ++i) {
+               ir_graph *irg = irgs[i];
+
+               env = get_irg_link(irg);
                if (env->got_inline) {
                        /* this irg got calls inlined: optimize it */
 
@@ -2195,6 +2238,8 @@ void inline_functions(int maxsize, int inline_threshold) {
        }
        pmap_destroy(copied_graphs);
 
+       xfree(irgs);
+
        obstack_free(&temp_obst, NULL);
        current_ir_graph = rem;
 }
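
Taken together, the tail of the patch turns the old single pass into two: the first loop (unchanged apart from drawing graphs from irgs[] instead of get_irp_irg()) performs all inlining in callee-first order, and the new second loop then revisits every graph and runs the post-inline cleanup only where env->got_inline is set, before the worklist is freed. The resulting driver shape, with hypothetical inline_into_graph()/optimize_inlined_graph() standing in for the two loop bodies:

    for (i = 0; i < n_irgs; ++i)
            inline_into_graph(irgs[i]);              /* pass 1: inline     */

    for (i = 0; i < n_irgs; ++i) {
            inline_irg_env *env = get_irg_link(irgs[i]);
            if (env->got_inline)
                    optimize_inlined_graph(irgs[i]); /* pass 2: local opts */
    }
    xfree(irgs);
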