diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index d9647c8..eb01365 100644
--- a/ir/opt/opt_inline.c
+++ b/ir/opt/opt_inline.c
@@ -50,7 +50,7 @@
 #include "irouts.h"
 #include "irloop_t.h"
 #include "irbackedge_t.h"
-#include "opt_inline_t.h"
+#include "opt_init.h"
 #include "cgana.h"
 #include "trouts.h"
 #include "error.h"
@@ -492,6 +492,10 @@ void dead_node_elimination(ir_graph *irg) {
 #endif
 }
 
+ir_graph_pass_t *dead_node_elimination_pass(const char *name) {
+       return def_graph_pass(name ? name : "dce", dead_node_elimination);
+}
+
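[Editor's note: def_graph_pass, used in the constructor just added, packages a plain void(ir_graph*) optimization as a named graph pass for libfirm's pass machinery. A minimal sketch of the idea behind such a wrapper, using a deliberately simplified, hypothetical struct layout -- the real ir_graph_pass_t carries more bookkeeping:]

/* hypothetical, simplified pass record -- for illustration only */
typedef struct example_graph_pass_t {
	const char *name;                /* name reported to pass managers/statistics */
	void      (*run)(ir_graph *irg); /* the wrapped optimization */
} example_graph_pass_t;

static example_graph_pass_t *example_def_graph_pass(const char *name,
                                                    void (*run)(ir_graph *irg))
{
	example_graph_pass_t *pass = XMALLOCZ(example_graph_pass_t);
	pass->name = name;
	pass->run  = run;
	return pass;
}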
 /**
  * Relink the bad predecessors of a block and store the old predecessors
  * in an array in the block's link field. Called by relink_bad_predecessors().
@@ -699,7 +703,7 @@ void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
        if (*place != NULL) {
                ir_node *irn      = *place;
                survive_dce_list_t *curr = pmap_get(sd->places, irn);
-               survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
+               survive_dce_list_t *nw   = OALLOC(&sd->obst, survive_dce_list_t);
 
                nw->next  = curr;
                nw->place = place;
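[Editor's note: the obstack_alloc(&obst, sizeof(x)) calls throughout this file are converted to the typed OALLOC macro. Presumably, following libfirm's obstack conventions, it is a thin type-safe wrapper along these lines (stated as an assumption, not quoted from the header):]

#define OALLOC(obst, type)  ((type*)obstack_alloc((obst), sizeof(type)))

[This removes the sizeof(nw[0]) idiom and makes the allocated type visible at the call site. For context, the survive-DCE machinery touched here lets a caller keep ir_node pointers valid across dead_node_elimination, which rebuilds the graph on a fresh obstack. A hedged usage sketch -- new_survive_dce and free_survive_dce are the constructor/destructor names assumed from irgopt.h:]

static void keep_across_dce(ir_graph *irg)
{
	survive_dce_t *sd    = new_survive_dce();
	ir_node       *frame = get_irg_frame(irg);  /* a node we must not lose */

	survive_dce_register_irn(sd, &frame);       /* register the pointer's location */
	dead_node_elimination(irg);                 /* graph is copied, dead nodes dropped */
	/* 'frame' now points at the surviving copy of the node */
	free_survive_dce(sd);
}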
@@ -833,9 +837,8 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
 }
 
 enum exc_mode {
-       exc_handler    = 0, /**< There is a handler. */
-       exc_to_end     = 1, /**< Branches to End. */
-       exc_no_handler = 2  /**< Exception handling not represented. */
+       exc_handler,    /**< There is a handler. */
+       exc_no_handler  /**< Exception handling not represented. */
 };
 
 /* Inlines a method at the given call site. */
@@ -849,6 +852,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node             **args_in;
        ir_node             *ret, *phi;
        int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
+       int                 n_mem_phi;
        enum exc_mode       exc_handling;
        ir_type             *called_frame, *curr_frame, *mtp, *ctp;
        ir_entity           *ent;
@@ -960,18 +964,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           for the Call node, or do we branch directly to End on an exception?
           exc_handling:
-          0 There is a handler.
-          1 Branches to End.
-          2 Exception handling not represented in Firm. -- */
+          exc_handler:    There is a handler.
+          exc_no_handler: Exception handling not represented in Firm. -- */
        {
-               ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+               ir_node *Xproj = NULL;
+               ir_node *proj;
                for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
                        long proj_nr = get_Proj_proj(proj);
                        if (proj_nr == pn_Call_X_except) Xproj = proj;
-                       if (proj_nr == pn_Call_M_except) Mproj = proj;
                }
-               if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
-               else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
-               else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
+               exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
        }
 
        /* create the argument tuple */
@@ -1119,16 +1120,24 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           Add Phi node if there was more than one Return.  -- */
        turn_into_tuple(post_call, pn_Call_max);
        /* First the Memory-Phi */
-       n_ret = 0;
+       n_mem_phi = 0;
        for (i = 0; i < arity; i++) {
                ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
-                       cf_pred[n_ret] = get_Return_mem(ret);
-                       n_ret++;
+                       cf_pred[n_mem_phi++] = get_Return_mem(ret);
+               }
+               /* memory output for some exceptions is directly connected to End */
+               if (is_Call(ret)) {
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
+               } else if (is_fragile_op(ret)) {
+                       /* We rely on all cfops having the memory output at the same position. */
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
+               } else if (is_Raise(ret)) {
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
                }
        }
-       phi = new_Phi(n_ret, cf_pred, mode_M);
-       set_Tuple_pred(call, pn_Call_M_regular, phi);
+       phi = new_Phi(n_mem_phi, cf_pred, mode_M);
+       set_Tuple_pred(call, pn_Call_M, phi);
        /* Conserve Phi-list for further inlinings -- but might be optimized */
        if (get_nodes_block(phi) == post_bl) {
                set_irn_link(phi, get_irn_link(post_bl));
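[Editor's note: a condensed recap of the mechanism above -- the Call node survives inlining; turn_into_tuple converts it in place into a Tuple and set_Tuple_pred rewires each former output, so existing Proj nodes that read the Call's results keep working unmodified:]

turn_into_tuple(post_call, pn_Call_max);  /* the Call becomes a Tuple, same node */
set_Tuple_pred(call, pn_Call_M, phi);     /* Proj(call, pn_Call_M) now yields the merged memory Phi */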
@@ -1174,15 +1183,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
 
        /* Finally the exception control flow.
-          We have two (three) possible situations:
-          First if the Call branches to an exception handler: We need to add a Phi node to
-          collect the memory containing the exception objects.  Further we need
-          to add another block to get a correct representation of this Phi.  To
-          this block we add a Jmp that resolves into the X output of the Call
-          when the Call is turned into a tuple.
-          Second the Call branches to End, the exception is not handled.  Just
-          add all inlined exception branches to the End node.
-          Third: there is no Exception edge at all. Handle as case two. */
+          We have two possible situations:
+          First, the Call branches to an exception handler: we need to add a
+          new block that merges the inlined exception control flows.  To this
+          block we add a Jmp that resolves into the X output of the Call when
+          the Call is turned into a tuple.
+          Second, there is no exception edge: just add all inlined exception
+          branches to the End node. */
        if (exc_handling == exc_handler) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
@@ -1195,31 +1205,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        }
                }
                if (n_exc > 0) {
-                       ir_node *block = new_Block(n_exc, cf_pred);
-                       set_cur_block(block);
-
-                       set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
-                       /* The Phi for the memories with the exception objects */
-                       n_exc = 0;
-                       for (i = 0; i < arity; i++) {
-                               ir_node *ret;
-                               ret = skip_Proj(get_Block_cfgpred(end_bl, i));
-                               if (is_Call(ret)) {
-                                       cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
-                                       n_exc++;
-                               } else if (is_fragile_op(ret)) {
-                                       /* We rely that all cfops have the memory output at the same position. */
-                                       cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
-                                       n_exc++;
-                               } else if (is_Raise(ret)) {
-                                       cf_pred[n_exc] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
-                                       n_exc++;
-                               }
+                       if (n_exc == 1) {
+                               /* only one exception predecessor: no merge block needed */
+                               set_Tuple_pred(call, pn_Call_X_except, cf_pred[0]);
+                       } else {
+                               ir_node *block = new_Block(n_exc, cf_pred);
+                               set_cur_block(block);
+                               set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
                        }
-                       set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
                } else {
                        set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-                       set_Tuple_pred(call, pn_Call_M_except, new_Bad());
                }
        } else {
                ir_node *main_end_bl;
@@ -1247,7 +1242,6 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
                set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-               set_Tuple_pred(call, pn_Call_M_except, new_Bad());
                free(end_preds);
        }
        free(res_pred);
@@ -1291,12 +1285,17 @@ typedef struct _inline_env_t {
  *
  * @param call  the call node
  */
-static ir_graph *get_call_called_irg(ir_node *call) {
+static ir_graph *get_call_called_irg(ir_node *call)
+{
        ir_node *addr;
 
        addr = get_Call_ptr(call);
        if (is_Global(addr)) {
                ir_entity *ent = get_Global_entity(addr);
+               /* we don't know which function will finally be bound to a weak symbol */
+               if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
+                       return NULL;
+
                return get_entity_irg(ent);
        }
 
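[Editor's note: why the IR_LINKAGE_WEAK test above is needed, illustrated at the C level -- a standalone example using the GCC/Clang weak attribute, not code from this file. A weak definition is only a default and may be replaced by a strong definition at link time, so inlining its body could bake in the wrong function:]

/* translation unit A: weak, overridable default */
__attribute__((weak)) int get_limit(void) { return 100; }

int caller(void) { return get_limit(); }

/* translation unit B: strong definition, wins at link time */
int get_limit(void) { return 10; }

/* Had the compiler inlined the weak body into caller(), the linked
 * program would wrongly return 100 instead of 10. */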
@@ -1314,7 +1313,7 @@ static void collect_calls(ir_node *call, void *env) {
                if (called_irg != NULL) {
                        /* The Call node calls a locally defined method.  Remember to inline. */
                        inline_env_t *ienv  = env;
-                       call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
+                       call_entry   *entry = OALLOC(&ienv->obst, call_entry);
                        entry->call       = call;
                        entry->callee     = called_irg;
                        entry->loop_depth = 0;
@@ -1363,8 +1362,7 @@ void inline_small_irgs(ir_graph *irg, int size) {
                        ir_graph            *callee = entry->callee;
                        irg_inline_property prop    = get_irg_inline_property(callee);
 
-                       if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-                               /* do not inline forbidden / weak graphs */
+                       if (prop == irg_inline_forbidden) {
                                continue;
                        }
 
@@ -1418,7 +1416,6 @@ typedef struct {
        unsigned  n_callers;         /**< Number of known graphs that call this graph. */
        unsigned  n_callers_orig;    /**< for statistics */
        unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
-       unsigned  local_vars:1;      /**< Set, if an inlined function got the address of a local variable. */
        unsigned  recursive:1;       /**< Set, if this function is self recursive. */
 } inline_irg_env;
 
@@ -1426,7 +1423,7 @@ typedef struct {
  * Allocate a new environment for inlining.
  */
 static inline_irg_env *alloc_inline_irg_env(void) {
-       inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
+       inline_irg_env *env    = OALLOC(&temp_obst, inline_irg_env);
        INIT_LIST_HEAD(&env->calls);
        env->local_weights     = NULL;
        env->n_nodes           = -2; /* do not count Start and End */
@@ -1437,7 +1434,6 @@ static inline_irg_env *alloc_inline_irg_env(void) {
        env->n_callers         = 0;
        env->n_callers_orig    = 0;
        env->got_inline        = 0;
-       env->local_vars        = 0;
        env->recursive         = 0;
        return env;
 }
@@ -1499,7 +1495,7 @@ static void collect_calls2(ir_node *call, void *ctx) {
                        x->recursive = 1;
 
                /* link it in the list of possible inlinable entries */
-               entry = obstack_alloc(&temp_obst, sizeof(*entry));
+               entry = OALLOC(&temp_obst, call_entry);
                entry->call       = call;
                entry->callee     = callee;
                entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
@@ -1539,7 +1535,7 @@ inline static int is_smaller(ir_graph *callee, unsigned size) {
  */
 static call_entry *duplicate_call_entry(const call_entry *entry,
                                         ir_node *new_call, int loop_depth_delta) {
-       call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
+       call_entry *nentry = OALLOC(&temp_obst, call_entry);
        nentry->call       = new_call;
        nentry->callee     = entry->callee;
        nentry->benefice   = entry->benefice;
@@ -1644,8 +1640,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                callee = entry->callee;
 
                                prop = get_irg_inline_property(callee);
-                               if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-                                       /* do not inline forbidden / weak graphs */
+                               if (prop == irg_inline_forbidden) {
                                        continue;
                                }
 
@@ -1699,8 +1694,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                        callee = entry->callee;
 
                        prop = get_irg_inline_property(callee);
-                       if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-                               /* do not inline forbidden / weak graphs */
+                       if (prop == irg_inline_forbidden) {
                                continue;
                        }
 
@@ -2003,7 +1997,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
                return entry->benefice = INT_MIN;
        }
 
-       if (get_irg_additional_properties(callee) & (mtp_property_noreturn | mtp_property_weak)) {
+       if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
-               DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
-                   call, callee));
+               DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn\n",
+                   call, callee));
                return entry->benefice = INT_MIN;
@@ -2056,7 +2050,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        callee_env = get_irg_link(callee);
        if (callee_env->n_callers == 1 &&
            callee != current_ir_graph &&
-               get_entity_visibility(ent) == visibility_local) {
+           !entity_is_externally_visible(ent)) {
                weight += 700;
        }
 
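[Editor's note: the rewritten visibility check above preserves the old heuristic -- a callee that is not externally visible and has exactly one caller can be inlined and then discarded entirely, hence the large bonus. A tiny standalone C illustration of the pattern the +700 rewards (hypothetical source, not libfirm code):]

static int scale(int x) { return 2 * x + 1; }  /* static: not externally visible */

int only_user(int x)
{
	return scale(x);  /* the single call site; after inlining, scale() is dead */
}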
@@ -2295,8 +2289,6 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 
                /* callee was inlined. Append its call list. */
                env->got_inline = 1;
-               if (curr_call->local_adr)
-                       env->local_vars = 1;
                --env->n_call_nodes;
 
                /* we just generate a bunch of new calls */
@@ -2333,7 +2325,9 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
  * Heuristic inliner. Calculates a benefice value for every call and inlines
  * those calls with a value higher than the threshold.
  */
-void inline_functions(unsigned maxsize, int inline_threshold) {
+void inline_functions(unsigned maxsize, int inline_threshold,
+                      opt_ptr after_inline_opt)
+{
        inline_irg_env   *env;
        int              i, n_irgs;
        ir_graph         *rem;
@@ -2379,21 +2373,9 @@ void inline_functions(unsigned maxsize, int inline_threshold) {
                ir_graph *irg = irgs[i];
 
                env = get_irg_link(irg);
-               if (env->got_inline) {
+               if (env->got_inline && after_inline_opt != NULL) {
                        /* this irg got calls inlined: optimize it */
-                       if (get_opt_combo()) {
-                               if (env->local_vars) {
-                                       scalar_replacement_opt(irg);
-                               }
-                               combo(irg);
-                       } else {
-                               if (env->local_vars) {
-                                       if (scalar_replacement_opt(irg)) {
-                                               optimize_graph_df(irg);
-                                       }
-                               }
-                               optimize_cf(irg);
-                       }
+                       after_inline_opt(irg);
                }
                if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
                        DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
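[Editor's note: with the new signature, the formerly hard-wired clean-up (scalar replacement plus combo, or optimize_graph_df/optimize_cf) moves out of the inliner; the driver supplies it as the after_inline_opt callback. A hedged usage sketch, assuming opt_ptr is a void (*)(ir_graph*) typedef and reusing the passes named in the removed code; parameter values are only examples:]

/* hypothetical driver-side callback composed of existing libfirm passes */
static void cleanup_after_inline(ir_graph *irg)
{
	scalar_replacement_opt(irg);  /* replace local aggregates by SSA values */
	optimize_graph_df(irg);       /* local data-flow optimization */
	optimize_cf(irg);             /* control-flow clean-up */
}

static void run_inliner(void)
{
	inline_functions(750, 0, cleanup_after_inline);
}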
@@ -2423,6 +2405,7 @@ struct inline_functions_pass_t {
        ir_prog_pass_t pass;
        unsigned       maxsize;
        int            inline_threshold;
+       opt_ptr        after_inline_opt;
 };
 
 /**
@@ -2432,18 +2415,21 @@ static int inline_functions_wrapper(ir_prog *irp, void *context) {
        struct inline_functions_pass_t *pass = context;
 
        (void)irp;
-       inline_functions(pass->maxsize, pass->inline_threshold);
+       inline_functions(pass->maxsize, pass->inline_threshold,
+                        pass->after_inline_opt);
        return 0;
 }
 
 /* create an ir_prog pass for inline_functions */
 ir_prog_pass_t *inline_functions_pass(
-         const char *name, unsigned maxsize, int inline_threshold) {
+         const char *name, unsigned maxsize, int inline_threshold,
+         opt_ptr after_inline_opt) {
        struct inline_functions_pass_t *pass =
                XMALLOCZ(struct inline_functions_pass_t);
 
        pass->maxsize          = maxsize;
        pass->inline_threshold = inline_threshold;
+       pass->after_inline_opt = after_inline_opt;
 
        return def_prog_pass_constructor(
                &pass->pass, name ? name : "inline_functions",