diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index 0344ccf..eb01365 100644
--- a/ir/opt/opt_inline.c
+++ b/ir/opt/opt_inline.c
@@ -23,9 +23,7 @@
  * @author   Michael Beck, Goetz Lindenmaier
  * @version  $Id$
  */
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
 
 #include <limits.h>
 #include <assert.h>
 #include "irgmod.h"
 #include "irgwalk.h"
 
-#include "adt/array.h"
-#include "adt/pset.h"
-#include "adt/pmap.h"
-#include "adt/pdeq.h"
-#include "adt/xmalloc.h"
-#include "adt/pqueue.h"
+#include "array_t.h"
+#include "list.h"
+#include "pset.h"
+#include "pmap.h"
+#include "pdeq.h"
+#include "xmalloc.h"
+#include "pqueue.h"
 
 #include "irouts.h"
 #include "irloop_t.h"
 #include "irbackedge_t.h"
-#include "opt_inline_t.h"
+#include "opt_init.h"
 #include "cgana.h"
 #include "trouts.h"
 #include "error.h"
@@ -62,6 +61,7 @@
 #include "irhooks.h"
 #include "irtools.h"
 #include "iropt_dbg.h"
+#include "irpass_t.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
@@ -93,7 +93,7 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg;)
  * accesses.  This function is called for all Phi and Block nodes
  * in a Block.
  */
-static INLINE int
+static inline int
 compute_new_arity(ir_node *b) {
        int i, res, irn_arity;
        int irg_v, block_v;
@@ -165,15 +165,10 @@ static void copy_node(ir_node *n, void *env) {
        }
        copy_node_attr(n, nn);
 
-#ifdef DEBUG_libfirm
-       {
-               int copy_node_nr = env != NULL;
-               if (copy_node_nr) {
-                       /* for easier debugging, we want to copy the node numbers too */
-                       nn->node_nr = n->node_nr;
-               }
+       if (env != NULL) {
+               /* for easier debugging, we want to copy the node numbers too */
+               nn->node_nr = n->node_nr;
        }
-#endif
 
        set_new_node(n, nn);
        hook_dead_node_elim_subst(current_ir_graph, n, nn);
@@ -222,9 +217,10 @@ static void copy_preds(ir_node *n, void *env) {
                   in array contained Bads.  Now it's possible.
                   We don't call optimize_in_place as it requires
                   that the fields in ir_graph are set properly. */
-               if ((get_opt_control_flow_straightening()) &&
-                       (get_Block_n_cfgpreds(nn) == 1) &&
-                       is_Jmp(get_Block_cfgpred(nn, 0))) {
+               if (!has_Block_entity(nn) &&
+                   get_opt_control_flow_straightening() &&
+                   get_Block_n_cfgpreds(nn) == 1 &&
+                   is_Jmp(get_Block_cfgpred(nn, 0))) {
                        ir_node *old = get_nodes_block(get_Block_cfgpred(nn, 0));
                        if (nn == old) {
                                /* Jmp jumps into the block it is in -- deal with this self-cycle. */
@@ -471,7 +467,7 @@ void dead_node_elimination(ir_graph *irg) {
        graveyard_obst = irg->obst;
 
        /* A new obstack, where the reachable nodes will be copied to. */
-       rebirth_obst = xmalloc(sizeof(*rebirth_obst));
+       rebirth_obst = XMALLOC(struct obstack);
        irg->obst = rebirth_obst;
        obstack_init(irg->obst);
        irg->last_node_idx = 0;
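Throughout this patch, raw xmalloc(sizeof(...)) calls are replaced by type-safe allocation macros from xmalloc.h. A sketch of their presumable definitions, inferred from how they are used in this patch (the exact spellings in xmalloc.h may differ):

        #define XMALLOC(type)        ((type*)xmalloc(sizeof(type)))
        #define XMALLOCZ(type)       ((type*)memset(xmalloc(sizeof(type)), 0, sizeof(type)))
        #define XMALLOCN(type, n)    ((type*)xmalloc(sizeof(type) * (n)))
        #define XMALLOCNZ(type, n)   ((type*)memset(xmalloc(sizeof(type) * (n)), 0, sizeof(type) * (n)))
        #define OALLOC(obst, type)   ((type*)obstack_alloc((obst), sizeof(type)))

Taking the element type instead of a hand-written byte count removes a class of size bugs; compare the broken memset(irgs, 0, sizeof(n_irgs * sizeof(*irgs))) that the XMALLOCNZ hunk further down deletes.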
@@ -496,6 +492,10 @@ void dead_node_elimination(ir_graph *irg) {
 #endif
 }
 
+ir_graph_pass_t *dead_node_elimination_pass(const char *name) {
+       return def_graph_pass(name ? name : "dce", dead_node_elimination);
+}
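def_graph_pass() wraps a plain void(*)(ir_graph*) function into an ir_graph_pass_t that a pass manager can schedule. A minimal usage sketch; the pass-manager names below (new_graph_pass_mgr, ir_graph_pass_mgr_add, ir_graph_pass_mgr_run) are assumptions about the irpass.h API, not part of this patch:

        /* run dead node elimination on all graphs through a pass manager */
        ir_graph_pass_manager_t *mgr = new_graph_pass_mgr("cleanup", 0, 0);
        ir_graph_pass_mgr_add(mgr, dead_node_elimination_pass(NULL)); /* NULL -> default name "dce" */
        ir_graph_pass_mgr_run(mgr);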
+
 /**
  * Relink bad predecessors of a block and store the old in array to the
  * link field. This function is called by relink_bad_predecessors().
@@ -662,7 +662,7 @@ static void dead_node_subst_hook(void *context, ir_graph *irg, ir_node *old, ir_
  * Make a new Survive DCE environment.
  */
 survive_dce_t *new_survive_dce(void) {
-       survive_dce_t *res = xmalloc(sizeof(res[0]));
+       survive_dce_t *res = XMALLOC(survive_dce_t);
        obstack_init(&res->obst);
        res->places     = pmap_create();
        res->new_places = NULL;
@@ -675,10 +675,6 @@ survive_dce_t *new_survive_dce(void) {
        res->dead_node_elim_subst.context = res;
        res->dead_node_elim_subst.next    = NULL;
 
-#ifndef FIRM_ENABLE_HOOKS
-       assert(0 && "need hooks enabled");
-#endif
-
        register_hook(hook_dead_node_elim, &res->dead_node_elim);
        register_hook(hook_dead_node_elim_subst, &res->dead_node_elim_subst);
        return res;
@@ -707,7 +703,7 @@ void survive_dce_register_irn(survive_dce_t *sd, ir_node **place) {
        if (*place != NULL) {
                ir_node *irn      = *place;
                survive_dce_list_t *curr = pmap_get(sd->places, irn);
-               survive_dce_list_t *nw   = obstack_alloc(&sd->obst, sizeof(nw[0]));
+               survive_dce_list_t *nw   = OALLOC(&sd->obst, survive_dce_list_t);
 
                nw->next  = curr;
                nw->place = place;
@@ -735,14 +731,15 @@ static void copy_node_inline(ir_node *n, void *env) {
 
        copy_node(n, NULL);
        if (is_Sel(n)) {
-               nn = get_new_node (n);
+               nn = get_new_node(n);
                assert(is_Sel(nn));
+               /* use copied entities from the new frame */
                if (get_entity_owner(get_Sel_entity(n)) == frame_tp) {
                        set_Sel_entity(nn, get_entity_link(get_Sel_entity(n)));
                }
        } else if (is_Block(n)) {
-               nn = get_new_node (n);
-               nn->attr.block.irg = current_ir_graph;
+               nn = get_new_node(n);
+               nn->attr.block.irg.irg = current_ir_graph;
        }
 }
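The get_entity_link() lookup above relies on a link protocol that inline_method() establishes before this walker runs (see the IR_RESOURCE_ENTITY_LINK hunk further down): for every entity of the callee's frame type, roughly

        new_ent = copy_entity_own(ent, curr_frame); /* copy_entity_own() as the copy helper is an assumption */
        set_entity_link(ent, new_ent);              /* so the walker above finds the caller-side twin */

The patch itself only shows that the copied entity ends up in the old entity's link field; the exact copy call is not visible in these hunks.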
 
@@ -772,10 +769,16 @@ static void copy_preds_inline(ir_node *n, void *env) {
  */
 static void find_addr(ir_node *node, void *env) {
        int *allow_inline = env;
-       if (is_Proj(node) &&
-                       is_Start(get_Proj_pred(node)) &&
-                       get_Proj_proj(node) == pn_Start_P_value_arg_base) {
-               *allow_inline = 0;
+       if (is_Sel(node)) {
+               ir_graph *irg = current_ir_graph;
+               if (get_Sel_ptr(node) == get_irg_frame(irg)) {
+                       /* access to frame */
+                       ir_entity *ent = get_Sel_entity(node);
+                       if (get_entity_owner(ent) != get_irg_frame_type(irg)) {
+                               /* access to value_type */
+                               *allow_inline = 0;
+                       }
+               }
        } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
                /* From GCC:
                 * Refuse to inline alloca call unless user explicitly forced so as this
@@ -783,7 +786,7 @@ static void find_addr(ir_node *node, void *env) {
                 * using alloca is called in loop.  In GCC present in SPEC2000 inlining
                 * into schedule_block cause it to require 2GB of ram instead of 256MB.
                 *
-                * Sorryly this is true with our implementation also.
+                * Sorrily this is true with our implementation also.
                 * Moreover, we cannot differentiate between alloca() and VLA yet, so this
                 * disables inlining of functions using VLA (which are completely safe).
                 *
@@ -834,9 +837,8 @@ static int can_inline(ir_node *call, ir_graph *called_graph) {
 }
 
 enum exc_mode {
-       exc_handler    = 0, /**< There is a handler. */
-       exc_to_end     = 1, /**< Branches to End. */
-       exc_no_handler = 2  /**< Exception handling not represented. */
+       exc_handler,    /**< There is a handler. */
+       exc_no_handler  /**< Exception handling not represented. */
 };
 
 /* Inlines a method at the given call site. */
@@ -850,6 +852,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        ir_node             **args_in;
        ir_node             *ret, *phi;
        int                 arity, n_ret, n_exc, n_res, i, n, j, rem_opt, irn_arity, n_params;
+       int                 n_mem_phi;
        enum exc_mode       exc_handling;
        ir_type             *called_frame, *curr_frame, *mtp, *ctp;
        ir_entity           *ent;
@@ -864,19 +867,22 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 
        mtp = get_entity_type(ent);
        ctp = get_Call_type(call);
-       if (get_method_n_params(mtp) > get_method_n_params(ctp)) {
-               /* this is a bad feature of C: without a prototype, we can can call a function with less
-               parameters than needed. Currently we don't support this, although it would be
-               to use Unknown than. */
+       n_params = get_method_n_params(mtp);
+       n_res    = get_method_n_ress(mtp);
+       if (n_params > get_method_n_params(ctp)) {
+               /* this is a bad feature of C: without a prototype, we can
+                * call a function with fewer parameters than needed. Currently
+                * we don't support this, although we could use Unknown then. */
+               return 0;
+       }
+       if (n_res != get_method_n_ress(ctp)) {
                return 0;
        }
 
        /* Argh, compiling C has some bad consequences:
-          the call type AND the method type might be different.
-          It is implementation defendant what happens in that case.
-          We support inlining, if the bitsize of the types matches AND
-          the same arithmetic is used. */
-       n_params = get_method_n_params(mtp);
+        * the call type AND the method type might be different.
+        * It is implementation dependent what happens in that case.
+        * We support inlining, if the bitsize of the types matches AND
+        * the same arithmetic is used. */
        for (i = n_params - 1; i >= 0; --i) {
                ir_type *param_tp = get_method_param_type(mtp, i);
                ir_type *arg_tp   = get_method_param_type(ctp, i);
@@ -894,6 +900,22 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        /* otherwise we can simply "reinterpret" the bits */
                }
        }
+       for (i = n_res - 1; i >= 0; --i) {
+               ir_type *decl_res_tp = get_method_res_type(mtp, i);
+               ir_type *used_res_tp = get_method_res_type(ctp, i);
+
+               if (decl_res_tp != used_res_tp) {
+                       ir_mode *decl_mode = get_type_mode(decl_res_tp);
+                       ir_mode *used_mode = get_type_mode(used_res_tp);
+                       if (decl_mode == NULL || used_mode == NULL)
+                               return 0;
+                       if (get_mode_size_bits(decl_mode) != get_mode_size_bits(used_mode))
+                               return 0;
+                       if (get_mode_arithmetic(decl_mode) != get_mode_arithmetic(used_mode))
+                               return 0;
+                       /* otherwise we can "reinterpret" the bits */
+               }
+       }
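To illustrate what the new result check tolerates, consider this hypothetical C code split across two translation units:

        /* a.c: actual definition, so the declared type returns long */
        long f(void) { return 42; }

        /* b.c: stale declaration, so the used (call) type returns int */
        int f();
        int g(void) { return f(); }

On an ILP32 target both result modes are 32-bit integer modes with identical arithmetic, so the bits can simply be reinterpreted and inlining proceeds; had f() returned double instead, the mode checks above would return 0 and refuse to inline.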
 
        irg = get_irn_irg(call);
 
@@ -930,6 +952,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        set_irg_doms_inconsistent(irg);
        set_irg_loopinfo_inconsistent(irg);
        set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
+       set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
 
        /* -- Check preconditions -- */
        assert(is_Call(call));
@@ -941,18 +964,15 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           for the Call node, or do we branch directly to End on an exception?
           exc_handling:
           0 There is a handler.
-          1 Branches to End.
           2 Exception handling not represented in Firm. -- */
        {
-               ir_node *proj, *Mproj = NULL, *Xproj = NULL;
+               ir_node *Xproj = NULL;
+               ir_node *proj;
                for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
                        long proj_nr = get_Proj_proj(proj);
                        if (proj_nr == pn_Call_X_except) Xproj = proj;
-                       if (proj_nr == pn_Call_M_except) Mproj = proj;
                }
-               if      (Mproj) { assert(Xproj); exc_handling = exc_handler; } /*  Mproj           */
-               else if (Xproj) {                exc_handling = exc_to_end; } /* !Mproj &&  Xproj   */
-               else            {                exc_handling = exc_no_handler; } /* !Mproj && !Xproj   */
+               exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
        }
 
        /* create the argument tuple */
@@ -965,7 +985,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                ir_mode *mode     = get_type_mode(param_tp);
 
                if (mode != get_irn_mode(arg)) {
-                       arg = new_r_Conv(irg, block, arg, mode);
+                       arg = new_r_Conv(block, arg, mode);
                }
                args_in[i] = arg;
        }
@@ -982,9 +1002,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        in[pn_Start_P_frame_base]     = get_irg_frame(irg);
        in[pn_Start_P_tls]            = get_irg_tls(irg);
        in[pn_Start_T_args]           = new_Tuple(n_params, args_in);
-       /* in[pn_Start_P_value_arg_base] = ??? */
-       assert(pn_Start_P_value_arg_base == pn_Start_max - 1 && "pn_Start_P_value_arg_base not supported, fix");
-       pre_call = new_Tuple(pn_Start_max - 1, in);
+       pre_call = new_Tuple(pn_Start_max, in);
        post_call = call;
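The pre_call Tuple built here impersonates the callee's Start node. Conceptually (a sketch of the wiring, using only the entries visible above):

        Proj(pre_call, pn_Start_P_frame_base) -> get_irg_frame(irg)
        Proj(pre_call, pn_Start_P_tls)        -> get_irg_tls(irg)
        Proj(pre_call, pn_Start_T_args)       -> new_Tuple(n_params, args_in)

Every Proj that read an argument or the frame in the callee thus resolves to the caller-side value once Start is exchanged for pre_call. With pn_Start_P_value_arg_base gone, the Tuple now covers the full pn_Start_max range instead of pn_Start_max - 1.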
 
        /* --
@@ -1022,6 +1040,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 
        /* -- Replicate local entities of the called_graph -- */
        /* copy the entities. */
+       irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
        called_frame = get_irg_frame_type(called_graph);
        curr_frame   = get_irg_frame_type(irg);
        for (i = 0, n = get_class_n_members(called_frame); i < n; ++i) {
@@ -1042,6 +1061,8 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        irg_walk(get_irg_end(called_graph), copy_node_inline, copy_preds_inline,
                 get_irg_frame_type(called_graph));
 
+       irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+
        /* Repair called_graph */
        set_irg_visited(called_graph, get_irg_visited(irg));
        set_irg_block_visited(called_graph, get_irg_block_visited(irg));
@@ -1067,8 +1088,8 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        arity = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
        n_res = get_method_n_ress(get_Call_type(call));
 
-       res_pred = xmalloc(n_res * sizeof(*res_pred));
-       cf_pred  = xmalloc(arity * sizeof(*res_pred));
+       res_pred = XMALLOCN(ir_node*, n_res);
+       cf_pred  = XMALLOCN(ir_node*, arity);
 
        set_irg_current_block(irg, post_bl); /* just to make sure */
 
@@ -1089,7 +1110,7 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                ir_node *ret;
                ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
-                       cf_pred[n_ret] = new_r_Jmp(irg, get_nodes_block(ret));
+                       cf_pred[n_ret] = new_r_Jmp(get_nodes_block(ret));
                        n_ret++;
                }
        }
@@ -1099,16 +1120,24 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
           Add Phi node if there was more than one Return.  -- */
        turn_into_tuple(post_call, pn_Call_max);
        /* First the Memory-Phi */
-       n_ret = 0;
+       n_mem_phi = 0;
        for (i = 0; i < arity; i++) {
                ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
-                       cf_pred[n_ret] = get_Return_mem(ret);
-                       n_ret++;
+                       cf_pred[n_mem_phi++] = get_Return_mem(ret);
+               }
+               /* memory output for some exceptions is directly connected to End */
+               if (is_Call(ret)) {
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 3);
+               } else if (is_fragile_op(ret)) {
+                       /* We rely on all cfops having the memory output at the same position. */
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 0);
+               } else if (is_Raise(ret)) {
+                       cf_pred[n_mem_phi++] = new_r_Proj(get_nodes_block(ret), ret, mode_M, 1);
                }
        }
-       phi = new_Phi(n_ret, cf_pred, mode_M);
-       set_Tuple_pred(call, pn_Call_M_regular, phi);
+       phi = new_Phi(n_mem_phi, cf_pred, mode_M);
+       set_Tuple_pred(call, pn_Call_M, phi);
        /* Conserve Phi-list for further inlinings -- but might be optimized */
        if (get_nodes_block(phi) == post_bl) {
                set_irn_link(phi, get_irn_link(post_bl));
@@ -1117,11 +1146,18 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        /* Now the real results */
        if (n_res > 0) {
                for (j = 0; j < n_res; j++) {
+                       ir_type *res_type = get_method_res_type(ctp, j);
+                       ir_mode *res_mode = get_type_mode(res_type);
                        n_ret = 0;
                        for (i = 0; i < arity; i++) {
                                ret = get_Block_cfgpred(end_bl, i);
                                if (is_Return(ret)) {
-                                       cf_pred[n_ret] = get_Return_res(ret, j);
+                                       ir_node *res = get_Return_res(ret, j);
+                                       if (get_irn_mode(res) != res_mode) {
+                                               ir_node *block = get_nodes_block(res);
+                                               res = new_r_Conv(block, res, res_mode);
+                                       }
+                                       cf_pred[n_ret] = res;
                                        n_ret++;
                                }
                        }
@@ -1147,15 +1183,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
        set_Tuple_pred(call, pn_Call_P_value_res_base, new_Bad());
 
        /* Finally the exception control flow.
-          We have two (three) possible situations:
-          First if the Call branches to an exception handler: We need to add a Phi node to
+          We have two possible situations:
+          First if the Call branches to an exception handler:
+          We need to add a Phi node to
           collect the memory containing the exception objects.  Further we need
           to add another block to get a correct representation of this Phi.  To
           this block we add a Jmp that resolves into the X output of the Call
           when the Call is turned into a tuple.
-          Second the Call branches to End, the exception is not handled.  Just
-          add all inlined exception branches to the End node.
-          Third: there is no Exception edge at all. Handle as case two. */
+          Second: There is no exception edge. Just add all inlined exception
+          branches to the End node.
+        */
        if (exc_handling == exc_handler) {
                n_exc = 0;
                for (i = 0; i < arity; i++) {
@@ -1168,29 +1205,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                        }
                }
                if (n_exc > 0) {
-                       new_Block(n_exc, cf_pred);      /* watch it: current_block is changed! */
-                       set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
-                       /* The Phi for the memories with the exception objects */
-                       n_exc = 0;
-                       for (i = 0; i < arity; i++) {
-                               ir_node *ret;
-                               ret = skip_Proj(get_Block_cfgpred(end_bl, i));
-                               if (is_Call(ret)) {
-                                       cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 3);
-                                       n_exc++;
-                               } else if (is_fragile_op(ret)) {
-                                       /* We rely that all cfops have the memory output at the same position. */
-                                       cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 0);
-                                       n_exc++;
-                               } else if (is_Raise(ret)) {
-                                       cf_pred[n_exc] = new_r_Proj(irg, get_nodes_block(ret), ret, mode_M, 1);
-                                       n_exc++;
-                               }
+                       if (n_exc == 1) {
+                               /* simple fix */
+                               set_Tuple_pred(call, pn_Call_X_except, cf_pred[0]);
+                       } else {
+                               ir_node *block = new_Block(n_exc, cf_pred);
+                               set_cur_block(block);
+                               set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
                        }
-                       set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
                } else {
                        set_Tuple_pred(call, pn_Call_X_except, new_Bad());
-                       set_Tuple_pred(call, pn_Call_M_except, new_Bad());
                }
        } else {
                ir_node *main_end_bl;
@@ -1208,17 +1232,16 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
                                n_exc++;
                        }
                }
-               main_end_bl = get_irg_end_block(irg);
+               main_end_bl       = get_irg_end_block(irg);
                main_end_bl_arity = get_irn_arity(main_end_bl);
-               end_preds =  xmalloc((n_exc + main_end_bl_arity) * sizeof(*end_preds));
+               end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
 
                for (i = 0; i < main_end_bl_arity; ++i)
                        end_preds[i] = get_irn_n(main_end_bl, i);
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_except,  new_Bad());
-               set_Tuple_pred(call, pn_Call_M_except,  new_Bad());
+               set_Tuple_pred(call, pn_Call_X_except, new_Bad());
                free(end_preds);
        }
        free(res_pred);
@@ -1232,28 +1255,28 @@ int inline_method(ir_node *call, ir_graph *called_graph) {
 }
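inline_method() is the primitive that all inlining strategies below build on. A minimal caller sketch (assuming, as inline_small_irgs() does, that collect_phiprojs() has been run so the Phi/Proj links are valid):

        ir_graph *callee = get_call_called_irg(call); /* helper defined below */
        if (callee != NULL
            && get_irg_inline_property(callee) != irg_inline_forbidden
            && inline_method(call, callee)) {
                /* the Call is now a Tuple; dominance and loop info were invalidated */
        }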
 
 /********************************************************************/
-/* Apply inlineing to small methods.                                */
+/* Apply inlining to small methods.                                 */
 /********************************************************************/
 
 static struct obstack  temp_obst;
 
 /** Represents a possibly inlinable call in a graph. */
-typedef struct _call_entry call_entry;
-struct _call_entry {
-       ir_node    *call;      /**< the Call node */
-       ir_graph   *callee;    /**< the callee IR-graph called here */
-       call_entry *next;      /**< for linking the next one */
-       int         loop_depth; /**< the loop depth of this call */
-       unsigned    local_adr : 1;
-};
+typedef struct _call_entry {
+       ir_node    *call;       /**< The Call node. */
+       ir_graph   *callee;     /**< The callee IR-graph. */
+       list_head  list;        /**< List head for linking the next one. */
+       int        loop_depth;  /**< The loop depth of this call. */
+       int        benefice;    /**< The calculated benefice of this call. */
+       unsigned   local_adr:1; /**< Set if this call gets an address of a local variable. */
+       unsigned   all_const:1; /**< Set if this call has only constant parameters. */
+} call_entry;
 
 /**
  * environment for inlining small irgs
  */
 typedef struct _inline_env_t {
-       struct obstack obst;  /**< an obstack where call_entries are allocated on. */
-       call_entry *head;     /**< the head of the call entry list */
-       call_entry *tail;     /**< the tail of the call entry list */
+       struct obstack obst;  /**< An obstack where call_entries are allocated on. */
+       list_head      calls; /**< The call entry list. */
 } inline_env_t;
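The list_head member makes call_entry part of a kernel-style intrusive list from list.h: the links are embedded in the entry itself, and the iteration macros recover the surrounding struct from the embedded member. A small traversal sketch using the same macros this patch uses elsewhere:

        static void dump_calls(ir_graph *irg) {
                inline_env_t env;
                call_entry   *entry;

                obstack_init(&env.obst);
                INIT_LIST_HEAD(&env.calls);
                irg_walk_graph(irg, NULL, collect_calls, &env); /* fills env.calls, see below */

                list_for_each_entry(call_entry, entry, &env.calls, list)
                        ir_printf("call %+F -> %+F\n", entry->call, entry->callee);

                obstack_free(&env.obst, NULL);
        }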
 
 /**
@@ -1262,12 +1285,17 @@ typedef struct _inline_env_t {
  *
  * @param call  the call node
  */
-static ir_graph *get_call_called_irg(ir_node *call) {
+static ir_graph *get_call_called_irg(ir_node *call)
+{
        ir_node *addr;
 
        addr = get_Call_ptr(call);
        if (is_Global(addr)) {
                ir_entity *ent = get_Global_entity(addr);
+               /* we don't know which function will finally be bound to a weak symbol */
+               if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
+                       return NULL;
+
                return get_entity_irg(ent);
        }
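The weak-linkage bail-out matters because the graph seen here may not be the one that survives linking. A hypothetical illustration in GCC syntax:

        /* lib.c: overridable default */
        __attribute__((weak)) int get_limit(void) { return 64; }

        /* app.c: a strong definition anywhere in the link overrides it */
        int get_limit(void) { return 4096; }

Inlining the weak default into its callers would hard-wire the wrong body whenever a strong definition wins at link time, so such calls report no known callee.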
 
@@ -1278,23 +1306,22 @@ static ir_graph *get_call_called_irg(ir_node *call) {
  * Walker: Collect all calls to known graphs inside a graph.
  */
 static void collect_calls(ir_node *call, void *env) {
        if (is_Call(call)) {
                ir_graph *called_irg = get_call_called_irg(call);
 
                if (called_irg != NULL) {
                        /* The Call node calls a locally defined method.  Remember to inline. */
                        inline_env_t *ienv  = env;
-                       call_entry   *entry = obstack_alloc(&ienv->obst, sizeof(*entry));
+                       call_entry   *entry = OALLOC(&ienv->obst, call_entry);
                        entry->call       = call;
                        entry->callee     = called_irg;
-                       entry->next       = NULL;
                        entry->loop_depth = 0;
+                       entry->benefice   = 0;
+                       entry->local_adr  = 0;
+                       entry->all_const  = 0;
 
-                       if (ienv->tail == NULL)
-                               ienv->head = entry;
-                       else
-                               ienv->tail->next = entry;
-                       ienv->tail = entry;
+                       list_add_tail(&entry->list, &ienv->calls);
                }
        }
 }
@@ -1318,24 +1345,24 @@ void inline_small_irgs(ir_graph *irg, int size) {
        free_callee_info(irg);
 
        /* Find Call nodes to inline.
-          (We can not inline during a walk of the graph, as inlineing the same
+          (We cannot inline during a walk of the graph, as inlining the same
           method several times changes the visited flag of the walked graph:
-          after the first inlineing visited of the callee equals visited of
-          the caller.  With the next inlineing both are increased.) */
+          after the first inlining visited of the callee equals visited of
+          the caller.  With the next inlining both are increased.) */
        obstack_init(&env.obst);
-       env.head = env.tail = NULL;
+       INIT_LIST_HEAD(&env.calls);
        irg_walk_graph(irg, NULL, collect_calls, &env);
 
-       if (env.head != NULL) {
+       if (! list_empty(&env.calls)) {
                /* There are calls to inline */
+               ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
                collect_phiprojs(irg);
 
-               for (entry = env.head; entry != NULL; entry = entry->next) {
+               list_for_each_entry(call_entry, entry, &env.calls, list) {
                        ir_graph            *callee = entry->callee;
                        irg_inline_property prop    = get_irg_inline_property(callee);
 
-                       if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-                               /* do not inline forbidden / weak graphs */
+                       if (prop == irg_inline_forbidden) {
                                continue;
                        }
 
@@ -1344,48 +1371,70 @@ void inline_small_irgs(ir_graph *irg, int size) {
                                inline_method(entry->call, callee);
                        }
                }
+               ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
        }
        obstack_free(&env.obst, NULL);
        current_ir_graph = rem;
 }
 
+struct inline_small_irgs_pass_t {
+       ir_graph_pass_t pass;
+       int            size;
+};
+
+/**
+ * Wrapper to run inline_small_irgs() as a pass.
+ */
+static int inline_small_irgs_wrapper(ir_graph *irg, void *context) {
+       struct inline_small_irgs_pass_t *pass = context;
+
+       inline_small_irgs(irg, pass->size);
+       return 0;
+}
+
+/* create a pass for inline_small_irgs() */
+ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size) {
+       struct inline_small_irgs_pass_t *pass =
+               XMALLOCZ(struct inline_small_irgs_pass_t);
+
+       pass->size = size;
+       return def_graph_pass_constructor(
+               &pass->pass, name ? name : "inline_small_irgs", inline_small_irgs_wrapper);
+}
+
 /**
  * Environment for inlining irgs.
  */
 typedef struct {
-       unsigned n_nodes;             /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
-       unsigned n_blocks;            /**< Number of Blocks in graph without Start and End block. */
-       unsigned n_nodes_orig;        /**< for statistics */
-       unsigned n_call_nodes;        /**< Number of Call nodes in the graph. */
-       unsigned n_call_nodes_orig;   /**< for statistics */
-       unsigned n_callers;           /**< Number of known graphs that call this graphs. */
-       unsigned n_callers_orig;      /**< for statistics */
-       unsigned got_inline:1;   /**< Set, if at least one call inside this graph was inlined. */
-       unsigned local_vars:1;   /**< Set, if a inlined function gets the address of an inlined variable. */
-       unsigned recursive:1;    /**< Set, if this function is self recursive. */
-       call_entry *call_head;   /**< The head of the list of all call nodes in this graph. */
-       call_entry *call_tail;   /**< The tail of the list of all call nodes in this graph .*/
-       unsigned *local_weights; /**< Once allocated, the beneficial weight for transmitting local addresses. */
+       list_head calls;             /**< List of all call nodes in this graph. */
+       unsigned  *local_weights;    /**< Once allocated, the beneficial weight for transmitting local addresses. */
+       unsigned  n_nodes;           /**< Number of nodes in graph except Id, Tuple, Proj, Start, End. */
+       unsigned  n_blocks;          /**< Number of Blocks in graph without Start and End block. */
+       unsigned  n_nodes_orig;      /**< for statistics */
+       unsigned  n_call_nodes;      /**< Number of Call nodes in the graph. */
+       unsigned  n_call_nodes_orig; /**< for statistics */
+       unsigned  n_callers;         /**< Number of known graphs that call this graph. */
+       unsigned  n_callers_orig;    /**< for statistics */
+       unsigned  got_inline:1;      /**< Set, if at least one call inside this graph was inlined. */
+       unsigned  recursive:1;       /**< Set, if this function is self recursive. */
 } inline_irg_env;
 
 /**
  * Allocate a new environment for inlining.
  */
 static inline_irg_env *alloc_inline_irg_env(void) {
-       inline_irg_env *env    = obstack_alloc(&temp_obst, sizeof(*env));
+       inline_irg_env *env    = OALLOC(&temp_obst, inline_irg_env);
+       INIT_LIST_HEAD(&env->calls);
+       env->local_weights     = NULL;
        env->n_nodes           = -2; /* do not count Start, End */
        env->n_blocks          = -2; /* do not count Start, End Block */
        env->n_nodes_orig      = -2; /* do not count Start, End */
-       env->call_head         = NULL;
-       env->call_tail         = NULL;
        env->n_call_nodes      = 0;
        env->n_call_nodes_orig = 0;
        env->n_callers         = 0;
        env->n_callers_orig    = 0;
        env->got_inline        = 0;
-       env->local_vars        = 0;
        env->recursive         = 0;
-       env->local_weights     = NULL;
        return env;
 }
 
@@ -1446,19 +1495,15 @@ static void collect_calls2(ir_node *call, void *ctx) {
                        x->recursive = 1;
 
                /* link it in the list of possible inlinable entries */
-               entry = obstack_alloc(&temp_obst, sizeof(*entry));
+               entry = OALLOC(&temp_obst, call_entry);
                entry->call       = call;
                entry->callee     = callee;
-               entry->next       = NULL;
                entry->loop_depth = get_irn_loop(get_nodes_block(call))->depth;
+               entry->benefice   = 0;
+               entry->local_adr  = 0;
+               entry->all_const  = 0;
 
-               entry->next  = x->call_head;
-               x->call_head = entry;
-
-               if (entry->next == NULL) {
-                       /* keep tail up to date */
-                       x->call_tail = entry;
-               }
+               list_add_tail(&entry->list, &x->calls);
        }
 }
 
@@ -1466,7 +1511,7 @@ static void collect_calls2(ir_node *call, void *ctx) {
  * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
  * hence this irg is a leave.
  */
-INLINE static int is_leave(ir_graph *irg) {
+inline static int is_leave(ir_graph *irg) {
        inline_irg_env *env = get_irg_link(irg);
        return env->n_call_nodes == 0;
 }
@@ -1475,43 +1520,54 @@ INLINE static int is_leave(ir_graph *irg) {
  * Returns TRUE if the number of nodes in the callee is
  * smaller than size in the irg's environment.
  */
-INLINE static int is_smaller(ir_graph *callee, unsigned size) {
+inline static int is_smaller(ir_graph *callee, unsigned size) {
        inline_irg_env *env = get_irg_link(callee);
        return env->n_nodes < size;
 }
 
 /**
- * Append the nodes of the list src to the nodes of the list in environment dst.
+ * Duplicate a call entry.
+ *
+ * @param entry     the original entry to duplicate
+ * @param new_call  the new call node
+ * @param loop_depth_delta
+ *                  delta value for the loop depth
  */
-static void append_call_list(inline_irg_env *dst, call_entry *src) {
-       call_entry *entry, *nentry;
-
-       /* Note that the src list points to Call nodes in the inlined graph, but
-          we need Call nodes in our graph. Luckily the inliner leaves this information
-          in the link field. */
-       for (entry = src; entry != NULL; entry = entry->next) {
-               nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
-               nentry->call         = get_irn_link(entry->call);
-               nentry->callee       = entry->callee;
-               nentry->next         = NULL;
-               nentry->loop_depth   = entry->loop_depth;
-               dst->call_tail->next = nentry;
-               dst->call_tail       = nentry;
-       }
-}
-
 static call_entry *duplicate_call_entry(const call_entry *entry,
-               int loop_depth_delta, ir_node *new_call)
-{
-       call_entry *nentry = obstack_alloc(&temp_obst, sizeof(*nentry));
+                                        ir_node *new_call, int loop_depth_delta) {
+       call_entry *nentry = OALLOC(&temp_obst, call_entry);
        nentry->call       = new_call;
        nentry->callee     = entry->callee;
-       nentry->next       = NULL;
+       nentry->benefice   = entry->benefice;
        nentry->loop_depth = entry->loop_depth + loop_depth_delta;
+       nentry->local_adr  = entry->local_adr;
+       nentry->all_const  = entry->all_const;
 
        return nentry;
 }
 
+/**
+ * Append all call nodes of the source environment to the nodes of in the destination
+ * environment.
+ *
+ * @param dst         destination environment
+ * @param src         source environment
+ * @param loop_depth  the loop depth of the call that is replaced by the src list
+ */
+static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth) {
+       call_entry *entry, *nentry;
+
+       /* Note that the src list points to Call nodes in the inlined graph, but
+          we need Call nodes in our graph. Luckily the inliner leaves this information
+          in the link field. */
+       list_for_each_entry(call_entry, entry, &src->calls, list) {
+               nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
+               list_add_tail(&nentry->list, &dst->calls);
+       }
+       dst->n_call_nodes += src->n_call_nodes;
+       dst->n_nodes      += src->n_nodes;
+}
+
 /*
  * Inlines small leave methods at call sites where the called address comes
  * from a Const node that references the entity representing the called
@@ -1521,7 +1577,7 @@ static call_entry *duplicate_call_entry(const call_entry *entry,
  * size are inlined.
  */
 void inline_leave_functions(unsigned maxsize, unsigned leavesize,
-               unsigned size, int ignore_runtime)
+                            unsigned size, int ignore_runtime)
 {
        inline_irg_env   *env;
        ir_graph         *irg;
@@ -1529,7 +1585,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        ir_graph         *rem;
        int              did_inline;
        wenv_t           wenv;
-       call_entry       *entry, *tail;
+       call_entry       *entry, *next;
        const call_entry *centry;
        pmap             *copied_graphs;
        pmap_entry       *pm_entry;
@@ -1545,7 +1601,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        for (i = 0; i < n_irgs; ++i)
                set_irg_link(get_irp_irg(i), alloc_inline_irg_env());
 
-       /* Precompute information in temporary data structure. */
+       /* Pre-compute information in temporary data structure. */
        wenv.ignore_runtime = ignore_runtime;
        wenv.ignore_callers = 0;
        for (i = 0; i < n_irgs; ++i) {
@@ -1570,10 +1626,10 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                        int phiproj_computed = 0;
 
                        current_ir_graph = get_irp_irg(i);
-                       env = (inline_irg_env *)get_irg_link(current_ir_graph);
+                       env              = get_irg_link(current_ir_graph);
 
-                       tail = NULL;
-                       for (entry = env->call_head; entry != NULL; entry = entry->next) {
+                       ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+                       list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
                                ir_graph            *callee;
                                irg_inline_property  prop;
 
@@ -1584,8 +1640,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                callee = entry->callee;
 
                                prop = get_irg_inline_property(callee);
-                               if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-                                       /* do not inline forbidden / weak graphs */
+                               if (prop == irg_inline_forbidden) {
                                        continue;
                                }
 
@@ -1598,9 +1653,9 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        did_inline = inline_method(call, callee);
 
                                        if (did_inline) {
-                                               inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
+                                               inline_irg_env *callee_env = get_irg_link(callee);
 
-                                               /* was inlined, must be recomputed */
+                                               /* call was inlined, Phi/Projs for current graph must be recomputed */
                                                phiproj_computed = 0;
 
                                                /* Do some statistics */
@@ -1610,16 +1665,12 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                                --callee_env->n_callers;
 
                                                /* remove this call from the list */
-                                               if (tail != NULL)
-                                                       tail->next = entry->next;
-                                               else
-                                                       env->call_head = entry->next;
+                                               list_del(&entry->list);
                                                continue;
                                        }
                                }
-                               tail = entry;
                        }
-                       env->call_tail = tail;
+                       ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
                }
        } while (did_inline);
 
@@ -1629,11 +1680,12 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                int phiproj_computed = 0;
 
                current_ir_graph = get_irp_irg(i);
-               env = (inline_irg_env *)get_irg_link(current_ir_graph);
+               env              = get_irg_link(current_ir_graph);
+
+               ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
 
                /* note that the list of possible calls is updated during the process */
-               tail = NULL;
-               for (entry = env->call_head; entry != NULL; entry = entry->next) {
+               list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
                        irg_inline_property prop;
                        ir_graph            *callee;
                        pmap_entry          *e;
@@ -1642,8 +1694,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                        callee = entry->callee;
 
                        prop = get_irg_inline_property(callee);
-                       if (prop == irg_inline_forbidden || get_irg_additional_properties(callee) & mtp_property_weak) {
-                               /* do not inline forbidden / weak graphs */
+                       if (prop == irg_inline_forbidden) {
                                continue;
                        }
 
@@ -1668,6 +1719,8 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        inline_irg_env *callee_env;
                                        ir_graph       *copy;
 
+                                       ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
                                        /*
                                         * No copy yet, create one.
                                         * Note that recursive methods are never leaves, so it is sufficient
@@ -1678,6 +1731,8 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        /* create_irg_copy() destroys the Proj links, recompute them */
                                        phiproj_computed = 0;
 
+                                       ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
                                        /* allocate new environment */
                                        callee_env = alloc_inline_irg_env();
                                        set_irg_link(copy, callee_env);
@@ -1709,40 +1764,34 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                if (did_inline) {
                                        inline_irg_env *callee_env = (inline_irg_env *)get_irg_link(callee);
 
-                                       /* was inlined, must be recomputed */
+                                       /* call was inlined, Phi/Projs for current graph must be recomputed */
                                        phiproj_computed = 0;
 
                                        /* callee was inlined. Append its call list. */
                                        env->got_inline = 1;
                                        --env->n_call_nodes;
-                                       append_call_list(env, callee_env->call_head);
-                                       env->n_call_nodes += callee_env->n_call_nodes;
-                                       env->n_nodes += callee_env->n_nodes;
+                                       append_call_list(env, callee_env, entry->loop_depth);
                                        --callee_env->n_callers;
 
                                        /* after we have inlined callee, all called methods inside callee
                                           are now called once more */
-                                       for (centry = callee_env->call_head; centry != NULL; centry = centry->next) {
+                                       list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
                                                inline_irg_env *penv = get_irg_link(centry->callee);
                                                ++penv->n_callers;
                                        }
 
                                        /* remove this call from the list */
-                                       if (tail != NULL)
-                                               tail->next = entry->next;
-                                       else
-                                               env->call_head = entry->next;
+                                       list_del(&entry->list);
                                        continue;
                                }
                        }
-                       tail = entry;
                }
-               env->call_tail = tail;
+               ir_free_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
        }
 
        for (i = 0; i < n_irgs; ++i) {
                irg = get_irp_irg(i);
-               env = (inline_irg_env *)get_irg_link(irg);
+               env = get_irg_link(irg);
 
                if (env->got_inline) {
                        optimize_graph_df(irg);
@@ -1770,6 +1819,45 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        current_ir_graph = rem;
 }
 
+struct inline_leave_functions_pass_t {
+       ir_prog_pass_t pass;
+       unsigned       maxsize;
+       unsigned       leavesize;
+       unsigned       size;
+       int            ignore_runtime;
+};
+
+/**
+ * Wrapper to run inline_leave_functions() as an ir_prog pass.
+ */
+static int inline_leave_functions_wrapper(ir_prog *irp, void *context) {
+       struct inline_leave_functions_pass_t *pass = context;
+
+       (void)irp;
+       inline_leave_functions(
+               pass->maxsize, pass->leavesize,
+               pass->size, pass->ignore_runtime);
+       return 0;
+}
+
+/* create a pass for inline_leave_functions() */
+ir_prog_pass_t *inline_leave_functions_pass(
+       const char *name, unsigned maxsize, unsigned leavesize,
+       unsigned size, int ignore_runtime) {
+       struct inline_leave_functions_pass_t *pass =
+               XMALLOCZ(struct inline_leave_functions_pass_t);
+
+       pass->maxsize        = maxsize;
+       pass->leavesize      = leavesize;
+       pass->size           = size;
+       pass->ignore_runtime = ignore_runtime;
+
+       return def_prog_pass_constructor(
+               &pass->pass,
+               name ? name : "inline_leave_functions",
+               inline_leave_functions_wrapper);
+}
+
 /**
  * Calculate the parameter weights for transmitting the address of a local variable.
  */
@@ -1888,11 +1976,8 @@ static unsigned get_method_local_adress_weight(ir_graph *callee, int pos) {
  *
  * @param call       the call node we have to inspect
  * @param callee     the called graph
- * @param local_adr  set after return if an address of a local variable is
- *                   transmitted as a parameter
  */
-static int calc_inline_benefice(const call_entry *entry, ir_graph *callee,
-                                unsigned *local_adr)
+static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
 {
        ir_node   *call = entry->call;
        ir_entity *ent  = get_irg_entity(callee);
@@ -1909,15 +1994,13 @@ static int calc_inline_benefice(const call_entry *entry, ir_graph *callee,
        if (prop == irg_inline_forbidden) {
                DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
                    call, callee));
-               return INT_MIN;
+               return entry->benefice = INT_MIN;
        }
 
-       if ( (get_irg_additional_properties(callee)
-                               | get_entity_additional_properties(ent))
-                       & (mtp_property_noreturn | mtp_property_weak)) {
+       if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
                DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
                    call, callee));
-               return INT_MIN;
+               return entry->benefice = INT_MIN;
        }
 
        /* costs for every passed parameter */
@@ -1958,15 +2041,16 @@ static int calc_inline_benefice(const call_entry *entry, ir_graph *callee,
                                v = get_method_local_adress_weight(callee, i);
                                weight += v;
                                if (v > 0)
-                                       *local_adr = 1;
+                                       entry->local_adr = 1;
                        }
                }
        }
+       entry->all_const = all_const;
 
        callee_env = get_irg_link(callee);
-       if (callee_env->n_callers == 1
-                       && callee != current_ir_graph
-                       && get_entity_visibility(ent) == visibility_local) {
+       if (callee_env->n_callers == 1 &&
+           callee != current_ir_graph &&
+           !entity_is_externally_visible(ent)) {
                weight += 700;
        }
 
@@ -1983,13 +2067,6 @@ static int calc_inline_benefice(const call_entry *entry, ir_graph *callee,
        if (callee_env->n_call_nodes == 0)
                weight += 400;
 
-       /*
-        * Reduce the weight for recursive function IFF not all arguments are const.
-        * inlining recursive functions is rarely good.
-        */
-       if (callee_env->recursive && !all_const)
-               weight -= 2000;
-
        /** it's important to inline inner loops first */
        if (entry->loop_depth > 30)
                weight += 30 * 1024;
@@ -2000,22 +2077,28 @@ static int calc_inline_benefice(const call_entry *entry, ir_graph *callee,
         * All arguments constant is probably a good sign, give an extra bonus
         */
        if (all_const)
-               weight += 1308;
+               weight += 1024;
 
-       return weight;
+       return entry->benefice = weight;
 }
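To make the weighting concrete, a small worked example using only the bonuses visible above: a call with all-constant arguments to a leaf callee that is local and has a single caller accumulates roughly

        weight += 700;  /* sole caller of a non-externally-visible entity */
        weight += 400;  /* callee contains no further calls               */
        weight += 1024; /* all arguments constant                         */

before the per-parameter and size terms are applied, which comfortably clears typical inline thresholds.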
 
 static ir_graph **irgs;
 static int      last_irg;
 
-static void callgraph_walker(ir_graph *irg, void *data)
-{
+/**
+ * Callgraph walker, collect all visited graphs.
+ */
+static void callgraph_walker(ir_graph *irg, void *data) {
        (void) data;
        irgs[last_irg++] = irg;
 }
 
-static ir_graph **create_irg_list(void)
-{
+/**
+ * Creates an inline order for all graphs.
+ *
+ * @return the list of graphs.
+ */
+static ir_graph **create_irg_list(void) {
        ir_entity **free_methods;
        int       arr_len;
        int       n_irgs = get_irp_n_irgs();
@@ -2026,8 +2109,7 @@ static ir_graph **create_irg_list(void)
        compute_callgraph();
 
        last_irg = 0;
-       irgs     = xmalloc(n_irgs * sizeof(*irgs));
-       memset(irgs, 0, sizeof(n_irgs * sizeof(*irgs)));
+       irgs     = XMALLOCNZ(ir_graph*, n_irgs);
 
        callgraph_walk(NULL, callgraph_walker, NULL);
        assert(n_irgs == last_irg);
@@ -2035,33 +2117,53 @@ static ir_graph **create_irg_list(void)
        return irgs;
 }
 
+/**
+ * Push a call onto the priority queue if its benefice is big enough.
+ *
+ * @param pqueue   the priority queue of calls
+ * @param call     the call entry
+ * @param inline_threshold
+ *                 the threshold value
+ */
 static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
                             int inline_threshold)
 {
-       ir_graph            *callee    = call->callee;
-       irg_inline_property  prop      = get_irg_inline_property(callee);
-       unsigned             local_adr;
-       int                  benefice;
-
-       benefice        = calc_inline_benefice(call, callee, &local_adr);
-       call->local_adr = local_adr;
+       ir_graph            *callee  = call->callee;
+       irg_inline_property prop     = get_irg_inline_property(callee);
+       int                 benefice = calc_inline_benefice(call, callee);
 
        DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
-               get_irn_irg(call->call), call->call, callee, benefice));
+           get_irn_irg(call->call), call->call, callee, benefice));
 
-       if (benefice < inline_threshold && !(prop & irg_inline_forced))
+       if (prop < irg_inline_forced && benefice < inline_threshold) {
                return;
+       }
 
        pqueue_put(pqueue, call, benefice);
 }
 
+/**
+ * Try to inline calls into a graph.
+ *
+ * @param irg      the graph into which we inline
+ * @param maxsize  do NOT inline if the size of irg gets
+ *                 bigger than this amount
+ * @param inline_threshold
+ *                 threshold value for inline decision
+ * @param copied_graphs
+ *                 map containing copies of recursive graphs
+ */
 static void inline_into(ir_graph *irg, unsigned maxsize,
-               int inline_threshold, pmap *copied_graphs)
+                        int inline_threshold, pmap *copied_graphs)
 {
-       int             phiproj_computed = 0;
+       int            phiproj_computed = 0;
        inline_irg_env *env = get_irg_link(irg);
        call_entry     *curr_call;
-       wenv_t          wenv;
+       wenv_t         wenv;
+       pqueue_t       *pqueue;
+
+       if (env->n_call_nodes == 0)
+               return;
 
        if (env->n_nodes > maxsize) {
                DB((dbg, LEVEL_2, "%+F: too big (%d)\n", irg, env->n_nodes));
@@ -2069,34 +2171,29 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
        }
 
        current_ir_graph = irg;
+       ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
 
        /* put irgs into the pqueue */
-       pqueue_t *pqueue = new_pqueue();
+       pqueue = new_pqueue();
 
-       for (curr_call = env->call_head; curr_call != NULL;
-                       curr_call = curr_call->next) {
-
-               if (is_Tuple(curr_call->call))
-                       continue;
+       list_for_each_entry(call_entry, curr_call, &env->calls, list) {
                assert(is_Call(curr_call->call));
-
                maybe_push_call(pqueue, curr_call, inline_threshold);
        }
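The priority queue turns inlining into a best-first search. A sketch of the pqueue.h contract assumed here (pop returns the highest-priority entry first; del_pqueue as the destructor name is an assumption):

        pqueue_t *q = new_pqueue();
        pqueue_put(q, cold_call, 150);
        pqueue_put(q, hot_call, 2000);
        assert(pqueue_pop_front(q) == (void*)hot_call); /* larger benefice pops first */
        del_pqueue(q);

so the most beneficial candidate calls are inlined while the n_nodes budget is still available.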
 
        /* note that the list of possible calls is updated during the process */
        while (!pqueue_empty(pqueue)) {
-               int                  did_inline;
-               pmap_entry          *e;
-               call_entry          *curr_call = pqueue_pop_front(pqueue);
-               ir_graph            *callee    = curr_call->callee;
-               ir_node             *call_node = curr_call->call;
-               irg_inline_property  prop      = get_irg_inline_property(callee);
-               const call_entry    *centry;
+               int                 did_inline;
+               call_entry          *curr_call  = pqueue_pop_front(pqueue);
+               ir_graph            *callee     = curr_call->callee;
+               ir_node             *call_node  = curr_call->call;
                inline_irg_env      *callee_env = get_irg_link(callee);
-               int                  depth      = curr_call->loop_depth;
+               irg_inline_property prop        = get_irg_inline_property(callee);
+               int                 loop_depth;
+               const call_entry    *centry;
+               pmap_entry          *e;
 
-               if (! (prop & irg_inline_forced)
-                               && env->n_nodes + callee_env->n_nodes > maxsize) {
+               if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
                        DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
                                                env->n_nodes, callee, callee_env->n_nodes));
                        continue;
@@ -2104,11 +2201,21 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 
                e = pmap_find(copied_graphs, callee);
                if (e != NULL) {
+                       int benefice = curr_call->benefice;
+                       /*
+                        * Reduce the benefice for recursive functions IFF not all
+                        * arguments are const: inlining recursive functions is rarely good.
+                        */
+                       if (!curr_call->all_const)
+                               benefice -= 2000;
+                       if (benefice < inline_threshold)
+                               continue;
+
                        /*
-                       * Remap callee if we have a copy.
-                       * FIXME: Should we do this only for recursive Calls ?
-                       */
-                       callee = e->value;
+                        * Remap callee if we have a copy.
+                        */
+                       callee     = e->value;
+                       callee_env = get_irg_link(callee);
                }
 
                if (current_ir_graph == callee) {
@@ -2117,8 +2224,19 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                         * walk the graph and change it. So we have to make a copy of
                         * the graph first.
                         */
-                       inline_irg_env *callee_env;
-                       ir_graph       *copy;
+                       int benefice = curr_call->benefice;
+                       ir_graph *copy;
+
+                       /*
+                        * Reduce the benefice for recursive functions IFF not all
+                        * arguments are const: inlining recursive functions is rarely good.
+                        */
+                       if (!curr_call->all_const)
+                               benefice -= 2000;
+                       if (benefice < inline_threshold)
+                               continue;
+
+                       ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
 
                        /*
                         * No copy yet, create one.
@@ -2130,7 +2248,9 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        /* create_irg_copy() destroys the Proj links, recompute them */
                        phiproj_computed = 0;
 
-                       /* allocate new environment */
+                       ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
+
+                       /* allocate a new environment */
                        callee_env = alloc_inline_irg_env();
                        set_irg_link(copy, callee_env);
 
@@ -2161,37 +2281,35 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                if (!did_inline)
                        continue;
 
-               /* was inlined, must be recomputed */
+               /* call was inlined, Phi/Projs for the current graph must be recomputed */
                phiproj_computed = 0;
 
-               /* after we have inlined callee, all called methods inside
-                * callee are now called once more */
-               for (centry = callee_env->call_head; centry != NULL;
-                               centry = centry->next) {
-                       inline_irg_env *penv = get_irg_link(centry->callee);
-                       ++penv->n_callers;
-               }
+               /* remove it from the caller list */
+               list_del(&curr_call->list);
 
                /* callee was inlined. Append its call list. */
                env->got_inline = 1;
-               if (curr_call->local_adr)
-                       env->local_vars = 1;
                --env->n_call_nodes;
 
                /* we just generate a bunch of new calls */
-               for (centry = callee_env->call_head; centry != NULL;
-                               centry = centry->next) {
-                       /* Note that the src list points to Call nodes in the inlined graph,
-                        * but we need Call nodes in our graph. Luckily the inliner leaves
-                        * this information in the link field. */
+               loop_depth = curr_call->loop_depth;
+               list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
+                       inline_irg_env *penv = get_irg_link(centry->callee);
+                       ir_node        *new_call;
+                       call_entry     *new_entry;
 
-                       ir_node    *new_call = get_irn_link(centry->call);
-                       call_entry *new_entry;
+                       /* after we have inlined callee, all called methods inside
+                        * callee are now called once more */
+                       ++penv->n_callers;
 
-                       if (!is_Call(new_call))
-                               continue;
+                       /* Note that the src list points to Call nodes in the inlined graph,
+                        * but we need Call nodes in our graph. Luckily the inliner leaves
+                        * this information in the link field. */
+                       new_call = get_irn_link(centry->call);
+                       assert(is_Call(new_call));
 
-                       new_entry = duplicate_call_entry(centry, depth, new_call);
+                       new_entry = duplicate_call_entry(centry, new_call, loop_depth);
+                       list_add_tail(&new_entry->list, &env->calls);
                        maybe_push_call(pqueue, new_entry, inline_threshold);
                }
 
@@ -2199,15 +2317,17 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                env->n_nodes += callee_env->n_nodes;
                --callee_env->n_callers;
        }
-
+       ir_free_resources(irg, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
        del_pqueue(pqueue);
 }
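
Both recursion guards above, one for callees that already have a copy in copied_graphs and one for self-calls, apply the same penalty before anything is committed: without all-constant arguments the benefice drops by 2000, and the call is skipped when it no longer reaches the threshold. A hypothetical helper condensing that duplicated logic (the function name is mine; the fields, the constant, and the comparison come straight from the code above):

/* Sketch: the recursion penalty duplicated in inline_into() */
static int recursive_call_worth_inlining(const call_entry *entry,
                                         int inline_threshold)
{
	int benefice = entry->benefice;

	/* inlining recursive functions is rarely good, so charge a
	 * flat penalty unless every argument is constant */
	if (!entry->all_const)
		benefice -= 2000;
	return benefice >= inline_threshold;
}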
 
-/**
+/*
  * Heuristic inliner. Calculates a benefice value for every call and inlines
  * those calls with a value higher than the threshold.
  */
-void inline_functions(unsigned maxsize, int inline_threshold) {
+void inline_functions(unsigned maxsize, int inline_threshold,
+                      opt_ptr after_inline_opt)
+{
        inline_irg_env   *env;
        int              i, n_irgs;
        ir_graph         *rem;
@@ -2229,7 +2349,7 @@ void inline_functions(unsigned maxsize, int inline_threshold) {
        for (i = 0; i < n_irgs; ++i)
                set_irg_link(irgs[i], alloc_inline_irg_env());
 
-       /* Precompute information in temporary data structure. */
+       /* Pre-compute information in temporary data structure. */
        wenv.ignore_runtime = 0;
        wenv.ignore_callers = 0;
        for (i = 0; i < n_irgs; ++i) {
@@ -2237,7 +2357,7 @@ void inline_functions(unsigned maxsize, int inline_threshold) {
 
                free_callee_info(irg);
 
-               wenv.x         = get_irg_link(irg);
+               wenv.x = get_irg_link(irg);
                assure_cf_loop(irg);
                irg_walk_graph(irg, NULL, collect_calls2, &wenv);
        }
@@ -2253,25 +2373,9 @@ void inline_functions(unsigned maxsize, int inline_threshold) {
                ir_graph *irg = irgs[i];
 
                env = get_irg_link(irg);
-               if (env->got_inline) {
+               if (env->got_inline && after_inline_opt != NULL) {
                        /* this irg got calls inlined: optimize it */
-
-                       if (0) {
-                               /* scalar replacement does not work well with Tuple nodes, so optimize them away */
-                               optimize_graph_df(irg);
-
-                               if (env->local_vars) {
-                                       if (scalar_replacement_opt(irg)) {
-                                               optimize_graph_df(irg);
-                                       }
-                               }
-                               optimize_cf(irg);
-                       } else {
-                               if (env->local_vars) {
-                                       scalar_replacement_opt(irg);
-                               }
-                               combo(irg);
-                       }
+                       after_inline_opt(irg);
                }
                if (env->got_inline || (env->n_callers_orig != env->n_callers)) {
                        DB((dbg, LEVEL_1, "Nodes:%3d ->%3d, calls:%3d ->%3d, callers:%3d ->%3d, -- %s\n",
@@ -2297,6 +2401,41 @@ void inline_functions(unsigned maxsize, int inline_threshold) {
        current_ir_graph = rem;
 }
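
With the hard-coded cleanup deleted above, the choice of post-inline optimization now travels in through after_inline_opt. A sketch of how a driver might wire this up, assuming a callback that simply restores the removed else-branch (scalar replacement, then combo; the guard on local variables is dropped here for brevity), with 750 and 0 as illustrative parameter values rather than anything this patch prescribes:

/* assumed cleanup callback, mirroring the branch removed above */
static void after_inline_cleanup(ir_graph *irg)
{
	scalar_replacement_opt(irg);
	combo(irg);
}

static void run_inliner(void)
{
	/* stop inlining into a graph once it grows past 750 nodes and
	 * require a benefice of at least 0 (both values illustrative) */
	inline_functions(750, 0, after_inline_cleanup);
}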
 
+struct inline_functions_pass_t {
+       ir_prog_pass_t pass;
+       unsigned       maxsize;
+       int            inline_threshold;
+       opt_ptr        after_inline_opt;
+};
+
+/**
+ * Wrapper to run inline_functions() as an ir_prog pass.
+ */
+static int inline_functions_wrapper(ir_prog *irp, void *context) {
+       struct inline_functions_pass_t *pass = context;
+
+       (void)irp;
+       inline_functions(pass->maxsize, pass->inline_threshold,
+                        pass->after_inline_opt);
+       return 0;
+}
+
+/* create an ir_prog pass for inline_functions() */
+ir_prog_pass_t *inline_functions_pass(
+         const char *name, unsigned maxsize, int inline_threshold,
+         opt_ptr after_inline_opt) {
+       struct inline_functions_pass_t *pass =
+               XMALLOCZ(struct inline_functions_pass_t);
+
+       pass->maxsize          = maxsize;
+       pass->inline_threshold = inline_threshold;
+       pass->after_inline_opt = after_inline_opt;
+
+       return def_prog_pass_constructor(
+               &pass->pass, name ? name : "inline_functions",
+               inline_functions_wrapper);
+}
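
The constructor above embeds the pass as the first member of a heap-allocated parameter struct, so inline_functions_wrapper() can recover maxsize, threshold, and callback from its context pointer. Building the pass is then a one-liner; how it is subsequently registered and run is a pass-manager concern outside this patch, so none of that API is shown:

static ir_prog_pass_t *build_inline_pass(void)
{
	/* a NULL name falls back to "inline_functions", as the
	 * constructor shows; the values repeat the illustrative ones
	 * used with after_inline_cleanup() earlier */
	return inline_functions_pass(NULL, 750, 0, after_inline_cleanup);
}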
+
 void firm_init_inline(void) {
        FIRM_DBG_REGISTER(dbg, "firm.opt.inline");
 }