fix bug introduced in 'cleanup' commit
[libfirm] / ir / opt / opt_inline.c
index 7b868aa..1eb9122 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
+ * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
  *
  * This file is part of libFirm.
  *
@@ -21,7 +21,6 @@
  * @file
  * @brief    Dead node elimination and Procedure Inlining.
  * @author   Michael Beck, Goetz Lindenmaier
- * @version  $Id$
  */
 #include "config.h"
 
@@ -63,7 +62,7 @@
 #include "irtools.h"
 #include "iropt_dbg.h"
 #include "irpass_t.h"
-#include "irphase_t.h"
+#include "irnodemap.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
@@ -108,7 +107,6 @@ static void copy_node_inline(ir_node *node, void *env)
        ir_node  *new_node = irn_copy_into_irg(node, new_irg);
 
        set_new_node(node, new_node);
-
        if (is_Sel(node)) {
                ir_graph  *old_irg        = get_irn_irg(node);
                ir_type   *old_frame_type = get_irg_frame_type(old_irg);
@@ -116,7 +114,7 @@ static void copy_node_inline(ir_node *node, void *env)
                assert(is_Sel(new_node));
                /* use copied entities from the new frame */
                if (get_entity_owner(old_entity) == old_frame_type) {
-                       ir_entity *new_entity = get_entity_link(old_entity);
+                       ir_entity *new_entity = (ir_entity*)get_entity_link(old_entity);
                        assert(new_entity != NULL);
                        set_Sel_entity(new_node, new_entity);
                }
@@ -145,9 +143,15 @@ static void set_preds_inline(ir_node *node, void *env)
  */
 static void find_addr(ir_node *node, void *env)
 {
-       bool *allow_inline = env;
+       bool *allow_inline = (bool*)env;
 
-       if (is_Sel(node)) {
+       if (is_Block(node) && get_Block_entity(node)) {
+               /*
+                * Currently we cannot correctly handle inlining of blocks
+                * whose address has been taken.
+                */
+               *allow_inline = false;
+       } else if (is_Sel(node)) {
                ir_graph *irg = current_ir_graph;
                if (get_Sel_ptr(node) == get_irg_frame(irg)) {
                        /* access to frame */
@@ -156,6 +160,9 @@ static void find_addr(ir_node *node, void *env)
                                /* access to value_type */
                                *allow_inline = false;
                        }
+                       if (is_parameter_entity(ent)) {
+                               *allow_inline = false;
+                       }
                }
        } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
                /* From GCC:
@@ -191,11 +198,11 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
        ir_entity          *called      = get_irg_entity(called_graph);
        ir_type            *called_type = get_entity_type(called);
        ir_type            *call_type   = get_Call_type(call);
-       int                 n_params    = get_method_n_params(called_type);
-       int                 n_arguments = get_method_n_params(call_type);
-       int                 n_res       = get_method_n_ress(called_type);
+       size_t              n_params    = get_method_n_params(called_type);
+       size_t              n_arguments = get_method_n_params(call_type);
+       size_t              n_res       = get_method_n_ress(called_type);
        irg_inline_property prop        = get_irg_inline_property(called_graph);
-       int                 i;
+       size_t              i;
        bool                res;
 
        if (prop == irg_inline_forbidden)
@@ -215,7 +222,7 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
         * It is implementation dependent what happens in that case.
         * We support inlining, if the bitsize of the types matches AND
         * the same arithmetic is used. */
-       for (i = n_params - 1; i >= 0; --i) {
+       for (i = 0; i < n_params; ++i) {
                ir_type *param_tp = get_method_param_type(called_type, i);
                ir_type *arg_tp   = get_method_param_type(call_type, i);
 
@@ -232,7 +239,7 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
                        /* otherwise we can simply "reinterpret" the bits */
                }
        }
-       for (i = n_res - 1; i >= 0; --i) {
+       for (i = 0; i < n_res; ++i) {
                ir_type *decl_res_tp = get_method_res_type(called_type, i);
                ir_type *used_res_tp = get_method_res_type(call_type, i);
 
@@ -284,14 +291,15 @@ static void copy_frame_entities(ir_graph *from, ir_graph *to)
 {
        ir_type *from_frame = get_irg_frame_type(from);
        ir_type *to_frame   = get_irg_frame_type(to);
-       int      n_members  = get_class_n_members(from_frame);
-       int      i;
+       size_t   n_members  = get_class_n_members(from_frame);
+       size_t   i;
        assert(from_frame != to_frame);
 
        for (i = 0; i < n_members; ++i) {
                ir_entity *old_ent = get_class_member(from_frame, i);
                ir_entity *new_ent = copy_entity_own(old_ent, to_frame);
                set_entity_link(old_ent, new_ent);
+               assert (!is_parameter_entity(old_ent));
        }
 }
 
@@ -300,7 +308,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 {
        ir_node       *pre_call;
        ir_node       *post_call, *post_bl;
-       ir_node       *in[pn_Start_max];
+       ir_node       *in[pn_Start_max+1];
        ir_node       *end, *end_bl, *block;
        ir_node       **res_pred;
        ir_node       **cf_pred;
@@ -344,12 +352,10 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) == op_pin_state_pinned);
        assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
-       set_irg_outs_inconsistent(irg);
-       set_irg_extblk_inconsistent(irg);
-       set_irg_doms_inconsistent(irg);
-       set_irg_loopinfo_inconsistent(irg);
+       clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+                          | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
        set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
-       set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+       clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
        edges_deactivate(irg);
 
        /* here we know we WILL inline, so inform the statistics */
@@ -363,7 +369,8 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        {
                ir_node *Xproj = NULL;
                ir_node *proj;
-               for (proj = get_irn_link(call); proj; proj = get_irn_link(proj)) {
+               for (proj = (ir_node*)get_irn_link(call); proj != NULL;
+                    proj = (ir_node*)get_irn_link(proj)) {
                        long proj_nr = get_Proj_proj(proj);
                        if (proj_nr == pn_Call_X_except) Xproj = proj;
                }
@@ -393,9 +400,8 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        in[pn_Start_M]              = get_Call_mem(call);
        in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
        in[pn_Start_P_frame_base]   = get_irg_frame(irg);
-       in[pn_Start_P_tls]          = get_irg_tls(irg);
        in[pn_Start_T_args]         = new_r_Tuple(post_bl, n_params, args_in);
-       pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
+       pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
        post_call = call;
 
        /* --
@@ -415,7 +421,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        {
                ir_node *start_block;
                ir_node *start;
-               ir_node *bad;
                ir_node *nomem;
 
                start_block = get_irg_start_block(called_graph);
@@ -426,10 +431,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                set_new_node(start, pre_call);
                mark_irn_visited(start);
 
-               bad = get_irg_bad(called_graph);
-               set_new_node(bad, get_irg_bad(irg));
-               mark_irn_visited(bad);
-
                nomem = get_irg_no_mem(called_graph);
                set_new_node(nomem, get_irg_no_mem(irg));
                mark_irn_visited(nomem);
@@ -437,7 +438,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 
        /* entitiy link is used to link entities on old stackframe to the
         * new stackframe */
-       irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
+       irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
 
        /* copy entities and nodes */
        assert(!irn_visited(get_irg_end(called_graph)));
@@ -445,7 +446,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        irg_walk_core(get_irg_end(called_graph), copy_node_inline, set_preds_inline,
                      irg);
 
-       irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+       irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
 
        /* -- Merge the end of the inlined procedure with the call site -- */
        /* We will turn the old Call node into a Tuple with the following
@@ -493,7 +494,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 
        /* build a Tuple for all results of the method.
         * add Phi node if there was more than one Return. */
-       turn_into_tuple(post_call, pn_Call_max);
+       turn_into_tuple(post_call, pn_Call_max+1);
        /* First the Memory-Phi */
        n_mem_phi = 0;
        for (i = 0; i < arity; i++) {
@@ -538,10 +539,9 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                }
                        }
                        if (n_ret > 0) {
-                               ir_mode *mode = get_irn_mode(cf_pred[0]);
-                               phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
+                               phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
                        } else {
-                               phi = new_r_Bad(irg);
+                               phi = new_r_Bad(irg, res_mode);
                        }
                        res_pred[j] = phi;
                        /* Conserve Phi-list for further inlinings -- but might be optimized */
@@ -553,14 +553,11 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
                set_Tuple_pred(call, pn_Call_T_result, result_tuple);
        } else {
-               set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+               set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
        }
        /* handle the regular call */
        set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
 
-       /* For now, we cannot inline calls with value_base */
-       set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
-
        /* Finally the exception control flow.
           We have two possible situations:
           First if the Call branches to an exception handler:
@@ -592,7 +589,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
                        }
                } else {
-                       set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+                       set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
                }
        } else {
                ir_node *main_end_bl;
@@ -619,7 +616,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+               set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
                free(end_preds);
        }
        free(res_pred);
@@ -668,8 +665,8 @@ static ir_graph *get_call_called_irg(ir_node *call)
        ir_node *addr;
 
        addr = get_Call_ptr(call);
-       if (is_Global(addr)) {
-               ir_entity *ent = get_Global_entity(addr);
+       if (is_SymConst_addr_ent(addr)) {
+               ir_entity *ent = get_SymConst_entity(addr);
                /* we don't know which function gets finally bound to a weak symbol */
                if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
                        return NULL;
@@ -691,7 +688,7 @@ static void collect_calls(ir_node *call, void *env)
 
                if (called_irg != NULL) {
                        /* The Call node calls a locally defined method.  Remember to inline. */
-                       inline_env_t *ienv  = env;
+                       inline_env_t *ienv  = (inline_env_t*)env;
                        call_entry   *entry = OALLOC(&ienv->obst, call_entry);
                        entry->call       = call;
                        entry->callee     = called_irg;
@@ -717,7 +714,6 @@ void inline_small_irgs(ir_graph *irg, int size)
 {
        ir_graph *rem = current_ir_graph;
        inline_env_t env;
-       call_entry *entry;
 
        current_ir_graph = irg;
        /* Handle graph state */
@@ -757,17 +753,17 @@ void inline_small_irgs(ir_graph *irg, int size)
        current_ir_graph = rem;
 }
 
-struct inline_small_irgs_pass_t {
+typedef struct inline_small_irgs_pass_t {
        ir_graph_pass_t pass;
        int            size;
-};
+} inline_small_irgs_pass_t;
 
 /**
  * Wrapper to run inline_small_irgs() as a pass.
  */
 static int inline_small_irgs_wrapper(ir_graph *irg, void *context)
 {
-       struct inline_small_irgs_pass_t *pass = context;
+       inline_small_irgs_pass_t *pass = (inline_small_irgs_pass_t*)context;
 
        inline_small_irgs(irg, pass->size);
        return 0;
@@ -776,8 +772,7 @@ static int inline_small_irgs_wrapper(ir_graph *irg, void *context)
 /* create a pass for inline_small_irgs() */
 ir_graph_pass_t *inline_small_irgs_pass(const char *name, int size)
 {
-       struct inline_small_irgs_pass_t *pass =
-               XMALLOCZ(struct inline_small_irgs_pass_t);
+       inline_small_irgs_pass_t *pass = XMALLOCZ(inline_small_irgs_pass_t);
 
        pass->size = size;
        return def_graph_pass_constructor(
@@ -833,9 +828,9 @@ typedef struct walker_env {
  */
 static void collect_calls2(ir_node *call, void *ctx)
 {
-       wenv_t         *env = ctx;
+       wenv_t         *env = (wenv_t*)ctx;
        inline_irg_env *x = env->x;
-       ir_opcode      code = get_irn_opcode(call);
+       unsigned        code = get_irn_opcode(call);
        ir_graph       *callee;
        call_entry     *entry;
 
@@ -855,8 +850,8 @@ static void collect_calls2(ir_node *call, void *ctx)
        if (env->ignore_runtime) {
                ir_node *symc = get_Call_ptr(call);
 
-               if (is_Global(symc)) {
-                       ir_entity *ent = get_Global_entity(symc);
+               if (is_SymConst_addr_ent(symc)) {
+                       ir_entity *ent = get_SymConst_entity(symc);
 
                        if (get_entity_additional_properties(ent) & mtp_property_runtime)
                                return;
@@ -870,7 +865,7 @@ static void collect_calls2(ir_node *call, void *ctx)
        callee = get_call_called_irg(call);
        if (callee != NULL) {
                if (! env->ignore_callers) {
-                       inline_irg_env *callee_env = get_irg_link(callee);
+                       inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
                        /* count all static callers */
                        ++callee_env->n_callers;
                        ++callee_env->n_callers_orig;
@@ -893,11 +888,11 @@ static void collect_calls2(ir_node *call, void *ctx)
 
 /**
  * Returns TRUE if the number of callers is 0 in the irg's environment,
- * hence this irg is a leave.
+ * hence this irg is a leaf.
  */
-inline static int is_leave(ir_graph *irg)
+inline static int is_leaf(ir_graph *irg)
 {
-       inline_irg_env *env = get_irg_link(irg);
+       inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
        return env->n_call_nodes == 0;
 }
 
@@ -907,7 +902,7 @@ inline static int is_leave(ir_graph *irg)
  */
 inline static int is_smaller(ir_graph *callee, unsigned size)
 {
-       inline_irg_env *env = get_irg_link(callee);
+       inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
        return env->n_nodes < size;
 }
 
@@ -943,13 +938,13 @@ static call_entry *duplicate_call_entry(const call_entry *entry,
  */
 static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth)
 {
-       call_entry *entry, *nentry;
+       call_entry *nentry;
 
        /* Note that the src list points to Call nodes in the inlined graph, but
           we need Call nodes in our graph. Luckily the inliner leaves this information
           in the link field. */
        list_for_each_entry(call_entry, entry, &src->calls, list) {
-               nentry = duplicate_call_entry(entry, get_irn_link(entry->call), loop_depth);
+               nentry = duplicate_call_entry(entry, (ir_node*)get_irn_link(entry->call), loop_depth);
                list_add_tail(&nentry->list, &dst->calls);
        }
        dst->n_call_nodes += src->n_call_nodes;
@@ -957,24 +952,22 @@ static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_
 }
 
 /*
- * Inlines small leave methods at call sites where the called address comes
+ * Inlines small leaf methods at call sites where the called address comes
  * from a Const node that references the entity representing the called
  * method.
  * The size argument is a rough measure for the code size of the method:
  * Methods where the obstack containing the firm graph is smaller than
  * size are inlined.
  */
-void inline_leave_functions(unsigned maxsize, unsigned leavesize,
-                            unsigned size, int ignore_runtime)
+void inline_leaf_functions(unsigned maxsize, unsigned leafsize,
+                           unsigned size, int ignore_runtime)
 {
        inline_irg_env   *env;
        ir_graph         *irg;
-       int              i, n_irgs;
+       size_t           i, n_irgs;
        ir_graph         *rem;
        int              did_inline;
        wenv_t           wenv;
-       call_entry       *entry, *next;
-       const call_entry *centry;
        pmap             *copied_graphs;
        pmap_entry       *pm_entry;
 
@@ -998,14 +991,16 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                assert(get_irg_phase_state(irg) != phase_building);
                free_callee_info(irg);
 
-               assure_cf_loop(irg);
-               wenv.x = get_irg_link(irg);
+               assure_irg_properties(irg,
+                       IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+               wenv.x = (inline_irg_env*)get_irg_link(irg);
                irg_walk_graph(irg, NULL, collect_calls2, &wenv);
+               confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        }
 
        /* -- and now inline. -- */
 
-       /* Inline leaves recursively -- we might construct new leaves. */
+       /* Inline leafs recursively -- we might construct new leafs. */
        do {
                did_inline = 0;
 
@@ -1014,7 +1009,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                        int phiproj_computed = 0;
 
                        current_ir_graph = get_irp_irg(i);
-                       env              = get_irg_link(current_ir_graph);
+                       env              = (inline_irg_env*)get_irg_link(current_ir_graph);
 
                        ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
                        list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
@@ -1032,8 +1027,8 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        continue;
                                }
 
-                               if (is_leave(callee) && (
-                                   is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
+                               if (is_leaf(callee) && (
+                                   is_smaller(callee, leafsize) || prop >= irg_inline_forced)) {
                                        if (!phiproj_computed) {
                                                phiproj_computed = 1;
                                                collect_phiprojs(current_ir_graph);
@@ -1041,7 +1036,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        did_inline = inline_method(call, callee);
 
                                        if (did_inline) {
-                                               inline_irg_env *callee_env = get_irg_link(callee);
+                                               inline_irg_env *callee_env = (inline_irg_env*)get_irg_link(callee);
 
                                                /* call was inlined, Phi/Projs for current graph must be recomputed */
                                                phiproj_computed = 0;
@@ -1068,7 +1063,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                int phiproj_computed = 0;
 
                current_ir_graph = get_irp_irg(i);
-               env              = get_irg_link(current_ir_graph);
+               env              = (inline_irg_env*)get_irg_link(current_ir_graph);
 
                ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
 
@@ -1076,7 +1071,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
                        irg_inline_property prop;
                        ir_graph            *callee;
-                       pmap_entry          *e;
+                       ir_graph            *calleee;
 
                        call   = entry->call;
                        callee = entry->callee;
@@ -1086,13 +1081,13 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                continue;
                        }
 
-                       e = pmap_find(copied_graphs, callee);
-                       if (e != NULL) {
+                       calleee = pmap_get(ir_graph, copied_graphs, callee);
+                       if (calleee != NULL) {
                                /*
                                 * Remap callee if we have a copy.
                                 * FIXME: Should we do this only for recursive Calls ?
                                 */
-                               callee = e->value;
+                               callee = calleee;
                        }
 
                        if (prop >= irg_inline_forced ||
@@ -1111,7 +1106,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 
                                        /*
                                         * No copy yet, create one.
-                                        * Note that recursive methods are never leaves, so it is sufficient
+                                        * Note that recursive methods are never leafs, so it is sufficient
                                         * to test this condition here.
                                         */
                                        copy = create_irg_copy(callee);
@@ -1125,7 +1120,8 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        callee_env = alloc_inline_irg_env();
                                        set_irg_link(copy, callee_env);
 
-                                       assure_cf_loop(copy);
+                                       assure_irg_properties(copy,
+                                               IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
                                        wenv.x              = callee_env;
                                        wenv.ignore_callers = 1;
                                        irg_walk_graph(copy, NULL, collect_calls2, &wenv);
@@ -1155,7 +1151,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        /* call was inlined, Phi/Projs for current graph must be recomputed */
                                        phiproj_computed = 0;
 
-                                       /* callee was inline. Append it's call list. */
+                                       /* callee was inlined. Append its call list. */
                                        env->got_inline = 1;
                                        --env->n_call_nodes;
                                        append_call_list(env, callee_env, entry->loop_depth);
@@ -1164,7 +1160,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        /* after we have inlined callee, all called methods inside callee
                                           are now called once more */
                                        list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
-                                               inline_irg_env *penv = get_irg_link(centry->callee);
+                                               inline_irg_env *penv = (inline_irg_env*)get_irg_link(centry->callee);
                                                ++penv->n_callers;
                                        }
 
@@ -1179,7 +1175,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 
        for (i = 0; i < n_irgs; ++i) {
                irg = get_irp_irg(i);
-               env = get_irg_link(irg);
+               env = (inline_irg_env*)get_irg_link(irg);
 
                if (env->got_inline) {
                        optimize_graph_df(irg);
@@ -1195,7 +1191,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 
        /* kill the copied graphs: we don't need them anymore */
        foreach_pmap(copied_graphs, pm_entry) {
-               ir_graph *copy = pm_entry->value;
+               ir_graph *copy = (ir_graph*)pm_entry->value;
 
                /* reset the entity, otherwise it will be deleted in the next step ... */
                set_irg_entity(copy, NULL);
@@ -1207,45 +1203,44 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        current_ir_graph = rem;
 }
 
-struct inline_leave_functions_pass_t {
+typedef struct inline_leaf_functions_pass_t {
        ir_prog_pass_t pass;
        unsigned       maxsize;
-       unsigned       leavesize;
+       unsigned       leafsize;
        unsigned       size;
        int            ignore_runtime;
-};
+} inline_leaf_functions_pass_t;
 
 /**
- * Wrapper to run inline_leave_functions() as a ir_prog pass.
+ * Wrapper to run inline_leaf_functions() as a ir_prog pass.
  */
-static int inline_leave_functions_wrapper(ir_prog *irp, void *context)
+static int inline_leaf_functions_wrapper(ir_prog *irp, void *context)
 {
-       struct inline_leave_functions_pass_t *pass = context;
+       inline_leaf_functions_pass_t *pass = (inline_leaf_functions_pass_t*)context;
 
        (void)irp;
-       inline_leave_functions(
-               pass->maxsize, pass->leavesize,
+       inline_leaf_functions(
+               pass->maxsize, pass->leafsize,
                pass->size, pass->ignore_runtime);
        return 0;
 }
 
-/* create a pass for inline_leave_functions() */
-ir_prog_pass_t *inline_leave_functions_pass(
-       const char *name, unsigned maxsize, unsigned leavesize,
+/* create a pass for inline_leaf_functions() */
+ir_prog_pass_t *inline_leaf_functions_pass(
+       const char *name, unsigned maxsize, unsigned leafsize,
        unsigned size, int ignore_runtime)
 {
-       struct inline_leave_functions_pass_t *pass =
-               XMALLOCZ(struct inline_leave_functions_pass_t);
+       inline_leaf_functions_pass_t *pass = XMALLOCZ(inline_leaf_functions_pass_t);
 
        pass->maxsize        = maxsize;
-       pass->leavesize      = leavesize;
+       pass->leafsize       = leafsize;
        pass->size           = size;
        pass->ignore_runtime = ignore_runtime;
 
        return def_prog_pass_constructor(
                &pass->pass,
-               name ? name : "inline_leave_functions",
-               inline_leave_functions_wrapper);
+               name ? name : "inline_leaf_functions",
+               inline_leaf_functions_wrapper);
 }
 
 /**
@@ -1320,7 +1315,9 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
 {
        ir_entity *ent = get_irg_entity(irg);
        ir_type  *mtp;
-       int      nparams, i, proj_nr;
+       size_t   nparams;
+       int      i;
+       long     proj_nr;
        ir_node  *irg_args, *arg;
 
        mtp      = get_entity_type(ent);
@@ -1347,17 +1344,12 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
  * After inlining, the local variable might be transformed into a
  * SSA variable by scalar_replacement().
  */
-static unsigned get_method_local_adress_weight(ir_graph *callee, int pos)
+static unsigned get_method_local_adress_weight(ir_graph *callee, size_t pos)
 {
-       inline_irg_env *env = get_irg_link(callee);
+       inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
 
-       if (env->local_weights != NULL) {
-               if (pos < ARR_LEN(env->local_weights))
-                       return env->local_weights[pos];
-               return 0;
-       }
-
-       analyze_irg_local_weights(env, callee);
+       if (env->local_weights == NULL)
+               analyze_irg_local_weights(env, callee);
 
        if (pos < ARR_LEN(env->local_weights))
                return env->local_weights[pos];
@@ -1374,10 +1366,12 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
 {
        ir_node   *call = entry->call;
        ir_entity *ent  = get_irg_entity(callee);
+       ir_type   *callee_frame;
+       size_t    i, n_members, n_params;
        ir_node   *frame_ptr;
        ir_type   *mtp;
        int       weight = 0;
-       int       i, n_params, all_const;
+       int       all_const;
        unsigned  cc, v;
        irg_inline_property prop;
 
@@ -1390,6 +1384,18 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
                return entry->benefice = INT_MIN;
        }
 
+       callee_frame = get_irg_frame_type(callee);
+       n_members = get_class_n_members(callee_frame);
+       for (i = 0; i < n_members; ++i) {
+               ir_entity *frame_ent = get_class_member(callee_frame, i);
+               if (is_parameter_entity(frame_ent)) {
+                       /* TODO: the inliner should handle parameter entities by inserting Store operations */
+                       DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden due to parameter entity\n", call, callee));
+                       set_irg_inline_property(callee, irg_inline_forbidden);
+                       return entry->benefice = INT_MIN;
+               }
+       }
+
        if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
                DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
                    call, callee));
@@ -1402,7 +1408,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        cc       = get_method_calling_convention(mtp);
        if (cc & cc_reg_param) {
                /* register parameter, smaller costs for register parameters */
-               int max_regs = cc & ~cc_bits;
+               size_t max_regs = cc & ~cc_bits;
 
                if (max_regs < n_params)
                        weight += max_regs * 2 + (n_params - max_regs) * 5;
@@ -1440,7 +1446,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        }
        entry->all_const = all_const;
 
-       callee_env = get_irg_link(callee);
+       callee_env = (inline_irg_env*)get_irg_link(callee);
        if (callee_env->n_callers == 1 &&
            callee != current_ir_graph &&
            !entity_is_externally_visible(ent)) {
@@ -1455,7 +1461,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        if (callee_env->n_nodes < 30 && !callee_env->recursive)
                weight += 2000;
 
-       /* and finally for leaves: they do not increase the register pressure
+       /* and finally for leaves: they do not increase the register pressure
           because of callee safe registers */
        if (callee_env->n_call_nodes == 0)
                weight += 400;
@@ -1475,16 +1481,18 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        return entry->benefice = weight;
 }
 
-static ir_graph **irgs;
-static int      last_irg;
+typedef struct walk_env_t {
+       ir_graph **irgs;
+       size_t   last_irg;
+} walk_env_t;
 
 /**
  * Callgraph walker, collect all visited graphs.
  */
 static void callgraph_walker(ir_graph *irg, void *data)
 {
-       (void) data;
-       irgs[last_irg++] = irg;
+       walk_env_t *env = (walk_env_t *)data;
+       env->irgs[env->last_irg++] = irg;
 }
 
 /**
@@ -1494,22 +1502,24 @@ static void callgraph_walker(ir_graph *irg, void *data)
  */
 static ir_graph **create_irg_list(void)
 {
-       ir_entity **free_methods;
-       int       arr_len;
-       int       n_irgs = get_irp_n_irgs();
+       ir_entity  **free_methods;
+       size_t     n_irgs = get_irp_n_irgs();
+       walk_env_t env;
 
-       cgana(&arr_len, &free_methods);
+       cgana(&free_methods);
        xfree(free_methods);
 
        compute_callgraph();
 
-       last_irg = 0;
-       irgs     = XMALLOCNZ(ir_graph*, n_irgs);
+       env.irgs     = XMALLOCNZ(ir_graph*, n_irgs);
+       env.last_irg = 0;
+
+       callgraph_walk(NULL, callgraph_walker, &env);
+       assert(n_irgs == env.last_irg);
 
-       callgraph_walk(NULL, callgraph_walker, NULL);
-       assert(n_irgs == last_irg);
+       free_callgraph();
 
-       return irgs;
+       return env.irgs;
 }
 
 /**
@@ -1552,8 +1562,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                         int inline_threshold, pmap *copied_graphs)
 {
        int            phiproj_computed = 0;
-       inline_irg_env *env = get_irg_link(irg);
-       call_entry     *curr_call;
+       inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
        wenv_t         wenv;
        pqueue_t       *pqueue;
 
@@ -1579,14 +1588,13 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
        /* note that the list of possible calls is updated during the process */
        while (!pqueue_empty(pqueue)) {
                int                 did_inline;
-               call_entry          *curr_call  = pqueue_pop_front(pqueue);
+               call_entry          *curr_call  = (call_entry*)pqueue_pop_front(pqueue);
                ir_graph            *callee     = curr_call->callee;
                ir_node             *call_node  = curr_call->call;
-               inline_irg_env      *callee_env = get_irg_link(callee);
+               inline_irg_env      *callee_env = (inline_irg_env*)get_irg_link(callee);
                irg_inline_property prop        = get_irg_inline_property(callee);
+               ir_graph            *calleee;
                int                 loop_depth;
-               const call_entry    *centry;
-               pmap_entry          *e;
 
                if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
                        DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
@@ -1594,8 +1602,8 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        continue;
                }
 
-               e = pmap_find(copied_graphs, callee);
-               if (e != NULL) {
+               calleee = pmap_get(ir_graph, copied_graphs, callee);
+               if (calleee != NULL) {
                        int benefice = curr_call->benefice;
                        /*
                         * Reduce the weight for recursive function IFF not all arguments are const.
@@ -1609,8 +1617,8 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        /*
                         * Remap callee if we have a copy.
                         */
-                       callee     = e->value;
-                       callee_env = get_irg_link(callee);
+                       callee     = calleee;
+                       callee_env = (inline_irg_env*)get_irg_link(callee);
                }
 
                if (current_ir_graph == callee) {
@@ -1635,7 +1643,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 
                        /*
                         * No copy yet, create one.
-                        * Note that recursive methods are never leaves, so it is
+                        * Note that recursive methods are never leaves, so it is
                         * sufficient to test this condition here.
                         */
                        copy = create_irg_copy(callee);
@@ -1649,7 +1657,8 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        callee_env = alloc_inline_irg_env();
                        set_irg_link(copy, callee_env);
 
-                       assure_cf_loop(copy);
+                       assure_irg_properties(copy, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+                       memset(&wenv, 0, sizeof(wenv));
                        wenv.x              = callee_env;
                        wenv.ignore_callers = 1;
                        irg_walk_graph(copy, NULL, collect_calls2, &wenv);
@@ -1682,14 +1691,14 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                /* remove it from the caller list */
                list_del(&curr_call->list);
 
-               /* callee was inline. Append it's call list. */
+               /* callee was inline. Append its call list. */
                env->got_inline = 1;
                --env->n_call_nodes;
 
                /* we just generate a bunch of new calls */
                loop_depth = curr_call->loop_depth;
                list_for_each_entry(call_entry, centry, &callee_env->calls, list) {
-                       inline_irg_env *penv = get_irg_link(centry->callee);
+                       inline_irg_env *penv = (inline_irg_env*)get_irg_link(centry->callee);
                        ir_node        *new_call;
                        call_entry     *new_entry;
 
@@ -1700,7 +1709,14 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        /* Note that the src list points to Call nodes in the inlined graph,
                         * but we need Call nodes in our graph. Luckily the inliner leaves
                         * this information in the link field. */
-                       new_call = get_irn_link(centry->call);
+                       new_call = (ir_node*)get_irn_link(centry->call);
+                       if (get_irn_irg(new_call) != irg) {
+                               /* centry->call has not been copied, which means it is dead.
+                                * This might happen during inlining, if a const function
+                                * that cannot be inlined is only used as an unused argument
+                                * of another function, which is inlined. */
+                               continue;
+                       }
                        assert(is_Call(new_call));
 
                        new_entry = duplicate_call_entry(centry, new_call, loop_depth);
@@ -1724,7 +1740,7 @@ void inline_functions(unsigned maxsize, int inline_threshold,
                       opt_ptr after_inline_opt)
 {
        inline_irg_env   *env;
-       int              i, n_irgs;
+       size_t           i, n_irgs;
        ir_graph         *rem;
        wenv_t           wenv;
        pmap             *copied_graphs;
@@ -1752,8 +1768,8 @@ void inline_functions(unsigned maxsize, int inline_threshold,
 
                free_callee_info(irg);
 
-               wenv.x = get_irg_link(irg);
-               assure_cf_loop(irg);
+               wenv.x = (inline_irg_env*)get_irg_link(irg);
+               assure_loopinfo(irg);
                irg_walk_graph(irg, NULL, collect_calls2, &wenv);
        }
 
@@ -1767,7 +1783,7 @@ void inline_functions(unsigned maxsize, int inline_threshold,
        for (i = 0; i < n_irgs; ++i) {
                ir_graph *irg = irgs[i];
 
-               env = get_irg_link(irg);
+               env = (inline_irg_env*)get_irg_link(irg);
                if (env->got_inline && after_inline_opt != NULL) {
                        /* this irg got calls inlined: optimize it */
                        after_inline_opt(irg);
@@ -1782,7 +1798,7 @@ void inline_functions(unsigned maxsize, int inline_threshold,
 
        /* kill the copied graphs: we don't need them anymore */
        foreach_pmap(copied_graphs, pm_entry) {
-               ir_graph *copy = pm_entry->value;
+               ir_graph *copy = (ir_graph*)pm_entry->value;
 
                /* reset the entity, otherwise it will be deleted in the next step ... */
                set_irg_entity(copy, NULL);
@@ -1796,19 +1812,19 @@ void inline_functions(unsigned maxsize, int inline_threshold,
        current_ir_graph = rem;
 }
 
-struct inline_functions_pass_t {
+typedef struct inline_functions_pass_t {
        ir_prog_pass_t pass;
        unsigned       maxsize;
        int            inline_threshold;
        opt_ptr        after_inline_opt;
-};
+} inline_functions_pass_t;
 
 /**
  * Wrapper to run inline_functions() as a ir_prog pass.
  */
 static int inline_functions_wrapper(ir_prog *irp, void *context)
 {
-       struct inline_functions_pass_t *pass = context;
+       inline_functions_pass_t *pass = (inline_functions_pass_t*)context;
 
        (void)irp;
        inline_functions(pass->maxsize, pass->inline_threshold,
@@ -1821,8 +1837,7 @@ ir_prog_pass_t *inline_functions_pass(
          const char *name, unsigned maxsize, int inline_threshold,
          opt_ptr after_inline_opt)
 {
-       struct inline_functions_pass_t *pass =
-               XMALLOCZ(struct inline_functions_pass_t);
+       inline_functions_pass_t *pass = XMALLOCZ(inline_functions_pass_t);
 
        pass->maxsize          = maxsize;
        pass->inline_threshold = inline_threshold;