diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index edadecd..7196e5a 100644
@@ -21,7 +21,6 @@
  * @file
  * @brief    Dead node elimination and Procedure Inlining.
  * @author   Michael Beck, Goetz Lindenmaier
- * @version  $Id$
  */
 #include "config.h"
 
@@ -63,7 +62,7 @@
 #include "irtools.h"
 #include "iropt_dbg.h"
 #include "irpass_t.h"
-#include "irphase_t.h"
+#include "irnodemap.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
@@ -146,7 +145,13 @@ static void find_addr(ir_node *node, void *env)
 {
        bool *allow_inline = (bool*)env;
 
-       if (is_Sel(node)) {
+       if (is_Block(node) && get_Block_entity(node)) {
+               /*
+                * Currently we cannot correctly handle blocks whose address
+                * was taken when inlining.
+                */
+               *allow_inline = false;
+       } else if (is_Sel(node)) {
                ir_graph *irg = current_ir_graph;
                if (get_Sel_ptr(node) == get_irg_frame(irg)) {
                        /* access to frame */
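
find_addr() above is the pre-walker that vetoes inlining: it clears a bool flag whenever it meets a construct the inliner cannot handle (address-taken blocks, frame accesses to value types or parameter entities, alloca). A minimal sketch of how such a veto walk is driven, assuming the standard irg_walk_graph() walker; the helper name is hypothetical:

static bool graph_is_inlinable(ir_graph *irg)
{
        bool allow_inline = true;
        /* find_addr() resets the flag on every problematic node it visits */
        irg_walk_graph(irg, find_addr, NULL, &allow_inline);
        return allow_inline;
}
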
@@ -155,6 +160,9 @@ static void find_addr(ir_node *node, void *env)
                                /* access to value_type */
                                *allow_inline = false;
                        }
+                       if (is_parameter_entity(ent)) {
+                               *allow_inline = false;
+                       }
                }
        } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
                /* From GCC:
@@ -193,11 +201,11 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
        size_t              n_params    = get_method_n_params(called_type);
        size_t              n_arguments = get_method_n_params(call_type);
        size_t              n_res       = get_method_n_ress(called_type);
-       irg_inline_property prop        = get_irg_inline_property(called_graph);
+       mtp_additional_properties props = get_entity_additional_properties(called);
        size_t              i;
        bool                res;
 
-       if (prop == irg_inline_forbidden)
+       if (props & mtp_property_noinline)
                return false;
 
        if (n_arguments != n_params) {
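
The change above is the pattern that recurs throughout this commit: the three-valued irg_inline_property of the graph is replaced by the mtp_additional_properties bit set queried from the called entity. A condensed sketch of the new checks (variable names illustrative, the property flags are the ones used in the diff):

ir_entity                 *called = get_irg_entity(called_graph);
mtp_additional_properties  props  = get_entity_additional_properties(called);

if (props & mtp_property_noinline)       /* was: prop == irg_inline_forbidden */
        return false;                    /* never inline */
if (props & mtp_property_always_inline)  /* was: prop >= irg_inline_forced */
        return true;                     /* inline regardless of size limits */
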
@@ -291,64 +299,44 @@ static void copy_frame_entities(ir_graph *from, ir_graph *to)
                ir_entity *old_ent = get_class_member(from_frame, i);
                ir_entity *new_ent = copy_entity_own(old_ent, to_frame);
                set_entity_link(old_ent, new_ent);
+               assert(!is_parameter_entity(old_ent));
        }
 }
 
 /* Inlines a method at the given call site. */
 int inline_method(ir_node *call, ir_graph *called_graph)
 {
-       ir_node       *pre_call;
-       ir_node       *post_call, *post_bl;
-       ir_node       *in[pn_Start_max];
-       ir_node       *end, *end_bl, *block;
-       ir_node       **res_pred;
-       ir_node       **cf_pred;
-       ir_node       **args_in;
-       ir_node       *ret, *phi;
-       int           arity, n_ret, n_exc, n_res, i, j, rem_opt;
-       int           irn_arity, n_params;
-       int           n_mem_phi;
-       enum exc_mode exc_handling;
-       ir_type       *mtp;
-       ir_type       *ctp;
-       ir_entity     *ent;
-       ir_graph      *rem;
-       ir_graph      *irg = get_irn_irg(call);
-
        /* we cannot inline some types of calls */
        if (! can_inline(call, called_graph))
                return 0;
 
        /* We cannot inline a recursive call. The graph must be copied before
         * the call to inline_method() using create_irg_copy(). */
+       ir_graph *irg = get_irn_irg(call);
        if (called_graph == irg)
                return 0;
 
-       ent      = get_irg_entity(called_graph);
-       mtp      = get_entity_type(ent);
-       ctp      = get_Call_type(call);
-       n_params = get_method_n_params(mtp);
-       n_res    = get_method_n_ress(mtp);
+       ir_entity *ent      = get_irg_entity(called_graph);
+       ir_type   *mtp      = get_entity_type(ent);
+       ir_type   *ctp      = get_Call_type(call);
+       int        n_params = get_method_n_params(mtp);
 
-       rem = current_ir_graph;
+       ir_graph *rem = current_ir_graph;
        current_ir_graph = irg;
 
        DB((dbg, LEVEL_1, "Inlining %+F(%+F) into %+F\n", call, called_graph, irg));
 
        /* optimizations can cause problems when allocating new nodes */
-       rem_opt = get_opt_optimize();
+       int rem_opt = get_opt_optimize();
        set_optimize(0);
 
        /* Handle graph state */
-       assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) == op_pin_state_pinned);
        assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
-       set_irg_outs_inconsistent(irg);
-       set_irg_extblk_inconsistent(irg);
-       set_irg_doms_inconsistent(irg);
-       set_irg_loopinfo_inconsistent(irg);
+       clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+                          | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
        set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
-       set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+       clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
        edges_deactivate(irg);
 
        /* here we know we WILL inline, so inform the statistics */
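
As the comment near the top of inline_method() says, a self-recursive callee must not be passed in directly; callers are expected to inline a private copy instead. A minimal sketch of that protocol, using the two helpers that appear in this file:

if (called_graph == get_irn_irg(call)) {
        /* recursive call: duplicate the callee first, then inline the copy */
        ir_graph *copy = create_irg_copy(called_graph);
        inline_method(call, copy);
}
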
@@ -359,22 +347,19 @@ int inline_method(ir_node *call, ir_graph *called_graph)
           exc_handling:
           0 There is a handler.
           2 Exception handling not represented in Firm. -- */
-       {
-               ir_node *Xproj = NULL;
-               ir_node *proj;
-               for (proj = (ir_node*)get_irn_link(call); proj != NULL;
-                    proj = (ir_node*)get_irn_link(proj)) {
-                       long proj_nr = get_Proj_proj(proj);
-                       if (proj_nr == pn_Call_X_except) Xproj = proj;
-               }
-               exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
+       ir_node *Xproj = NULL;
+       for (ir_node *proj = (ir_node*)get_irn_link(call); proj != NULL;
+                proj = (ir_node*)get_irn_link(proj)) {
+               long proj_nr = get_Proj_proj(proj);
+               if (proj_nr == pn_Call_X_except) Xproj = proj;
        }
+       enum exc_mode exc_handling = Xproj != NULL ? exc_handler : exc_no_handler;
 
        /* create the argument tuple */
-       args_in = ALLOCAN(ir_node*, n_params);
+       ir_node **args_in = ALLOCAN(ir_node*, n_params);
 
-       block = get_nodes_block(call);
-       for (i = n_params - 1; i >= 0; --i) {
+       ir_node *block = get_nodes_block(call);
+       for (int i = n_params - 1; i >= 0; --i) {
                ir_node *arg      = get_Call_param(call, i);
                ir_type *param_tp = get_method_param_type(mtp, i);
                ir_mode *mode     = get_type_mode(param_tp);
@@ -388,14 +373,15 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        /* the procedure and later replaces the Start node of the called graph.
         * Post_call is the old Call node and collects the results of the called
         * graph. Both will end up being a tuple. */
-       post_bl = get_nodes_block(call);
+       ir_node *post_bl = get_nodes_block(call);
        /* XxMxPxPxPxT of Start + parameter of Call */
+       ir_node *in[pn_Start_max+1];
        in[pn_Start_M]              = get_Call_mem(call);
        in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
        in[pn_Start_P_frame_base]   = get_irg_frame(irg);
        in[pn_Start_T_args]         = new_r_Tuple(post_bl, n_params, args_in);
-       pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
-       post_call = call;
+       ir_node *pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
+       ir_node *post_call = call;
 
        /* --
           The new block gets the ins of the old block, pre_call and all its
@@ -411,32 +397,21 @@ int inline_method(ir_node *call, ir_graph *called_graph)
         * node, similar for singleton nodes like NoMem and Bad.
         * Note: this will prohibit predecessors to be copied - only do it for
         *       nodes without predecessors */
-       {
-               ir_node *start_block;
-               ir_node *start;
-               ir_node *bad;
-               ir_node *nomem;
-
-               start_block = get_irg_start_block(called_graph);
-               set_new_node(start_block, get_nodes_block(pre_call));
-               mark_irn_visited(start_block);
-
-               start = get_irg_start(called_graph);
-               set_new_node(start, pre_call);
-               mark_irn_visited(start);
-
-               bad = get_irg_bad(called_graph);
-               set_new_node(bad, get_irg_bad(irg));
-               mark_irn_visited(bad);
-
-               nomem = get_irg_no_mem(called_graph);
-               set_new_node(nomem, get_irg_no_mem(irg));
-               mark_irn_visited(nomem);
-       }
+       ir_node *start_block = get_irg_start_block(called_graph);
+       set_new_node(start_block, get_nodes_block(pre_call));
+       mark_irn_visited(start_block);
+
+       ir_node *start = get_irg_start(called_graph);
+       set_new_node(start, pre_call);
+       mark_irn_visited(start);
+
+       ir_node *nomem = get_irg_no_mem(called_graph);
+       set_new_node(nomem, get_irg_no_mem(irg));
+       mark_irn_visited(nomem);
 
        /* entity link is used to link entities on the old stackframe to the
         * new stackframe */
-       irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
+       irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
 
        /* copy entities and nodes */
        assert(!irn_visited(get_irg_end(called_graph)));
@@ -444,7 +419,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        irg_walk_core(get_irg_end(called_graph), copy_node_inline, set_preds_inline,
                      irg);
 
-       irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+       irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
 
        /* -- Merge the end of the inlined procedure with the call site -- */
        /* We will turn the old Call node into a Tuple with the following
@@ -461,27 +436,26 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        */
 
        /* Precompute some values */
-       end_bl = get_new_node(get_irg_end_block(called_graph));
-       end    = get_new_node(get_irg_end(called_graph));
-       arity  = get_Block_n_cfgpreds(end_bl);    /* arity = n_exc + n_ret  */
-       n_res  = get_method_n_ress(get_Call_type(call));
+       ir_node *end_bl = get_new_node(get_irg_end_block(called_graph));
+       ir_node *end    = get_new_node(get_irg_end(called_graph));
+       int      arity  = get_Block_n_cfgpreds(end_bl); /* arity = n_exc + n_ret  */
+       int      n_res  = get_method_n_ress(get_Call_type(call));
 
-       res_pred = XMALLOCN(ir_node*, n_res);
-       cf_pred  = XMALLOCN(ir_node*, arity);
+       ir_node **res_pred = XMALLOCN(ir_node*, n_res);
+       ir_node **cf_pred  = XMALLOCN(ir_node*, arity);
 
        /* archive keepalives */
-       irn_arity = get_irn_arity(end);
-       for (i = 0; i < irn_arity; i++) {
+       int irn_arity = get_irn_arity(end);
+       for (int i = 0; i < irn_arity; i++) {
                ir_node *ka = get_End_keepalive(end, i);
                if (! is_Bad(ka))
                        add_End_keepalive(get_irg_end(irg), ka);
        }
 
        /* replace Return nodes by Jump nodes */
-       n_ret = 0;
-       for (i = 0; i < arity; i++) {
-               ir_node *ret;
-               ret = get_Block_cfgpred(end_bl, i);
+       int n_ret = 0;
+       for (int i = 0; i < arity; i++) {
+               ir_node *ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
                        ir_node *block = get_nodes_block(ret);
                        cf_pred[n_ret] = new_r_Jmp(block);
@@ -492,11 +466,11 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 
        /* build a Tuple for all results of the method.
         * add Phi node if there was more than one Return. */
-       turn_into_tuple(post_call, pn_Call_max);
+       turn_into_tuple(post_call, pn_Call_max+1);
        /* First the Memory-Phi */
-       n_mem_phi = 0;
-       for (i = 0; i < arity; i++) {
-               ret = get_Block_cfgpred(end_bl, i);
+       int n_mem_phi = 0;
+       for (int i = 0; i < arity; i++) {
+               ir_node *ret = get_Block_cfgpred(end_bl, i);
                if (is_Return(ret)) {
                        cf_pred[n_mem_phi++] = get_Return_mem(ret);
                }
@@ -510,7 +484,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                        cf_pred[n_mem_phi++] = new_r_Proj(ret, mode_M, 1);
                }
        }
-       phi = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
+       ir_node *phi = new_r_Phi(post_bl, n_mem_phi, cf_pred, mode_M);
        set_Tuple_pred(call, pn_Call_M, phi);
        /* Conserve Phi-list for further inlinings -- but might be optimized */
        if (get_nodes_block(phi) == post_bl) {
@@ -519,13 +493,12 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        }
        /* Now the real results */
        if (n_res > 0) {
-               ir_node *result_tuple;
-               for (j = 0; j < n_res; j++) {
+               for (int j = 0; j < n_res; j++) {
                        ir_type *res_type = get_method_res_type(ctp, j);
                        ir_mode *res_mode = get_type_mode(res_type);
-                       n_ret = 0;
-                       for (i = 0; i < arity; i++) {
-                               ret = get_Block_cfgpred(end_bl, i);
+                       int n_ret = 0;
+                       for (int i = 0; i < arity; i++) {
+                               ir_node *ret = get_Block_cfgpred(end_bl, i);
                                if (is_Return(ret)) {
                                        ir_node *res = get_Return_res(ret, j);
                                        if (get_irn_mode(res) != res_mode) {
@@ -537,10 +510,9 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                }
                        }
                        if (n_ret > 0) {
-                               ir_mode *mode = get_irn_mode(cf_pred[0]);
-                               phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
+                               phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
                        } else {
-                               phi = new_r_Bad(irg);
+                               phi = new_r_Bad(irg, res_mode);
                        }
                        res_pred[j] = phi;
                        /* Conserve Phi-list for further inlinings -- but might be optimized */
@@ -549,10 +521,10 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                set_Block_phis(post_bl, phi);
                        }
                }
-               result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
+               ir_node *result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
                set_Tuple_pred(call, pn_Call_T_result, result_tuple);
        } else {
-               set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+               set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
        }
        /* handle the regular call */
        set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
@@ -569,11 +541,10 @@ int inline_method(ir_node *call, ir_graph *called_graph)
           branches to the End node.
         */
        if (exc_handling == exc_handler) {
-               n_exc = 0;
-               for (i = 0; i < arity; i++) {
-                       ir_node *ret, *irn;
-                       ret = get_Block_cfgpred(end_bl, i);
-                       irn = skip_Proj(ret);
+               int n_exc = 0;
+               for (int i = 0; i < arity; i++) {
+                       ir_node *ret = get_Block_cfgpred(end_bl, i);
+                       ir_node *irn = skip_Proj(ret);
                        if (is_fragile_op(irn) || is_Raise(irn)) {
                                cf_pred[n_exc] = ret;
                                ++n_exc;
@@ -588,16 +559,12 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
                        }
                } else {
-                       set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+                       set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
                }
        } else {
-               ir_node *main_end_bl;
-               int main_end_bl_arity;
-               ir_node **end_preds;
-
                /* assert(exc_handling == 1 || no exceptions. ) */
-               n_exc = 0;
-               for (i = 0; i < arity; i++) {
+               int n_exc = 0;
+               for (int i = 0; i < arity; i++) {
                        ir_node *ret = get_Block_cfgpred(end_bl, i);
                        ir_node *irn = skip_Proj(ret);
 
@@ -606,16 +573,16 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                n_exc++;
                        }
                }
-               main_end_bl       = get_irg_end_block(irg);
-               main_end_bl_arity = get_irn_arity(main_end_bl);
-               end_preds         = XMALLOCN(ir_node*, n_exc + main_end_bl_arity);
+               ir_node  *main_end_bl       = get_irg_end_block(irg);
+               int       main_end_bl_arity = get_irn_arity(main_end_bl);
+               ir_node **end_preds         = XMALLOCN(ir_node*, n_exc+main_end_bl_arity);
 
-               for (i = 0; i < main_end_bl_arity; ++i)
+               for (int i = 0; i < main_end_bl_arity; ++i)
                        end_preds[i] = get_irn_n(main_end_bl, i);
-               for (i = 0; i < n_exc; ++i)
+               for (int i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+               set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
                free(end_preds);
        }
        free(res_pred);
@@ -664,8 +631,8 @@ static ir_graph *get_call_called_irg(ir_node *call)
        ir_node *addr;
 
        addr = get_Call_ptr(call);
-       if (is_Global(addr)) {
-               ir_entity *ent = get_Global_entity(addr);
+       if (is_SymConst_addr_ent(addr)) {
+               ir_entity *ent = get_SymConst_entity(addr);
                /* we don't know which function gets finally bound to a weak symbol */
                if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
                        return NULL;
@@ -713,11 +680,8 @@ void inline_small_irgs(ir_graph *irg, int size)
 {
        ir_graph *rem = current_ir_graph;
        inline_env_t env;
-       call_entry *entry;
 
        current_ir_graph = irg;
-       /* Handle graph state */
-       assert(get_irg_phase_state(irg) != phase_building);
        free_callee_info(irg);
 
        /* Find Call nodes to inline.
@@ -735,14 +699,15 @@ void inline_small_irgs(ir_graph *irg, int size)
                collect_phiprojs(irg);
 
                list_for_each_entry(call_entry, entry, &env.calls, list) {
-                       ir_graph            *callee = entry->callee;
-                       irg_inline_property prop    = get_irg_inline_property(callee);
+                       ir_graph  *callee = entry->callee;
+                       ir_entity *called = get_irg_entity(callee);
+                       mtp_additional_properties props
+                               = get_entity_additional_properties(called);
 
-                       if (prop == irg_inline_forbidden) {
+                       if (props & mtp_property_noinline)
                                continue;
-                       }
 
-                       if (prop >= irg_inline_forced ||
+                       if ((props & mtp_property_always_inline) ||
                            _obstack_memory_used(callee->obst) - (int)obstack_room(callee->obst) < size) {
                                inline_method(entry->call, callee);
                        }
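
For context, a driver-side use of this entry point might look as follows; the threshold is illustrative only (it is compared against the obstack memory used by the callee's graph, not a node count):

ir_graph *irg = get_irp_irg(0);  /* some graph of the current program */
/* inline every call whose callee is tiny or marked mtp_property_always_inline */
inline_small_irgs(irg, 100);
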
@@ -850,8 +815,8 @@ static void collect_calls2(ir_node *call, void *ctx)
        if (env->ignore_runtime) {
                ir_node *symc = get_Call_ptr(call);
 
-               if (is_Global(symc)) {
-                       ir_entity *ent = get_Global_entity(symc);
+               if (is_SymConst_addr_ent(symc)) {
+                       ir_entity *ent = get_SymConst_entity(symc);
 
                        if (get_entity_additional_properties(ent) & mtp_property_runtime)
                                return;
@@ -888,9 +853,9 @@ static void collect_calls2(ir_node *call, void *ctx)
 
 /**
  * Returns TRUE if the number of Call nodes is 0 in the irg's environment,
- * hence this irg is a leave.
+ * hence this irg is a leaf.
  */
-inline static int is_leave(ir_graph *irg)
+inline static int is_leaf(ir_graph *irg)
 {
        inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
        return env->n_call_nodes == 0;
@@ -938,7 +903,7 @@ static call_entry *duplicate_call_entry(const call_entry *entry,
  */
 static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_depth)
 {
-       call_entry *entry, *nentry;
+       call_entry *nentry;
 
        /* Note that the src list points to Call nodes in the inlined graph, but
           we need Call nodes in our graph. Luckily the inliner leaves this information
@@ -952,15 +917,15 @@ static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_
 }
 
 /*
- * Inlines small leave methods at call sites where the called address comes
+ * Inlines small leaf methods at call sites where the called address comes
  * from a SymConst node that references the entity representing the called
  * method.
  * The size argument is a rough measure for the code size of the method:
  * Methods where the obstack containing the firm graph is smaller than
  * size are inlined.
  */
-void inline_leave_functions(unsigned maxsize, unsigned leavesize,
-                            unsigned size, int ignore_runtime)
+void inline_leaf_functions(unsigned maxsize, unsigned leafsize,
+                           unsigned size, int ignore_runtime)
 {
        inline_irg_env   *env;
        ir_graph         *irg;
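
The renamed entry point keeps the old parameter order; an illustrative call (the numbers are made up, not libfirm defaults):

/* maxsize:  stop growing a caller beyond this many nodes
 * leafsize: size limit for leaf callees
 * size:     size limit for all other callees */
inline_leaf_functions(750, 16, 25, /*ignore_runtime=*/0);
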
@@ -968,8 +933,6 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        ir_graph         *rem;
        int              did_inline;
        wenv_t           wenv;
-       call_entry       *entry, *next;
-       const call_entry *centry;
        pmap             *copied_graphs;
        pmap_entry       *pm_entry;
 
@@ -990,22 +953,22 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        for (i = 0; i < n_irgs; ++i) {
                ir_graph *irg = get_irp_irg(i);
 
-               assert(get_irg_phase_state(irg) != phase_building);
                free_callee_info(irg);
 
-               assure_cf_loop(irg);
+               assure_irg_properties(irg,
+                       IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
                wenv.x = (inline_irg_env*)get_irg_link(irg);
                irg_walk_graph(irg, NULL, collect_calls2, &wenv);
+               confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        }
 
        /* -- and now inline. -- */
 
-       /* Inline leaves recursively -- we might construct new leaves. */
+       /* Inline leaf functions recursively -- we might construct new ones. */
        do {
                did_inline = 0;
 
                for (i = 0; i < n_irgs; ++i) {
-                       ir_node *call;
                        int phiproj_computed = 0;
 
                        current_ir_graph = get_irp_irg(i);
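
Graph-state bookkeeping switches from assure_cf_loop()/set_*_inconsistent() to the property API seen in this and the inline_method() hunks; the general pattern, with the flags taken from the diff, looks roughly like this:

/* make sure the analyses we depend on are up to date */
assure_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);

/* ... analyse or transform the graph ... */

/* then either state what is still valid ... */
confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
/* ... or explicitly invalidate what the transformation destroyed */
clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
                        | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
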
@@ -1013,22 +976,19 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 
                        ir_reserve_resources(current_ir_graph, IR_RESOURCE_IRN_LINK|IR_RESOURCE_PHI_LIST);
                        list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
-                               ir_graph            *callee;
-                               irg_inline_property  prop;
-
                                if (env->n_nodes > maxsize)
                                        break;
 
-                               call   = entry->call;
-                               callee = entry->callee;
-
-                               prop = get_irg_inline_property(callee);
-                               if (prop == irg_inline_forbidden) {
+                               ir_node   *call   = entry->call;
+                               ir_graph  *callee = entry->callee;
+                               ir_entity *called = get_irg_entity(callee);
+                               mtp_additional_properties props
+                                       = get_entity_additional_properties(called);
+                               if (props & mtp_property_noinline)
                                        continue;
-                               }
 
-                               if (is_leave(callee) && (
-                                   is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
+                               if (is_leaf(callee) && (
+                                   is_smaller(callee, leafsize) || (props & mtp_property_always_inline))) {
                                        if (!phiproj_computed) {
                                                phiproj_computed = 1;
                                                collect_phiprojs(current_ir_graph);
@@ -1059,7 +1019,6 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 
        /* inline other small functions. */
        for (i = 0; i < n_irgs; ++i) {
-               ir_node *call;
                int phiproj_computed = 0;
 
                current_ir_graph = get_irp_irg(i);
@@ -1069,28 +1028,24 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 
                /* note that the list of possible calls is updated during the process */
                list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
-                       irg_inline_property prop;
-                       ir_graph            *callee;
-                       pmap_entry          *e;
+                       ir_node   *call   = entry->call;
+                       ir_graph  *callee = entry->callee;
+                       ir_entity *called = get_irg_entity(callee);
 
-                       call   = entry->call;
-                       callee = entry->callee;
-
-                       prop = get_irg_inline_property(callee);
-                       if (prop == irg_inline_forbidden) {
+                       mtp_additional_properties props = get_entity_additional_properties(called);
+                       if (props & mtp_property_noinline)
                                continue;
-                       }
 
-                       e = pmap_find(copied_graphs, callee);
-                       if (e != NULL) {
+                       ir_graph *calleee = pmap_get(ir_graph, copied_graphs, callee);
+                       if (calleee != NULL) {
                                /*
                                 * Remap callee if we have a copy.
                                 * FIXME: Should we do this only for recursive Calls ?
                                 */
-                               callee = (ir_graph*)e->value;
+                               callee = calleee;
                        }
 
-                       if (prop >= irg_inline_forced ||
+                       if ((props & mtp_property_always_inline) ||
                            (is_smaller(callee, size) && env->n_nodes < maxsize) /* small function */) {
                                if (current_ir_graph == callee) {
                                        /*
@@ -1106,7 +1061,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 
                                        /*
                                         * No copy yet, create one.
-                                        * Note that recursive methods are never leaves, so it is sufficient
+                                        * Note that recursive methods are never leaf functions, so it is sufficient
                                         * to test this condition here.
                                         */
                                        copy = create_irg_copy(callee);
@@ -1120,7 +1075,8 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        callee_env = alloc_inline_irg_env();
                                        set_irg_link(copy, callee_env);
 
-                                       assure_cf_loop(copy);
+                                       assure_irg_properties(copy,
+                                               IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
                                        wenv.x              = callee_env;
                                        wenv.ignore_callers = 1;
                                        irg_walk_graph(copy, NULL, collect_calls2, &wenv);
@@ -1202,44 +1158,44 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        current_ir_graph = rem;
 }
 
-typedef struct inline_leave_functions_pass_t {
+typedef struct inline_leaf_functions_pass_t {
        ir_prog_pass_t pass;
        unsigned       maxsize;
-       unsigned       leavesize;
+       unsigned       leafsize;
        unsigned       size;
        int            ignore_runtime;
-} inline_leave_functions_pass_t;
+} inline_leaf_functions_pass_t;
 
 /**
- * Wrapper to run inline_leave_functions() as a ir_prog pass.
+ * Wrapper to run inline_leaf_functions() as a ir_prog pass.
  */
-static int inline_leave_functions_wrapper(ir_prog *irp, void *context)
+static int inline_leaf_functions_wrapper(ir_prog *irp, void *context)
 {
-       inline_leave_functions_pass_t *pass = (inline_leave_functions_pass_t*)context;
+       inline_leaf_functions_pass_t *pass = (inline_leaf_functions_pass_t*)context;
 
        (void)irp;
-       inline_leave_functions(
-               pass->maxsize, pass->leavesize,
+       inline_leaf_functions(
+               pass->maxsize, pass->leafsize,
                pass->size, pass->ignore_runtime);
        return 0;
 }
 
-/* create a pass for inline_leave_functions() */
-ir_prog_pass_t *inline_leave_functions_pass(
-       const char *name, unsigned maxsize, unsigned leavesize,
+/* create a pass for inline_leaf_functions() */
+ir_prog_pass_t *inline_leaf_functions_pass(
+       const char *name, unsigned maxsize, unsigned leafsize,
        unsigned size, int ignore_runtime)
 {
-       inline_leave_functions_pass_t *pass = XMALLOCZ(inline_leave_functions_pass_t);
+       inline_leaf_functions_pass_t *pass = XMALLOCZ(inline_leaf_functions_pass_t);
 
        pass->maxsize        = maxsize;
-       pass->leavesize      = leavesize;
+       pass->leafsize       = leafsize;
        pass->size           = size;
        pass->ignore_runtime = ignore_runtime;
 
        return def_prog_pass_constructor(
                &pass->pass,
-               name ? name : "inline_leave_functions",
-               inline_leave_functions_wrapper);
+               name ? name : "inline_leaf_functions",
+               inline_leaf_functions_wrapper);
 }
 
 /**
@@ -1247,10 +1203,10 @@ ir_prog_pass_t *inline_leave_functions_pass(
  */
 static unsigned calc_method_local_weight(ir_node *arg)
 {
-       int      i, j, k;
+       int      j;
        unsigned v, weight = 0;
 
-       for (i = get_irn_n_outs(arg) - 1; i >= 0; --i) {
+       for (unsigned i = get_irn_n_outs(arg); i-- > 0; ) {
                ir_node *succ = get_irn_out(arg, i);
 
                switch (get_irn_opcode(succ)) {
@@ -1284,7 +1240,7 @@ static unsigned calc_method_local_weight(ir_node *arg)
                                ir_node *pred = get_Tuple_pred(succ, j);
                                if (pred == arg) {
                                        /* look for Proj(j) */
-                                       for (k = get_irn_n_outs(succ) - 1; k >= 0; --k) {
+                                       for (unsigned k = get_irn_n_outs(succ); k-- > 0; ) {
                                                ir_node *succ_succ = get_irn_out(succ, k);
                                                if (is_Proj(succ_succ)) {
                                                        if (get_Proj_proj(succ_succ) == j) {
@@ -1315,7 +1271,6 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
        ir_entity *ent = get_irg_entity(irg);
        ir_type  *mtp;
        size_t   nparams;
-       int      i;
        long     proj_nr;
        ir_node  *irg_args, *arg;
 
@@ -1331,7 +1286,7 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
 
        assure_irg_outs(irg);
        irg_args = get_irg_args(irg);
-       for (i = get_irn_n_outs(irg_args) - 1; i >= 0; --i) {
+       for (unsigned i = get_irn_n_outs(irg_args); i-- > 0; ) {
                arg     = get_irn_out(irg_args, i);
                proj_nr = get_Proj_proj(arg);
                env->local_weights[proj_nr] = calc_method_local_weight(arg);
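
get_irn_n_outs() yields an unsigned count, so the old downward loop for (i = n_outs - 1; i >= 0; --i) cannot keep its index once it becomes unsigned: the condition i >= 0 would always hold. The replacement idiom used above decrements inside the condition:

/* visits i = n-1, n-2, ..., 0 and is a no-op when there are no outs */
for (unsigned i = get_irn_n_outs(irg_args); i-- > 0; ) {
        ir_node *arg = get_irn_out(irg_args, i);
        /* ... use arg ... */
}
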
@@ -1365,23 +1320,36 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
 {
        ir_node   *call = entry->call;
        ir_entity *ent  = get_irg_entity(callee);
+       ir_type   *callee_frame;
+       size_t    i, n_members, n_params;
        ir_node   *frame_ptr;
        ir_type   *mtp;
        int       weight = 0;
-       int       i, n_params, all_const;
+       int       all_const;
        unsigned  cc, v;
-       irg_inline_property prop;
 
        inline_irg_env *callee_env;
 
-       prop = get_irg_inline_property(callee);
-       if (prop == irg_inline_forbidden) {
+       mtp_additional_properties props = get_entity_additional_properties(ent);
+       if (props & mtp_property_noinline) {
                DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden\n",
                    call, callee));
                return entry->benefice = INT_MIN;
        }
 
-       if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
+       callee_frame = get_irg_frame_type(callee);
+       n_members = get_class_n_members(callee_frame);
+       for (i = 0; i < n_members; ++i) {
+               ir_entity *frame_ent = get_class_member(callee_frame, i);
+               if (is_parameter_entity(frame_ent)) {
+                       /* TODO: the inliner should handle parameter entities by inserting Store operations */
+                       DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden due to parameter entity\n", call, callee));
+                       add_entity_additional_properties(ent, mtp_property_noinline);
+                       return entry->benefice = INT_MIN;
+               }
+       }
+
+       if (props & mtp_property_noreturn) {
                DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
                    call, callee));
                return entry->benefice = INT_MIN;
@@ -1393,7 +1361,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        cc       = get_method_calling_convention(mtp);
        if (cc & cc_reg_param) {
                /* register parameter, smaller costs for register parameters */
-               int max_regs = cc & ~cc_bits;
+               size_t max_regs = cc & ~cc_bits;
 
                if (max_regs < n_params)
                        weight += max_regs * 2 + (n_params - max_regs) * 5;
@@ -1446,7 +1414,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        if (callee_env->n_nodes < 30 && !callee_env->recursive)
                weight += 2000;
 
-       /* and finally for leaves: they do not increase the register pressure
+       /* and finally for leaf functions: they do not increase the register pressure
           because of callee safe registers */
        if (callee_env->n_call_nodes == 0)
                weight += 400;
@@ -1502,6 +1470,8 @@ static ir_graph **create_irg_list(void)
        callgraph_walk(NULL, callgraph_walker, &env);
        assert(n_irgs == env.last_irg);
 
+       free_callgraph();
+
        return env.irgs;
 }
 
@@ -1516,14 +1486,15 @@ static ir_graph **create_irg_list(void)
 static void maybe_push_call(pqueue_t *pqueue, call_entry *call,
                             int inline_threshold)
 {
-       ir_graph            *callee  = call->callee;
-       irg_inline_property prop     = get_irg_inline_property(callee);
-       int                 benefice = calc_inline_benefice(call, callee);
+       ir_graph *callee   = call->callee;
+       int       benefice = calc_inline_benefice(call, callee);
 
        DB((dbg, LEVEL_2, "In %+F Call %+F to %+F has benefice %d\n",
            get_irn_irg(call->call), call->call, callee, benefice));
 
-       if (prop < irg_inline_forced && benefice < inline_threshold) {
+       ir_entity                *ent   = get_irg_entity(callee);
+       mtp_additional_properties props = get_entity_additional_properties(ent);
+       if (!(props & mtp_property_always_inline) && benefice < inline_threshold) {
                return;
        }
 
@@ -1546,7 +1517,6 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 {
        int            phiproj_computed = 0;
        inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
-       call_entry     *curr_call;
        wenv_t         wenv;
        pqueue_t       *pqueue;
 
@@ -1576,19 +1546,21 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                ir_graph            *callee     = curr_call->callee;
                ir_node             *call_node  = curr_call->call;
                inline_irg_env      *callee_env = (inline_irg_env*)get_irg_link(callee);
-               irg_inline_property prop        = get_irg_inline_property(callee);
+               ir_entity           *ent        = get_irg_entity(callee);
+               mtp_additional_properties props
+                       = get_entity_additional_properties(ent);
+               ir_graph            *calleee;
                int                 loop_depth;
-               const call_entry    *centry;
-               pmap_entry          *e;
 
-               if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
+               if (!(props & mtp_property_always_inline)
+                   && env->n_nodes + callee_env->n_nodes > maxsize) {
                        DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
                                                env->n_nodes, callee, callee_env->n_nodes));
                        continue;
                }
 
-               e = pmap_find(copied_graphs, callee);
-               if (e != NULL) {
+               calleee = pmap_get(ir_graph, copied_graphs, callee);
+               if (calleee != NULL) {
                        int benefice = curr_call->benefice;
                        /*
                         * Reduce the weight for recursive function IFF not all arguments are const.
@@ -1602,7 +1574,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        /*
                         * Remap callee if we have a copy.
                         */
-                       callee     = (ir_graph*)e->value;
+                       callee     = calleee;
                        callee_env = (inline_irg_env*)get_irg_link(callee);
                }
 
@@ -1628,7 +1600,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 
                        /*
                         * No copy yet, create one.
-                        * Note that recursive methods are never leaves, so it is
+                        * Note that recursive methods are never leaf functions, so it is
                         * sufficient to test this condition here.
                         */
                        copy = create_irg_copy(callee);
@@ -1642,7 +1614,8 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        callee_env = alloc_inline_irg_env();
                        set_irg_link(copy, callee_env);
 
-                       assure_cf_loop(copy);
+                       assure_irg_properties(copy, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+                       memset(&wenv, 0, sizeof(wenv));
                        wenv.x              = callee_env;
                        wenv.ignore_callers = 1;
                        irg_walk_graph(copy, NULL, collect_calls2, &wenv);
@@ -1694,6 +1667,13 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                         * but we need Call nodes in our graph. Luckily the inliner leaves
                         * this information in the link field. */
                        new_call = (ir_node*)get_irn_link(centry->call);
+                       if (get_irn_irg(new_call) != irg) {
+                               /* centry->call has not been copied, which means it is dead.
+                                * This might happen during inlining, if a const function
+                                * that cannot be inlined is only used as an unused argument
+                                * of another function that is inlined. */
+                               continue;
+                       }
                        assert(is_Call(new_call));
 
                        new_entry = duplicate_call_entry(centry, new_call, loop_depth);
@@ -1746,7 +1726,7 @@ void inline_functions(unsigned maxsize, int inline_threshold,
                free_callee_info(irg);
 
                wenv.x = (inline_irg_env*)get_irg_link(irg);
-               assure_cf_loop(irg);
+               assure_loopinfo(irg);
                irg_walk_graph(irg, NULL, collect_calls2, &wenv);
        }