Only (IV - RC) is allowed for induction variables.
[libfirm] / ir / opt / opt_inline.c
index a93c1f3..c23694c 100644 (file)
@@ -21,7 +21,6 @@
  * @file
  * @brief    Dead node elimination and Procedure Inlining.
  * @author   Michael Beck, Goetz Lindenmaier
- * @version  $Id$
  */
 #include "config.h"
 
@@ -63,7 +62,7 @@
 #include "irtools.h"
 #include "iropt_dbg.h"
 #include "irpass_t.h"
-#include "irphase_t.h"
+#include "irnodemap.h"
 
 DEBUG_ONLY(static firm_dbg_module_t *dbg;)
 
@@ -146,7 +145,13 @@ static void find_addr(ir_node *node, void *env)
 {
        bool *allow_inline = (bool*)env;
 
-       if (is_Sel(node)) {
+       if (is_Block(node) && get_Block_entity(node)) {
+               /**
+                * Currently we can't handle blocks whose address was taken correctly
+                * when inlining
+                */
+               *allow_inline = false;
+       } else if (is_Sel(node)) {
                ir_graph *irg = current_ir_graph;
                if (get_Sel_ptr(node) == get_irg_frame(irg)) {
                        /* access to frame */
@@ -155,6 +160,9 @@ static void find_addr(ir_node *node, void *env)
                                /* access to value_type */
                                *allow_inline = false;
                        }
+                       if (is_parameter_entity(ent)) {
+                               *allow_inline = false;
+                       }
                }
        } else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
                /* From GCC:
@@ -190,11 +198,11 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
        ir_entity          *called      = get_irg_entity(called_graph);
        ir_type            *called_type = get_entity_type(called);
        ir_type            *call_type   = get_Call_type(call);
-       int                 n_params    = get_method_n_params(called_type);
-       int                 n_arguments = get_method_n_params(call_type);
-       int                 n_res       = get_method_n_ress(called_type);
+       size_t              n_params    = get_method_n_params(called_type);
+       size_t              n_arguments = get_method_n_params(call_type);
+       size_t              n_res       = get_method_n_ress(called_type);
        irg_inline_property prop        = get_irg_inline_property(called_graph);
-       int                 i;
+       size_t              i;
        bool                res;
 
        if (prop == irg_inline_forbidden)
@@ -214,7 +222,7 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
         * It is implementation dependent what happens in that case.
         * We support inlining, if the bitsize of the types matches AND
         * the same arithmetic is used. */
-       for (i = n_params - 1; i >= 0; --i) {
+       for (i = 0; i < n_params; ++i) {
                ir_type *param_tp = get_method_param_type(called_type, i);
                ir_type *arg_tp   = get_method_param_type(call_type, i);
 
@@ -231,7 +239,7 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
                        /* otherwise we can simply "reinterpret" the bits */
                }
        }
-       for (i = n_res - 1; i >= 0; --i) {
+       for (i = 0; i < n_res; ++i) {
                ir_type *decl_res_tp = get_method_res_type(called_type, i);
                ir_type *used_res_tp = get_method_res_type(call_type, i);
 
@@ -283,14 +291,15 @@ static void copy_frame_entities(ir_graph *from, ir_graph *to)
 {
        ir_type *from_frame = get_irg_frame_type(from);
        ir_type *to_frame   = get_irg_frame_type(to);
-       int      n_members  = get_class_n_members(from_frame);
-       int      i;
+       size_t   n_members  = get_class_n_members(from_frame);
+       size_t   i;
        assert(from_frame != to_frame);
 
        for (i = 0; i < n_members; ++i) {
                ir_entity *old_ent = get_class_member(from_frame, i);
                ir_entity *new_ent = copy_entity_own(old_ent, to_frame);
                set_entity_link(old_ent, new_ent);
+               assert (!is_parameter_entity(old_ent));
        }
 }
 
@@ -299,7 +308,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 {
        ir_node       *pre_call;
        ir_node       *post_call, *post_bl;
-       ir_node       *in[pn_Start_max];
+       ir_node       *in[pn_Start_max+1];
        ir_node       *end, *end_bl, *block;
        ir_node       **res_pred;
        ir_node       **cf_pred;
@@ -343,12 +352,10 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) == op_pin_state_pinned);
        assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
-       set_irg_outs_inconsistent(irg);
-       set_irg_extblk_inconsistent(irg);
-       set_irg_doms_inconsistent(irg);
-       set_irg_loopinfo_inconsistent(irg);
+       clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_DOMINANCE
+                          | IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
        set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
-       set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
+       clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_ENTITY_USAGE);
        edges_deactivate(irg);
 
        /* here we know we WILL inline, so inform the statistics */
@@ -393,9 +400,8 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        in[pn_Start_M]              = get_Call_mem(call);
        in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
        in[pn_Start_P_frame_base]   = get_irg_frame(irg);
-       in[pn_Start_P_tls]          = get_irg_tls(irg);
        in[pn_Start_T_args]         = new_r_Tuple(post_bl, n_params, args_in);
-       pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
+       pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
        post_call = call;
 
        /* --
@@ -415,7 +421,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        {
                ir_node *start_block;
                ir_node *start;
-               ir_node *bad;
                ir_node *nomem;
 
                start_block = get_irg_start_block(called_graph);
@@ -426,10 +431,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                set_new_node(start, pre_call);
                mark_irn_visited(start);
 
-               bad = get_irg_bad(called_graph);
-               set_new_node(bad, get_irg_bad(irg));
-               mark_irn_visited(bad);
-
                nomem = get_irg_no_mem(called_graph);
                set_new_node(nomem, get_irg_no_mem(irg));
                mark_irn_visited(nomem);
@@ -437,7 +438,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 
        /* entitiy link is used to link entities on old stackframe to the
         * new stackframe */
-       irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
+       irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
 
        /* copy entities and nodes */
        assert(!irn_visited(get_irg_end(called_graph)));
@@ -445,7 +446,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        irg_walk_core(get_irg_end(called_graph), copy_node_inline, set_preds_inline,
                      irg);
 
-       irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+       irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
 
        /* -- Merge the end of the inlined procedure with the call site -- */
        /* We will turn the old Call node into a Tuple with the following
@@ -493,7 +494,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 
        /* build a Tuple for all results of the method.
         * add Phi node if there was more than one Return. */
-       turn_into_tuple(post_call, pn_Call_max);
+       turn_into_tuple(post_call, pn_Call_max+1);
        /* First the Memory-Phi */
        n_mem_phi = 0;
        for (i = 0; i < arity; i++) {
@@ -538,10 +539,9 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                }
                        }
                        if (n_ret > 0) {
-                               ir_mode *mode = get_irn_mode(cf_pred[0]);
-                               phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
+                               phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
                        } else {
-                               phi = new_r_Bad(irg);
+                               phi = new_r_Bad(irg, res_mode);
                        }
                        res_pred[j] = phi;
                        /* Conserve Phi-list for further inlinings -- but might be optimized */
@@ -553,14 +553,11 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
                set_Tuple_pred(call, pn_Call_T_result, result_tuple);
        } else {
-               set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+               set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
        }
        /* handle the regular call */
        set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
 
-       /* For now, we cannot inline calls with value_base */
-       set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
-
        /* Finally the exception control flow.
           We have two possible situations:
           First if the Call branches to an exception handler:
@@ -592,7 +589,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
                        }
                } else {
-                       set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+                       set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
                }
        } else {
                ir_node *main_end_bl;
@@ -619,7 +616,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+               set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
                free(end_preds);
        }
        free(res_pred);
@@ -668,8 +665,8 @@ static ir_graph *get_call_called_irg(ir_node *call)
        ir_node *addr;
 
        addr = get_Call_ptr(call);
-       if (is_Global(addr)) {
-               ir_entity *ent = get_Global_entity(addr);
+       if (is_SymConst_addr_ent(addr)) {
+               ir_entity *ent = get_SymConst_entity(addr);
                /* we don't know which function gets finally bound to a weak symbol */
                if (get_entity_linkage(ent) & IR_LINKAGE_WEAK)
                        return NULL;
@@ -854,8 +851,8 @@ static void collect_calls2(ir_node *call, void *ctx)
        if (env->ignore_runtime) {
                ir_node *symc = get_Call_ptr(call);
 
-               if (is_Global(symc)) {
-                       ir_entity *ent = get_Global_entity(symc);
+               if (is_SymConst_addr_ent(symc)) {
+                       ir_entity *ent = get_SymConst_entity(symc);
 
                        if (get_entity_additional_properties(ent) & mtp_property_runtime)
                                return;
@@ -892,9 +889,9 @@ static void collect_calls2(ir_node *call, void *ctx)
 
 /**
  * Returns TRUE if the number of callers is 0 in the irg's environment,
- * hence this irg is a leave.
+ * hence this irg is a leaf.
  */
-inline static int is_leave(ir_graph *irg)
+inline static int is_leaf(ir_graph *irg)
 {
        inline_irg_env *env = (inline_irg_env*)get_irg_link(irg);
        return env->n_call_nodes == 0;
@@ -956,15 +953,15 @@ static void append_call_list(inline_irg_env *dst, inline_irg_env *src, int loop_
 }
 
 /*
- * Inlines small leave methods at call sites where the called address comes
+ * Inlines small leaf methods at call sites where the called address comes
  * from a Const node that references the entity representing the called
  * method.
  * The size argument is a rough measure for the code size of the method:
  * Methods where the obstack containing the firm graph is smaller than
  * size are inlined.
  */
-void inline_leave_functions(unsigned maxsize, unsigned leavesize,
-                            unsigned size, int ignore_runtime)
+void inline_leaf_functions(unsigned maxsize, unsigned leafsize,
+                           unsigned size, int ignore_runtime)
 {
        inline_irg_env   *env;
        ir_graph         *irg;
@@ -997,14 +994,16 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                assert(get_irg_phase_state(irg) != phase_building);
                free_callee_info(irg);
 
-               assure_cf_loop(irg);
+               assure_irg_properties(irg,
+                       IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
                wenv.x = (inline_irg_env*)get_irg_link(irg);
                irg_walk_graph(irg, NULL, collect_calls2, &wenv);
+               confirm_irg_properties(irg, IR_GRAPH_PROPERTIES_ALL);
        }
 
        /* -- and now inline. -- */
 
-       /* Inline leaves recursively -- we might construct new leaves. */
+       /* Inline leafs recursively -- we might construct new leafs. */
        do {
                did_inline = 0;
 
@@ -1031,8 +1030,8 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        continue;
                                }
 
-                               if (is_leave(callee) && (
-                                   is_smaller(callee, leavesize) || prop >= irg_inline_forced)) {
+                               if (is_leaf(callee) && (
+                                   is_smaller(callee, leafsize) || prop >= irg_inline_forced)) {
                                        if (!phiproj_computed) {
                                                phiproj_computed = 1;
                                                collect_phiprojs(current_ir_graph);
@@ -1075,7 +1074,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                list_for_each_entry_safe(call_entry, entry, next, &env->calls, list) {
                        irg_inline_property prop;
                        ir_graph            *callee;
-                       pmap_entry          *e;
+                       ir_graph            *calleee;
 
                        call   = entry->call;
                        callee = entry->callee;
@@ -1085,13 +1084,13 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                continue;
                        }
 
-                       e = pmap_find(copied_graphs, callee);
-                       if (e != NULL) {
+                       calleee = (ir_graph*)pmap_get(copied_graphs, callee);
+                       if (calleee != NULL) {
                                /*
                                 * Remap callee if we have a copy.
                                 * FIXME: Should we do this only for recursive Calls ?
                                 */
-                               callee = (ir_graph*)e->value;
+                               callee = calleee;
                        }
 
                        if (prop >= irg_inline_forced ||
@@ -1110,7 +1109,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 
                                        /*
                                         * No copy yet, create one.
-                                        * Note that recursive methods are never leaves, so it is sufficient
+                                        * Note that recursive methods are never leafs, so it is sufficient
                                         * to test this condition here.
                                         */
                                        copy = create_irg_copy(callee);
@@ -1124,7 +1123,8 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        callee_env = alloc_inline_irg_env();
                                        set_irg_link(copy, callee_env);
 
-                                       assure_cf_loop(copy);
+                                       assure_irg_properties(copy,
+                                               IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
                                        wenv.x              = callee_env;
                                        wenv.ignore_callers = 1;
                                        irg_walk_graph(copy, NULL, collect_calls2, &wenv);
@@ -1154,7 +1154,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        /* call was inlined, Phi/Projs for current graph must be recomputed */
                                        phiproj_computed = 0;
 
-                                       /* callee was inline. Append it's call list. */
+                                       /* callee was inlined. Append its call list. */
                                        env->got_inline = 1;
                                        --env->n_call_nodes;
                                        append_call_list(env, callee_env, entry->loop_depth);
@@ -1206,44 +1206,44 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
        current_ir_graph = rem;
 }
 
-typedef struct inline_leave_functions_pass_t {
+typedef struct inline_leaf_functions_pass_t {
        ir_prog_pass_t pass;
        unsigned       maxsize;
-       unsigned       leavesize;
+       unsigned       leafsize;
        unsigned       size;
        int            ignore_runtime;
-} inline_leave_functions_pass_t;
+} inline_leaf_functions_pass_t;
 
 /**
- * Wrapper to run inline_leave_functions() as a ir_prog pass.
+ * Wrapper to run inline_leaf_functions() as a ir_prog pass.
  */
-static int inline_leave_functions_wrapper(ir_prog *irp, void *context)
+static int inline_leaf_functions_wrapper(ir_prog *irp, void *context)
 {
-       inline_leave_functions_pass_t *pass = (inline_leave_functions_pass_t*)context;
+       inline_leaf_functions_pass_t *pass = (inline_leaf_functions_pass_t*)context;
 
        (void)irp;
-       inline_leave_functions(
-               pass->maxsize, pass->leavesize,
+       inline_leaf_functions(
+               pass->maxsize, pass->leafsize,
                pass->size, pass->ignore_runtime);
        return 0;
 }
 
-/* create a pass for inline_leave_functions() */
-ir_prog_pass_t *inline_leave_functions_pass(
-       const char *name, unsigned maxsize, unsigned leavesize,
+/* create a pass for inline_leaf_functions() */
+ir_prog_pass_t *inline_leaf_functions_pass(
+       const char *name, unsigned maxsize, unsigned leafsize,
        unsigned size, int ignore_runtime)
 {
-       inline_leave_functions_pass_t *pass = XMALLOCZ(inline_leave_functions_pass_t);
+       inline_leaf_functions_pass_t *pass = XMALLOCZ(inline_leaf_functions_pass_t);
 
        pass->maxsize        = maxsize;
-       pass->leavesize      = leavesize;
+       pass->leafsize       = leafsize;
        pass->size           = size;
        pass->ignore_runtime = ignore_runtime;
 
        return def_prog_pass_constructor(
                &pass->pass,
-               name ? name : "inline_leave_functions",
-               inline_leave_functions_wrapper);
+               name ? name : "inline_leaf_functions",
+               inline_leaf_functions_wrapper);
 }
 
 /**
@@ -1318,7 +1318,9 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
 {
        ir_entity *ent = get_irg_entity(irg);
        ir_type  *mtp;
-       int      nparams, i, proj_nr;
+       size_t   nparams;
+       int      i;
+       long     proj_nr;
        ir_node  *irg_args, *arg;
 
        mtp      = get_entity_type(ent);
@@ -1345,17 +1347,12 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
  * After inlining, the local variable might be transformed into a
  * SSA variable by scalar_replacement().
  */
-static unsigned get_method_local_adress_weight(ir_graph *callee, int pos)
+static unsigned get_method_local_adress_weight(ir_graph *callee, size_t pos)
 {
        inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
 
-       if (env->local_weights != NULL) {
-               if (pos < ARR_LEN(env->local_weights))
-                       return env->local_weights[pos];
-               return 0;
-       }
-
-       analyze_irg_local_weights(env, callee);
+       if (env->local_weights == NULL)
+               analyze_irg_local_weights(env, callee);
 
        if (pos < ARR_LEN(env->local_weights))
                return env->local_weights[pos];
@@ -1372,10 +1369,12 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
 {
        ir_node   *call = entry->call;
        ir_entity *ent  = get_irg_entity(callee);
+       ir_type   *callee_frame;
+       size_t    i, n_members, n_params;
        ir_node   *frame_ptr;
        ir_type   *mtp;
        int       weight = 0;
-       int       i, n_params, all_const;
+       int       all_const;
        unsigned  cc, v;
        irg_inline_property prop;
 
@@ -1388,6 +1387,18 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
                return entry->benefice = INT_MIN;
        }
 
+       callee_frame = get_irg_frame_type(callee);
+       n_members = get_class_n_members(callee_frame);
+       for (i = 0; i < n_members; ++i) {
+               ir_entity *frame_ent = get_class_member(callee_frame, i);
+               if (is_parameter_entity(frame_ent)) {
                        /* TODO: inliner should handle parameter entities by inserting Store operations */
+                       DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden due to parameter entity\n", call, callee));
+                       set_irg_inline_property(callee, irg_inline_forbidden);
+                       return entry->benefice = INT_MIN;
+               }
+       }
+
        if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
                DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
                    call, callee));
@@ -1400,7 +1411,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        cc       = get_method_calling_convention(mtp);
        if (cc & cc_reg_param) {
                /* register parameter, smaller costs for register parameters */
-               int max_regs = cc & ~cc_bits;
+               size_t max_regs = cc & ~cc_bits;
 
                if (max_regs < n_params)
                        weight += max_regs * 2 + (n_params - max_regs) * 5;
@@ -1453,7 +1464,7 @@ static int calc_inline_benefice(call_entry *entry, ir_graph *callee)
        if (callee_env->n_nodes < 30 && !callee_env->recursive)
                weight += 2000;
 
-       /* and finally for leaves: they do not increase the register pressure
+       /* and finally for leafs: they do not increase the register pressure
           because of callee safe registers */
        if (callee_env->n_call_nodes == 0)
                weight += 400;
@@ -1509,6 +1520,8 @@ static ir_graph **create_irg_list(void)
        callgraph_walk(NULL, callgraph_walker, &env);
        assert(n_irgs == env.last_irg);
 
+       free_callgraph();
+
        return env.irgs;
 }
 
@@ -1584,9 +1597,9 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                ir_node             *call_node  = curr_call->call;
                inline_irg_env      *callee_env = (inline_irg_env*)get_irg_link(callee);
                irg_inline_property prop        = get_irg_inline_property(callee);
+               ir_graph            *calleee;
                int                 loop_depth;
                const call_entry    *centry;
-               pmap_entry          *e;
 
                if ((prop < irg_inline_forced) && env->n_nodes + callee_env->n_nodes > maxsize) {
                        DB((dbg, LEVEL_2, "%+F: too big (%d) + %+F (%d)\n", irg,
@@ -1594,8 +1607,8 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        continue;
                }
 
-               e = pmap_find(copied_graphs, callee);
-               if (e != NULL) {
+               calleee = (ir_graph*)pmap_get(copied_graphs, callee);
+               if (calleee != NULL) {
                        int benefice = curr_call->benefice;
                        /*
                         * Reduce the weight for recursive function IFF not all arguments are const.
@@ -1609,7 +1622,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        /*
                         * Remap callee if we have a copy.
                         */
-                       callee     = (ir_graph*)e->value;
+                       callee     = calleee;
                        callee_env = (inline_irg_env*)get_irg_link(callee);
                }
 
@@ -1635,7 +1648,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 
                        /*
                         * No copy yet, create one.
-                        * Note that recursive methods are never leaves, so it is
+                        * Note that recursive methods are never leafs, so it is
                         * sufficient to test this condition here.
                         */
                        copy = create_irg_copy(callee);
@@ -1649,7 +1662,8 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        callee_env = alloc_inline_irg_env();
                        set_irg_link(copy, callee_env);
 
-                       assure_cf_loop(copy);
+                       assure_irg_properties(copy, IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
+                       memset(&wenv, 0, sizeof(wenv));
                        wenv.x              = callee_env;
                        wenv.ignore_callers = 1;
                        irg_walk_graph(copy, NULL, collect_calls2, &wenv);
@@ -1682,7 +1696,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                /* remove it from the caller list */
                list_del(&curr_call->list);
 
-               /* callee was inline. Append it's call list. */
+               /* callee was inlined. Append its call list. */
                env->got_inline = 1;
                --env->n_call_nodes;
 
@@ -1701,6 +1715,13 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                         * but we need Call nodes in our graph. Luckily the inliner leaves
                         * this information in the link field. */
                        new_call = (ir_node*)get_irn_link(centry->call);
+                       if (get_irn_irg(new_call) != irg) {
+                               /* centry->call has not been copied, which means it is dead.
+                                * This might happen during inlining, if a const function,
+                                * which cannot be inlined is only used as an unused argument
+                                * of another function, which is inlined. */
+                               continue;
+                       }
                        assert(is_Call(new_call));
 
                        new_entry = duplicate_call_entry(centry, new_call, loop_depth);
@@ -1753,7 +1774,7 @@ void inline_functions(unsigned maxsize, int inline_threshold,
                free_callee_info(irg);
 
                wenv.x = (inline_irg_env*)get_irg_link(irg);
-               assure_cf_loop(irg);
+               assure_loopinfo(irg);
                irg_walk_graph(irg, NULL, collect_calls2, &wenv);
        }