call hook_new_entity after copying entity so the firm debugger can catch it
diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index a93c1f3..6c5e336 100644
@@ -190,11 +190,11 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
        ir_entity          *called      = get_irg_entity(called_graph);
        ir_type            *called_type = get_entity_type(called);
        ir_type            *call_type   = get_Call_type(call);
-       int                 n_params    = get_method_n_params(called_type);
-       int                 n_arguments = get_method_n_params(call_type);
-       int                 n_res       = get_method_n_ress(called_type);
+       size_t              n_params    = get_method_n_params(called_type);
+       size_t              n_arguments = get_method_n_params(call_type);
+       size_t              n_res       = get_method_n_ress(called_type);
        irg_inline_property prop        = get_irg_inline_property(called_graph);
-       int                 i;
+       size_t              i;
        bool                res;
 
        if (prop == irg_inline_forbidden)
@@ -214,7 +214,7 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
         * It is implementation dependent what happens in that case.
 	 * We support inlining if the bitsize of the types matches AND
         * the same arithmetic is used. */
-       for (i = n_params - 1; i >= 0; --i) {
+       for (i = 0; i < n_params; ++i) {
                ir_type *param_tp = get_method_param_type(called_type, i);
                ir_type *arg_tp   = get_method_param_type(call_type, i);
 
@@ -231,7 +231,7 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
                        /* otherwise we can simply "reinterpret" the bits */
                }
        }
-       for (i = n_res - 1; i >= 0; --i) {
+       for (i = 0; i < n_res; ++i) {
                ir_type *decl_res_tp = get_method_res_type(called_type, i);
                ir_type *used_res_tp = get_method_res_type(call_type, i);
 
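The int-to-size_t switch in the first hunk is what forces the two loop
reversals here: with an unsigned index, `i >= 0` is always true and
`n_params - 1` wraps around to SIZE_MAX when the method has no parameters.
A minimal standalone sketch of the failure mode (not libfirm code):

    #include <stddef.h>

    static void check_params(size_t n_params)
    {
        /* ascending form used by the patch: the body simply never
         * runs when n_params == 0 */
        for (size_t i = 0; i < n_params; ++i) {
            /* ... compare param type i against argument type i ... */
        }

        /* the old descending form is broken for unsigned i:
         *     for (i = n_params - 1; i >= 0; --i) ...
         * i starts at SIZE_MAX and the condition never fails. */
    }
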
@@ -283,8 +283,8 @@ static void copy_frame_entities(ir_graph *from, ir_graph *to)
 {
        ir_type *from_frame = get_irg_frame_type(from);
        ir_type *to_frame   = get_irg_frame_type(to);
-       int      n_members  = get_class_n_members(from_frame);
-       int      i;
+       size_t   n_members  = get_class_n_members(from_frame);
+       size_t   i;
        assert(from_frame != to_frame);
 
        for (i = 0; i < n_members; ++i) {
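The hunk ends just before the loop body, which is where the change named in
the commit title lands. A sketch of the assumed body, following the usual
copy_entity_own()/set_entity_link() sequence (the exact code is not shown
in this excerpt):

    for (i = 0; i < n_members; ++i) {
        ir_entity *old_ent = get_class_member(from_frame, i);
        ir_entity *new_ent = copy_entity_own(old_ent, to_frame);
        set_entity_link(old_ent, new_ent);
        /* new: announce the copy so the firm debugger can catch it */
        hook_new_entity(new_ent);
    }
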
@@ -299,7 +299,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 {
        ir_node       *pre_call;
        ir_node       *post_call, *post_bl;
-       ir_node       *in[pn_Start_max];
+       ir_node       *in[pn_Start_max+1];
        ir_node       *end, *end_bl, *block;
        ir_node       **res_pred;
        ir_node       **cf_pred;
@@ -343,10 +343,8 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        assert(get_irg_phase_state(irg) != phase_building);
        assert(get_irg_pinned(irg) == op_pin_state_pinned);
        assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
-       set_irg_outs_inconsistent(irg);
        set_irg_extblk_inconsistent(irg);
        set_irg_doms_inconsistent(irg);
-       set_irg_loopinfo_inconsistent(irg);
        set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
        set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
        edges_deactivate(irg);
@@ -393,9 +391,8 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        in[pn_Start_M]              = get_Call_mem(call);
        in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
        in[pn_Start_P_frame_base]   = get_irg_frame(irg);
-       in[pn_Start_P_tls]          = get_irg_tls(irg);
        in[pn_Start_T_args]         = new_r_Tuple(post_bl, n_params, args_in);
-       pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
+       pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
        post_call = call;
 
        /* --
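The two `+1`s in this function (the `in[]` array above and the
`new_r_Tuple` arity here, plus `turn_into_tuple(post_call, pn_Call_max+1)`
further down) all follow from one convention change: pn_Start_max and
pn_Call_max now name the highest valid projection number rather than the
number of projections. A sketch of the assumed convention (not libfirm
code):

    enum {
        pn_demo_first,
        pn_demo_second,
        pn_demo_max = pn_demo_second    /* highest index, not a count */
    };
    ir_node *in[pn_demo_max + 1];       /* max+1 slots, indices 0..max */

The dropped pn_Start_P_tls entry suggests the Start node no longer provides
a TLS projection in this API revision, so no input is prepared for it.
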
@@ -415,7 +412,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        {
                ir_node *start_block;
                ir_node *start;
-               ir_node *bad;
                ir_node *nomem;
 
                start_block = get_irg_start_block(called_graph);
@@ -426,10 +422,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                set_new_node(start, pre_call);
                mark_irn_visited(start);
 
-               bad = get_irg_bad(called_graph);
-               set_new_node(bad, get_irg_bad(irg));
-               mark_irn_visited(bad);
-
                nomem = get_irg_no_mem(called_graph);
                set_new_node(nomem, get_irg_no_mem(irg));
                mark_irn_visited(nomem);
@@ -437,7 +429,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 
 	/* entity link is used to link entities on old stackframe to the
         * new stackframe */
-       irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
+       irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
 
        /* copy entities and nodes */
        assert(!irn_visited(get_irg_end(called_graph)));
@@ -445,7 +437,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
        irg_walk_core(get_irg_end(called_graph), copy_node_inline, set_preds_inline,
                      irg);
 
-       irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+       irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
 
        /* -- Merge the end of the inlined procedure with the call site -- */
        /* We will turn the old Call node into a Tuple with the following
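The rename two hunks up tracks the two resource families: IR_RESOURCE_*
values belong to the per-graph ir_resources_t reserved via
ir_reserve_resources(irg, ...), while entity links are a program-wide
resource and therefore use the IRP_-prefixed enum with the irp_*
functions. The assumed pairing:

    /* per-graph resource, e.g. node links: */
    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
    /* ... */
    ir_free_resources(irg, IR_RESOURCE_IRN_LINK);

    /* program-wide resource, as used here for entity links: */
    irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
    /* ... */
    irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
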
@@ -493,7 +485,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 
        /* build a Tuple for all results of the method.
         * add Phi node if there was more than one Return. */
-       turn_into_tuple(post_call, pn_Call_max);
+       turn_into_tuple(post_call, pn_Call_max+1);
        /* First the Memory-Phi */
        n_mem_phi = 0;
        for (i = 0; i < arity; i++) {
@@ -538,10 +530,9 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                }
                        }
                        if (n_ret > 0) {
-                               ir_mode *mode = get_irn_mode(cf_pred[0]);
-                               phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
+                               phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
                        } else {
-                               phi = new_r_Bad(irg);
+                               phi = new_r_Bad(irg, res_mode);
                        }
                        res_pred[j] = phi;
                        /* Conserve Phi-list for further inlinings -- but might be optimized */
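new_r_Bad taking a mode (here and in the three hunks below) goes together
with the deleted get_irg_bad() pre-mapping further up: Bad is no longer a
single per-graph placeholder, so every use site has to say which mode the
missing value has. The pattern as this patch uses it:

    phi = new_r_Bad(irg, res_mode);   /* a result value that never arrives */
    tup = new_r_Bad(irg, mode_T);     /* an absent result tuple            */
    jmp = new_r_Bad(irg, mode_X);     /* dead control flow                 */
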
@@ -553,14 +544,11 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
                set_Tuple_pred(call, pn_Call_T_result, result_tuple);
        } else {
-               set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+               set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
        }
        /* handle the regular call */
        set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
 
-       /* For now, we cannot inline calls with value_base */
-       set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
-
        /* Finally the exception control flow.
           We have two possible situations:
           First if the Call branches to an exception handler:
@@ -592,7 +580,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                                set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
                        }
                } else {
-                       set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+                       set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
                }
        } else {
                ir_node *main_end_bl;
@@ -619,7 +607,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
                for (i = 0; i < n_exc; ++i)
                        end_preds[main_end_bl_arity + i] = cf_pred[i];
                set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-               set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+               set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
                free(end_preds);
        }
        free(res_pred);
@@ -1154,7 +1142,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
                                        /* call was inlined, Phi/Projs for current graph must be recomputed */
                                        phiproj_computed = 0;
 
-                                       /* callee was inline. Append it's call list. */
+                                       /* callee was inline. Append its call list. */
                                        env->got_inline = 1;
                                        --env->n_call_nodes;
                                        append_call_list(env, callee_env, entry->loop_depth);
@@ -1318,7 +1306,9 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
 {
        ir_entity *ent = get_irg_entity(irg);
        ir_type  *mtp;
-       int      nparams, i, proj_nr;
+       size_t   nparams;
+       int      i;
+       long     proj_nr;
        ir_node  *irg_args, *arg;
 
        mtp      = get_entity_type(ent);
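Splitting the old combined int declaration matches the types these values
actually carry in this API revision: parameter counts are size_t, while
Proj numbers are long, as returned by get_Proj_proj(). A sketch of the
assumed accessors at work:

    size_t nparams = get_method_n_params(mtp);  /* counts: size_t    */
    long   proj_nr = get_Proj_proj(arg);        /* Proj number: long */
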
@@ -1345,17 +1335,12 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
  * After inlining, the local variable might be transformed into an
  * SSA variable by scalar_replacement().
  */
-static unsigned get_method_local_adress_weight(ir_graph *callee, int pos)
+static unsigned get_method_local_adress_weight(ir_graph *callee, size_t pos)
 {
        inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
 
-       if (env->local_weights != NULL) {
-               if (pos < ARR_LEN(env->local_weights))
-                       return env->local_weights[pos];
-               return 0;
-       }
-
-       analyze_irg_local_weights(env, callee);
+       if (env->local_weights == NULL)
+               analyze_irg_local_weights(env, callee);
 
        if (pos < ARR_LEN(env->local_weights))
                return env->local_weights[pos];
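The rewrite folds the duplicated bounds-checked lookup into one path:
compute the weights lazily on first use, then fall through to the shared
return. The generic shape of the pattern, as a sketch with hypothetical
names:

    static unsigned lookup_weight(cache_t *cache, size_t pos)
    {
        if (cache->weights == NULL)      /* first use: fill the cache  */
            compute_weights(cache);
        if (pos < cache->n_weights)      /* single bounds-checked exit */
            return cache->weights[pos];
        return 0;                        /* out of range: no weight    */
    }
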
@@ -1650,6 +1635,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                        set_irg_link(copy, callee_env);
 
                        assure_cf_loop(copy);
+                       memset(&wenv, 0, sizeof(wenv));
                        wenv.x              = callee_env;
                        wenv.ignore_callers = 1;
                        irg_walk_graph(copy, NULL, collect_calls2, &wenv);
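The added memset matters because wenv is a stack struct of which only two
fields are assigned before collect_calls2 reads it; any other fields would
otherwise hold garbage. Sketch of the resulting initialization order
(wenv's type name here is assumed):

    walker_env wenv;
    memset(&wenv, 0, sizeof(wenv));   /* zero all fields first        */
    wenv.x              = callee_env; /* then set the two live fields */
    wenv.ignore_callers = 1;
    irg_walk_graph(copy, NULL, collect_calls2, &wenv);
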
@@ -1682,7 +1668,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                /* remove it from the caller list */
                list_del(&curr_call->list);
 
-               /* callee was inline. Append it's call list. */
+               /* callee was inline. Append its call list. */
                env->got_inline = 1;
                --env->n_call_nodes;
 
@@ -1701,6 +1687,13 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
                         * but we need Call nodes in our graph. Luckily the inliner leaves
                         * this information in the link field. */
                        new_call = (ir_node*)get_irn_link(centry->call);
+                       if (get_irn_irg(new_call) != irg) {
+                               /* centry->call has not been copied, which means it is dead.
+				 * This can happen during inlining when a const function that
+				 * cannot be inlined is only used as an unused argument of
+				 * another function that is inlined. */
+                               continue;
+                       }
                        assert(is_Call(new_call));
 
                        new_entry = duplicate_call_entry(centry, new_call, loop_depth);