X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fopt%2Fopt_inline.c;h=85577c63014f7a04e4dce6e38076ed92a79a5bd8;hb=b27ae245166bb695bc4e418ff416d91bc37d0f28;hp=a93c1f3ff4daf81b1c13d7cb41360d8d1129bad2;hpb=9d4e23060441530a20af5d331268435bfe18f305;p=libfirm

diff --git a/ir/opt/opt_inline.c b/ir/opt/opt_inline.c
index a93c1f3ff..85577c630 100644
--- a/ir/opt/opt_inline.c
+++ b/ir/opt/opt_inline.c
@@ -190,11 +190,11 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
 	ir_entity *called      = get_irg_entity(called_graph);
 	ir_type   *called_type = get_entity_type(called);
 	ir_type   *call_type   = get_Call_type(call);
-	int        n_params    = get_method_n_params(called_type);
-	int        n_arguments = get_method_n_params(call_type);
-	int        n_res       = get_method_n_ress(called_type);
+	size_t     n_params    = get_method_n_params(called_type);
+	size_t     n_arguments = get_method_n_params(call_type);
+	size_t     n_res       = get_method_n_ress(called_type);
 	irg_inline_property prop = get_irg_inline_property(called_graph);
-	int        i;
+	size_t     i;
 	bool       res;
 
 	if (prop == irg_inline_forbidden)
@@ -214,7 +214,7 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
 	 * It is implementation dependent what happens in that case.
 	 * We support inlining, if the bitsize of the types matches AND
 	 * the same arithmetic is used. */
-	for (i = n_params - 1; i >= 0; --i) {
+	for (i = 0; i < n_params; ++i) {
 		ir_type *param_tp = get_method_param_type(called_type, i);
 		ir_type *arg_tp   = get_method_param_type(call_type, i);
 
@@ -231,7 +231,7 @@ static bool can_inline(ir_node *call, ir_graph *called_graph)
 			/* otherwise we can simply "reinterpret" the bits */
 		}
 	}
-	for (i = n_res - 1; i >= 0; --i) {
+	for (i = 0; i < n_res; ++i) {
 		ir_type *decl_res_tp = get_method_res_type(called_type, i);
 		ir_type *used_res_tp = get_method_res_type(call_type, i);
 
@@ -283,8 +283,8 @@ static void copy_frame_entities(ir_graph *from, ir_graph *to)
 {
 	ir_type *from_frame = get_irg_frame_type(from);
 	ir_type *to_frame   = get_irg_frame_type(to);
-	int      n_members  = get_class_n_members(from_frame);
-	int      i;
+	size_t   n_members  = get_class_n_members(from_frame);
+	size_t   i;
 	assert(from_frame != to_frame);
 
 	for (i = 0; i < n_members; ++i) {
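The loop reversals in the two hunks above are forced by the int to size_t
change, not a style choice: size_t is unsigned, so "n_params - 1" wraps to
SIZE_MAX when n_params is 0 and the old termination test "i >= 0" can never
become false. A minimal standalone sketch of the pitfall (plain ISO C, not
libfirm code):

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t n = 0;

        /* Broken once i is unsigned: n - 1 wraps to SIZE_MAX and
         * "i >= 0" is a tautology, so this loop would never end:
         *
         *     for (size_t i = n - 1; i >= 0; --i) ...
         */
        for (size_t i = 0; i < n; ++i)  /* safe: count upward */
            printf("%zu\n", i);
        return 0;
    }

Iterating forward is safe here because the per-parameter and per-result
checks in can_inline() do not depend on visiting order.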
@@ -343,10 +343,8 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 	assert(get_irg_phase_state(irg) != phase_building);
 	assert(get_irg_pinned(irg) == op_pin_state_pinned);
 	assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
-	set_irg_outs_inconsistent(irg);
 	set_irg_extblk_inconsistent(irg);
 	set_irg_doms_inconsistent(irg);
-	set_irg_loopinfo_inconsistent(irg);
 	set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
 	set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
 	edges_deactivate(irg);
@@ -393,7 +391,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 	in[pn_Start_M]              = get_Call_mem(call);
 	in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
 	in[pn_Start_P_frame_base]   = get_irg_frame(irg);
-	in[pn_Start_P_tls]          = get_irg_tls(irg);
 	in[pn_Start_T_args]         = new_r_Tuple(post_bl, n_params, args_in);
 	pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
 	post_call = call;
@@ -415,7 +412,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 	{
 		ir_node *start_block;
 		ir_node *start;
-		ir_node *bad;
 		ir_node *nomem;
 
 		start_block = get_irg_start_block(called_graph);
@@ -426,10 +422,6 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 		set_new_node(start, pre_call);
 		mark_irn_visited(start);
 
-		bad = get_irg_bad(called_graph);
-		set_new_node(bad, get_irg_bad(irg));
-		mark_irn_visited(bad);
-
 		nomem = get_irg_no_mem(called_graph);
 		set_new_node(nomem, get_irg_no_mem(irg));
 		mark_irn_visited(nomem);
@@ -538,10 +530,9 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 			}
 		}
 		if (n_ret > 0) {
-			ir_mode *mode = get_irn_mode(cf_pred[0]);
-			phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
+			phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
 		} else {
-			phi = new_r_Bad(irg);
+			phi = new_r_Bad(irg, res_mode);
 		}
 		res_pred[j] = phi;
 		/* Conserve Phi-list for further inlinings -- but might be optimized */
@@ -553,14 +544,11 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 		result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
 		set_Tuple_pred(call, pn_Call_T_result, result_tuple);
 	} else {
-		set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+		set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
 	}
 	/* handle the regular call */
 	set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
 
-	/* For now, we cannot inline calls with value_base */
-	set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
-
 	/* Finally the exception control flow.
 	   We have two possible situations:
 	   First if the Call branches to an exception handler:
@@ -592,7 +580,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 				set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
 			}
 		} else {
-			set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+			set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
 		}
 	} else {
 		ir_node *main_end_bl;
@@ -619,7 +607,7 @@ int inline_method(ir_node *call, ir_graph *called_graph)
 		for (i = 0; i < n_exc; ++i)
 			end_preds[main_end_bl_arity + i] = cf_pred[i];
 		set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
-		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+		set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
 		free(end_preds);
 	}
 	free(res_pred);
@@ -1154,7 +1142,7 @@ void inline_leave_functions(unsigned maxsize, unsigned leavesize,
 			/* call was inlined, Phi/Projs for current graph must be recomputed */
 			phiproj_computed = 0;
 
-			/* callee was inline. Append it's call list. */
+			/* callee was inline. Append its call list. */
 			env->got_inline = 1;
 			--env->n_call_nodes;
 			append_call_list(env, callee_env, entry->loop_depth);
@@ -1318,7 +1306,9 @@ static void analyze_irg_local_weights(inline_irg_env *env, ir_graph *irg)
 {
 	ir_entity *ent = get_irg_entity(irg);
 	ir_type   *mtp;
-	int       nparams, i, proj_nr;
+	size_t    nparams;
+	int       i;
+	long      proj_nr;
 	ir_node   *irg_args, *arg;
 
 	mtp = get_entity_type(ent);
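The new_r_Bad() calls in the inline_method() hunks all gained a mode
argument: in this libfirm revision a Bad placeholder carries the mode of the
value it stands in for (res_mode for an unused scalar result, mode_T for the
whole result tuple, mode_X for dead control flow). A sketch of the
result-joining step, using only constructors that appear in the patch; the
helper itself is hypothetical, not part of the patch:

    /* Hypothetical helper: produce the value for one result slot of the
     * inlined call. */
    static ir_node *join_result(ir_graph *irg, ir_node *post_bl,
                                int n_ret, ir_node **cf_pred,
                                ir_mode *res_mode)
    {
        if (n_ret > 0)  /* at least one Return delivers the value */
            return new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
        /* no Return reaches this slot: a Bad of the correct mode */
        return new_r_Bad(irg, res_mode);
    }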
@@ -1345,17 +1335,12 @@
  * After inlining, the local variable might be transformed into a
  * SSA variable by scalar_replacement().
  */
-static unsigned get_method_local_adress_weight(ir_graph *callee, int pos)
+static unsigned get_method_local_adress_weight(ir_graph *callee, size_t pos)
 {
 	inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
 
-	if (env->local_weights != NULL) {
-		if (pos < ARR_LEN(env->local_weights))
-			return env->local_weights[pos];
-		return 0;
-	}
-
-	analyze_irg_local_weights(env, callee);
+	if (env->local_weights == NULL)
+		analyze_irg_local_weights(env, callee);
 
 	if (pos < ARR_LEN(env->local_weights))
 		return env->local_weights[pos];
@@ -1650,6 +1635,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 			set_irg_link(copy, callee_env);
 			assure_cf_loop(copy);
 
+			memset(&wenv, 0, sizeof(wenv));
 			wenv.x              = callee_env;
 			wenv.ignore_callers = 1;
 			irg_walk_graph(copy, NULL, collect_calls2, &wenv);
@@ -1682,7 +1668,7 @@ static void inline_into(ir_graph *irg, unsigned maxsize,
 			/* remove it from the caller list */
 			list_del(&curr_call->list);
 
-			/* callee was inline. Append it's call list. */
+			/* callee was inline. Append its call list. */
 			env->got_inline = 1;
 			--env->n_call_nodes;
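get_method_local_adress_weight() is reduced above to the usual
lazy-initialization shape: populate the cache on the first query, then serve
every lookup through one bounds-checked path instead of duplicating it for
the cold and warm cases. A generic standalone sketch of that shape, with
hypothetical names (weight_cache, compute_weights, cached_weight):

    #include <stddef.h>
    #include <stdlib.h>

    struct weight_cache {
        unsigned *weights;  /* NULL until the first query */
        size_t    n;
    };

    /* stand-in for analyze_irg_local_weights(): fill the cache once */
    static void compute_weights(struct weight_cache *c)
    {
        c->n       = 4;
        c->weights = calloc(c->n, sizeof(*c->weights));
    }

    static unsigned cached_weight(struct weight_cache *c, size_t pos)
    {
        if (c->weights == NULL)  /* cold: build the cache first */
            compute_weights(c);
        if (pos < c->n)          /* one shared bounds check */
            return c->weights[pos];
        return 0;                /* out of range: default weight */
    }

The memset(&wenv, 0, sizeof(wenv)) added in inline_into() closes a related
gap: wenv is a local struct of which only some fields are assigned before
irg_walk_graph() reads it, so any field not set explicitly would otherwise
hold indeterminate stack contents.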