/* access to value_type */
*allow_inline = false;
}
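+ /* the inliner cannot handle parameter entities yet (they would need
+  * explicit Store operations at the call site), so be conservative here */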
+ if (is_parameter_entity(ent)) {
+ *allow_inline = false;
+ }
}
} else if (is_Alloc(node) && get_Alloc_where(node) == stack_alloc) {
/* From GCC:
ir_entity *called = get_irg_entity(called_graph);
ir_type *called_type = get_entity_type(called);
ir_type *call_type = get_Call_type(call);
- int n_params = get_method_n_params(called_type);
- int n_arguments = get_method_n_params(call_type);
- int n_res = get_method_n_ress(called_type);
+ size_t n_params = get_method_n_params(called_type);
+ size_t n_arguments = get_method_n_params(call_type);
+ size_t n_res = get_method_n_ress(called_type);
irg_inline_property prop = get_irg_inline_property(called_graph);
- int i;
+ size_t i;
bool res;
if (prop == irg_inline_forbidden)
* It is implementation dependent what happens in that case.
 * We support inlining if the bitsizes of the types match AND
* the same arithmetic is used. */
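+ /* i is unsigned (size_t) now, so a downward loop with i >= 0 would
+  * never terminate; iterate upwards instead */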
- for (i = n_params - 1; i >= 0; --i) {
+ for (i = 0; i < n_params; ++i) {
ir_type *param_tp = get_method_param_type(called_type, i);
ir_type *arg_tp = get_method_param_type(call_type, i);
/* otherwise we can simply "reinterpret" the bits */
}
}
- for (i = n_res - 1; i >= 0; --i) {
+ for (i = 0; i < n_res; ++i) {
ir_type *decl_res_tp = get_method_res_type(called_type, i);
ir_type *used_res_tp = get_method_res_type(call_type, i);
{
ir_type *from_frame = get_irg_frame_type(from);
ir_type *to_frame = get_irg_frame_type(to);
- int n_members = get_class_n_members(from_frame);
- int i;
+ size_t n_members = get_class_n_members(from_frame);
+ size_t i;
assert(from_frame != to_frame);
for (i = 0; i < n_members; ++i) {
ir_entity *old_ent = get_class_member(from_frame, i);
ir_entity *new_ent = copy_entity_own(old_ent, to_frame);
set_entity_link(old_ent, new_ent);
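+ /* graphs whose frame type contains parameter entities are never
+  * inlined (see the benefice check below), so none may appear here */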
+ assert(!is_parameter_entity(old_ent));
}
}
{
ir_node *pre_call;
ir_node *post_call, *post_bl;
- ir_node *in[pn_Start_max];
+ ir_node *in[pn_Start_max+1];
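+ /* pn_Start_max is now the highest Proj number, not a count, so the
+  * array needs pn_Start_max+1 entries (likewise for pn_Call_max below) */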
ir_node *end, *end_bl, *block;
ir_node **res_pred;
ir_node **cf_pred;
assert(get_irg_phase_state(irg) != phase_building);
assert(get_irg_pinned(irg) == op_pin_state_pinned);
assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
- set_irg_outs_inconsistent(irg);
- set_irg_extblk_inconsistent(irg);
- set_irg_doms_inconsistent(irg);
- set_irg_loopinfo_inconsistent(irg);
+ clear_irg_state(irg, IR_GRAPH_STATE_CONSISTENT_DOMINANCE
+ | IR_GRAPH_STATE_VALID_EXTENDED_BLOCKS
+ | IR_GRAPH_STATE_CONSISTENT_ENTITY_USAGE);
set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
- set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
edges_deactivate(irg);
/* here we know we WILL inline, so inform the statistics */
in[pn_Start_M] = get_Call_mem(call);
in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
in[pn_Start_P_frame_base] = get_irg_frame(irg);
- in[pn_Start_P_tls] = get_irg_tls(irg);
in[pn_Start_T_args] = new_r_Tuple(post_bl, n_params, args_in);
- pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
+ pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
post_call = call;
/* --
{
ir_node *start_block;
ir_node *start;
- ir_node *bad;
ir_node *nomem;
start_block = get_irg_start_block(called_graph);
set_new_node(start, pre_call);
mark_irn_visited(start);
- bad = get_irg_bad(called_graph);
- set_new_node(bad, get_irg_bad(irg));
- mark_irn_visited(bad);
-
nomem = get_irg_no_mem(called_graph);
set_new_node(nomem, get_irg_no_mem(irg));
mark_irn_visited(nomem);
/* the entity link is used to link entities of the old stackframe to the
* new stackframe */
- irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
+ irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
/* copy entities and nodes */
assert(!irn_visited(get_irg_end(called_graph)));
irg_walk_core(get_irg_end(called_graph), copy_node_inline, set_preds_inline,
irg);
- irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
+ irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
/* -- Merge the end of the inlined procedure with the call site -- */
/* We will turn the old Call node into a Tuple with the following
/* build a Tuple for all results of the method.
* add Phi node if there was more than one Return. */
- turn_into_tuple(post_call, pn_Call_max);
+ turn_into_tuple(post_call, pn_Call_max+1);
/* First the Memory-Phi */
n_mem_phi = 0;
for (i = 0; i < arity; i++) {
}
}
if (n_ret > 0) {
- ir_mode *mode = get_irn_mode(cf_pred[0]);
- phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
+ phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
} else {
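+ /* new_r_Bad now takes the mode of the value the Bad node replaces */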
- phi = new_r_Bad(irg);
+ phi = new_r_Bad(irg, res_mode);
}
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
set_Tuple_pred(call, pn_Call_T_result, result_tuple);
} else {
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
}
/* handle the regular call */
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
- /* For now, we cannot inline calls with value_base */
- set_Tuple_pred(call, pn_Call_P_value_res_base, new_r_Bad(irg));
-
/* Finally the exception control flow.
We have two possible situations:
First, if the Call branches to an exception handler:
set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
}
} else {
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
}
} else {
ir_node *main_end_bl;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
free(end_preds);
}
free(res_pred);
{
inline_irg_env *env;
ir_graph *irg;
- int i, n_irgs;
+ size_t i, n_irgs;
ir_graph *rem;
int did_inline;
wenv_t wenv;
assert(get_irg_phase_state(irg) != phase_building);
free_callee_info(irg);
- assure_cf_loop(irg);
+ assure_loopinfo(irg);
wenv.x = (inline_irg_env*)get_irg_link(irg);
irg_walk_graph(irg, NULL, collect_calls2, &wenv);
}
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
- assure_cf_loop(copy);
+ assure_loopinfo(copy);
wenv.x = callee_env;
wenv.ignore_callers = 1;
irg_walk_graph(copy, NULL, collect_calls2, &wenv);
/* call was inlined, Phi/Projs for current graph must be recomputed */
phiproj_computed = 0;
- /* callee was inline. Append it's call list. */
+ /* callee was inline. Append its call list. */
env->got_inline = 1;
--env->n_call_nodes;
append_call_list(env, callee_env, entry->loop_depth);
{
ir_entity *ent = get_irg_entity(irg);
ir_type *mtp;
- int nparams, i, proj_nr;
+ size_t nparams;
+ int i;
+ long proj_nr;
ir_node *irg_args, *arg;
mtp = get_entity_type(ent);
* After inlining, the local variable might be transformed into a
* SSA variable by scalar_replacement().
*/
-static unsigned get_method_local_adress_weight(ir_graph *callee, int pos)
+static unsigned get_method_local_adress_weight(ir_graph *callee, size_t pos)
{
inline_irg_env *env = (inline_irg_env*)get_irg_link(callee);
- if (env->local_weights != NULL) {
- if (pos < ARR_LEN(env->local_weights))
- return env->local_weights[pos];
- return 0;
- }
-
- analyze_irg_local_weights(env, callee);
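+ /* compute the weights lazily on the first query */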
+ if (env->local_weights == NULL)
+ analyze_irg_local_weights(env, callee);
if (pos < ARR_LEN(env->local_weights))
return env->local_weights[pos];
{
ir_node *call = entry->call;
ir_entity *ent = get_irg_entity(callee);
+ ir_type *callee_frame;
+ size_t i, n_members, n_params;
ir_node *frame_ptr;
ir_type *mtp;
int weight = 0;
- int i, n_params, all_const;
+ int all_const;
unsigned cc, v;
irg_inline_property prop;
return entry->benefice = INT_MIN;
}
+ callee_frame = get_irg_frame_type(callee);
+ n_members = get_class_n_members(callee_frame);
+ for (i = 0; i < n_members; ++i) {
+ ir_entity *frame_ent = get_class_member(callee_frame, i);
+ if (is_parameter_entity(frame_ent)) {
+ /* TODO: the inliner should handle parameter entities by inserting Store operations */
+ DB((dbg, LEVEL_2, "In %+F Call to %+F: inlining forbidden due to parameter entity\n", call, callee));
+ set_irg_inline_property(callee, irg_inline_forbidden);
+ return entry->benefice = INT_MIN;
+ }
+ }
+
if (get_irg_additional_properties(callee) & mtp_property_noreturn) {
DB((dbg, LEVEL_2, "In %+F Call to %+F: not inlining noreturn or weak\n",
call, callee));
cc = get_method_calling_convention(mtp);
if (cc & cc_reg_param) {
/* register parameter, smaller costs for register parameters */
- int max_regs = cc & ~cc_bits;
+ size_t max_regs = cc & ~cc_bits;
if (max_regs < n_params)
weight += max_regs * 2 + (n_params - max_regs) * 5;
return entry->benefice = weight;
}
-static ir_graph **irgs;
-static int last_irg;
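+/** Environment for callgraph_walker(): collects all visited graphs. */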
+typedef struct walk_env_t {
+ ir_graph **irgs;
+ size_t last_irg;
+} walk_env_t;
/**
* Callgraph walker, collect all visited graphs.
*/
static void callgraph_walker(ir_graph *irg, void *data)
{
- (void) data;
- irgs[last_irg++] = irg;
+ walk_env_t *env = (walk_env_t *)data;
+ env->irgs[env->last_irg++] = irg;
}
/**
*/
static ir_graph **create_irg_list(void)
{
- ir_entity **free_methods;
- int n_irgs = get_irp_n_irgs();
+ ir_entity **free_methods;
+ size_t n_irgs = get_irp_n_irgs();
+ walk_env_t env;
cgana(&free_methods);
xfree(free_methods);
compute_callgraph();
- last_irg = 0;
- irgs = XMALLOCNZ(ir_graph*, n_irgs);
+ env.irgs = XMALLOCNZ(ir_graph*, n_irgs);
+ env.last_irg = 0;
- callgraph_walk(NULL, callgraph_walker, NULL);
- assert(n_irgs == last_irg);
+ callgraph_walk(NULL, callgraph_walker, &env);
+ assert(n_irgs == env.last_irg);
- return irgs;
+ return env.irgs;
}
/**
callee_env = alloc_inline_irg_env();
set_irg_link(copy, callee_env);
- assure_cf_loop(copy);
+ assure_loopinfo(copy);
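+ /* zero wenv first; only some of its fields are set explicitly below */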
+ memset(&wenv, 0, sizeof(wenv));
wenv.x = callee_env;
wenv.ignore_callers = 1;
irg_walk_graph(copy, NULL, collect_calls2, &wenv);
/* remove it from the caller list */
list_del(&curr_call->list);
- /* callee was inline. Append it's call list. */
+ /* callee was inline. Append its call list. */
env->got_inline = 1;
--env->n_call_nodes;
* but we need Call nodes in our graph. Luckily the inliner leaves
* this information in the link field. */
new_call = (ir_node*)get_irn_link(centry->call);
+ if (get_irn_irg(new_call) != irg) {
+ /* centry->call has not been copied, which means it is dead.
+ * This can happen during inlining when a const function that
+ * cannot be inlined is used only as an unused argument of
+ * another function that is inlined. */
+ continue;
+ }
assert(is_Call(new_call));
new_entry = duplicate_call_entry(centry, new_call, loop_depth);
opt_ptr after_inline_opt)
{
inline_irg_env *env;
- int i, n_irgs;
+ size_t i, n_irgs;
ir_graph *rem;
wenv_t wenv;
pmap *copied_graphs;
free_callee_info(irg);
wenv.x = (inline_irg_env*)get_irg_link(irg);
- assure_cf_loop(irg);
+ assure_loopinfo(irg);
irg_walk_graph(irg, NULL, collect_calls2, &wenv);
}