{
ir_node *pre_call;
ir_node *post_call, *post_bl;
- ir_node *in[pn_Start_max];
+ ir_node *in[pn_Start_max+1];
ir_node *end, *end_bl, *block;
ir_node **res_pred;
ir_node **cf_pred;
assert(get_irg_phase_state(irg) != phase_building);
assert(get_irg_pinned(irg) == op_pin_state_pinned);
assert(get_irg_pinned(called_graph) == op_pin_state_pinned);
- set_irg_outs_inconsistent(irg);
set_irg_extblk_inconsistent(irg);
set_irg_doms_inconsistent(irg);
- set_irg_loopinfo_inconsistent(irg);
set_irg_callee_info_state(irg, irg_callee_info_inconsistent);
set_irg_entity_usage_state(irg, ir_entity_usage_not_computed);
edges_deactivate(irg);
in[pn_Start_X_initial_exec] = new_r_Jmp(post_bl);
in[pn_Start_P_frame_base] = get_irg_frame(irg);
in[pn_Start_T_args] = new_r_Tuple(post_bl, n_params, args_in);
- pre_call = new_r_Tuple(post_bl, pn_Start_max, in);
+ pre_call = new_r_Tuple(post_bl, pn_Start_max+1, in);
post_call = call;
/* --
{
ir_node *start_block;
ir_node *start;
- ir_node *bad;
ir_node *nomem;
start_block = get_irg_start_block(called_graph);
set_new_node(start, pre_call);
mark_irn_visited(start);
- bad = get_irg_bad(called_graph);
- set_new_node(bad, get_irg_bad(irg));
- mark_irn_visited(bad);
-
nomem = get_irg_no_mem(called_graph);
set_new_node(nomem, get_irg_no_mem(irg));
mark_irn_visited(nomem);
/* build a Tuple for all results of the method.
* add Phi node if there was more than one Return. */
- turn_into_tuple(post_call, pn_Call_max);
+ turn_into_tuple(post_call, pn_Call_max+1);
/* First the Memory-Phi */
n_mem_phi = 0;
for (i = 0; i < arity; i++) {
}
}
if (n_ret > 0) {
- ir_mode *mode = get_irn_mode(cf_pred[0]);
- phi = new_r_Phi(post_bl, n_ret, cf_pred, mode);
+ phi = new_r_Phi(post_bl, n_ret, cf_pred, res_mode);
} else {
- phi = new_r_Bad(irg);
+ phi = new_r_Bad(irg, res_mode);
}
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
result_tuple = new_r_Tuple(post_bl, n_res, res_pred);
set_Tuple_pred(call, pn_Call_T_result, result_tuple);
} else {
- set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_T_result, new_r_Bad(irg, mode_T));
}
/* handle the regular call */
set_Tuple_pred(call, pn_Call_X_regular, new_r_Jmp(post_bl));
set_Tuple_pred(call, pn_Call_X_except, new_r_Jmp(block));
}
} else {
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
}
} else {
ir_node *main_end_bl;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg));
+ set_Tuple_pred(call, pn_Call_X_except, new_r_Bad(irg, mode_X));
free(end_preds);
}
free(res_pred);
set_irg_link(copy, callee_env);
assure_cf_loop(copy);
+ memset(&wenv, 0, sizeof(wenv));
wenv.x = callee_env;
wenv.ignore_callers = 1;
irg_walk_graph(copy, NULL, collect_calls2, &wenv);
* but we need Call nodes in our graph. Luckily the inliner leaves
* this information in the link field. */
new_call = (ir_node*)get_irn_link(centry->call);
+ if (get_irn_irg(new_call) != irg) {
+ /* centry->call has not been copied, which means it is dead.
+ * This might happen during inlining, if a const function,
+ * which cannot be inlined, is only used as an unused argument
+ * of another function, which is inlined. */
+ continue;
+ }
assert(is_Call(new_call));
new_entry = duplicate_call_entry(centry, new_call, loop_depth);