post_bl = get_nodes_Block(call);
set_irg_current_block(current_ir_graph, post_bl);
/* XxMxPxP of Start + parameter of Call */
- in[0] = new_Jmp();
- in[1] = get_Call_mem(call);
- in[2] = get_irg_frame(current_ir_graph);
- in[3] = get_irg_globals(current_ir_graph);
- in[4] = new_Tuple (get_Call_n_params(call), get_Call_param_arr(call));
+ in[pn_Start_X_initial_exec] = new_Jmp();
+ in[pn_Start_M] = get_Call_mem(call);
+ in[pn_Start_P_frame_base] = get_irg_frame(current_ir_graph);
+ in[pn_Start_P_globals] = get_irg_globals(current_ir_graph);
+ in[pn_Start_T_arg] = new_Tuple(get_Call_n_params(call), get_Call_param_arr(call));
+ /* in[pn_Start_P_value_arg_base] = ??? */
pre_call = new_Tuple(5, in);
post_call = call;
}
}
phi = new_Phi(n_ret, cf_pred, mode_M);
- set_Tuple_pred(call, 0, phi);
+ set_Tuple_pred(call, pn_Call_M_regular, phi);
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_Block(phi) == post_bl) {
set_irn_link(phi, get_irn_link(post_bl));
for (j = 0; j < n_res; j++) {
n_ret = 0;
for (i = 0; i < arity; i++) {
- ret = intern_get_irn_n(end_bl, i);
- if (intern_get_irn_op(ret) == op_Return) {
- cf_pred[n_ret] = get_Return_res(ret, j);
- n_ret++;
- }
+ ret = intern_get_irn_n(end_bl, i);
+ if (intern_get_irn_op(ret) == op_Return) {
+ cf_pred[n_ret] = get_Return_res(ret, j);
+ n_ret++;
+ }
}
phi = new_Phi(n_ret, cf_pred, intern_get_irn_mode(cf_pred[0]));
res_pred[j] = phi;
/* Conserve Phi-list for further inlinings -- but might be optimized */
if (get_nodes_Block(phi) == post_bl) {
- set_irn_link(phi, get_irn_link(post_bl));
- set_irn_link(post_bl, phi);
+ set_irn_link(phi, get_irn_link(post_bl));
+ set_irn_link(post_bl, phi);
}
}
- set_Tuple_pred(call, 2, new_Tuple(n_res, res_pred));
+ set_Tuple_pred(call, pn_Call_T_result, new_Tuple(n_res, res_pred));
} else {
- set_Tuple_pred(call, 2, new_Bad());
+ set_Tuple_pred(call, pn_Call_T_result, new_Bad());
}
/* Finally the exception control flow.
We have two (three) possible situations:
}
if (n_exc > 0) {
new_Block(n_exc, cf_pred); /* watch it: current_block is changed! */
- set_Tuple_pred(call, 1, new_Jmp());
+ set_Tuple_pred(call, pn_Call_X_except, new_Jmp());
/* The Phi for the memories with the exception objects */
n_exc = 0;
for (i = 0; i < arity; i++) {
n_exc++;
}
}
- set_Tuple_pred(call, 3, new_Phi(n_exc, cf_pred, mode_M));
+ set_Tuple_pred(call, pn_Call_M_except, new_Phi(n_exc, cf_pred, mode_M));
} else {
- set_Tuple_pred(call, 1, new_Bad());
- set_Tuple_pred(call, 3, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
}
} else {
ir_node *main_end_bl;
for (i = 0; i < n_exc; ++i)
end_preds[main_end_bl_arity + i] = cf_pred[i];
set_irn_in(main_end_bl, n_exc + main_end_bl_arity, end_preds);
- set_Tuple_pred(call, 1, new_Bad());
- set_Tuple_pred(call, 3, new_Bad());
+ set_Tuple_pred(call, pn_Call_X_except, new_Bad());
+ set_Tuple_pred(call, pn_Call_M_except, new_Bad());
free(end_preds);
}
free(res_pred);
/* Turn Div into a tuple (mem, bad, a) */
ir_node *mem = get_Div_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, a);
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_Div_res, a);
}
return n;
}
doesn't change the memory -- a write after read. */
a = get_Store_mem(n);
turn_into_tuple(n, 2);
- set_Tuple_pred(n, 0, a);
- set_Tuple_pred(n, 1, new_Bad()); DBG_OPT_WAR;
+ set_Tuple_pred(n, pn_Store_M, a);
+ set_Tuple_pred(n, pn_Store_X_except, new_Bad()); DBG_OPT_WAR;
}
return n;
}
ir_node *mem = get_Div_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
+ set_Tuple_pred(n, pn_Div_M, mem);
+ set_Tuple_pred(n, pn_Div_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Div_res, new_Const(get_tarval_mode(ta), ta));
}
return n;
}
/* Turn Mod into a tuple (mem, bad, value) */
ir_node *mem = get_Mod_mem(n);
turn_into_tuple(n, 3);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad());
- set_Tuple_pred(n, 2, new_Const(get_tarval_mode(ta), ta));
+ set_Tuple_pred(n, pn_Mod_M, mem);
+ set_Tuple_pred(n, pn_Mod_X_except, new_Bad());
+ set_Tuple_pred(n, pn_Mod_res, new_Const(get_tarval_mode(ta), ta));
}
return n;
}
if (evaluated) { /* replace by tuple */
ir_node *mem = get_DivMod_mem(n);
turn_into_tuple(n, 4);
- set_Tuple_pred(n, 0, mem);
- set_Tuple_pred(n, 1, new_Bad()); /* no exception */
- set_Tuple_pred(n, 2, a);
- set_Tuple_pred(n, 3, b);
+ set_Tuple_pred(n, pn_DivMod_M, mem);
+ set_Tuple_pred(n, pn_DivMod_X_except, new_Bad()); /* no exception */
+ set_Tuple_pred(n, pn_DivMod_res_div, a);
+ set_Tuple_pred(n, pn_DivMod_res_mod, b);
assert(get_nodes_Block(n));
}
jmp = new_r_Jmp(current_ir_graph, get_nodes_Block(n));
turn_into_tuple(n, 2);
if (ta == tarval_b_true) {
- set_Tuple_pred(n, 0, new_Bad());
- set_Tuple_pred(n, 1, jmp);
+ set_Tuple_pred(n, pn_Cond_false, new_Bad());
+ set_Tuple_pred(n, pn_Cond_true, jmp);
} else {
- set_Tuple_pred(n, 0, jmp);
- set_Tuple_pred(n, 1, new_Bad());
+ set_Tuple_pred(n, pn_Cond_false, jmp);
+ set_Tuple_pred(n, pn_Cond_true, new_Bad());
}
/* We might generate an endless loop, so keep it alive. */
add_End_keepalive(get_irg_end(current_ir_graph), get_nodes_Block(n));