*
* @return the new ABI call object
*/
-static be_abi_call_t *be_abi_call_new()
+static be_abi_call_t *be_abi_call_new(void)
{
be_abi_call_t *call = xmalloc(sizeof(call[0]));
call->flags.val = 0;
const ir_edge_t *edge;
ir_node *new_alloc;
+ ir_node *addr;
+ ir_node *copy;
foreach_out_edge(alloc, edge) {
ir_node *irn = get_edge_src_irn(edge);
/* Beware: currently Alloc nodes without a result might happen,
only escape analysis kills them and this phase runs only for object
oriented source. We kill the Alloc here. */
- if (alloc_res == NULL) {
+ if (alloc_res == NULL && alloc_mem) {
exchange(alloc_mem, get_Alloc_mem(alloc));
return curr_sp;
}
env->call->flags.bits.try_omit_fp = 0;
new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
- exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
+ exchange(alloc, new_alloc);
if(alloc_mem != NULL)
- exchange(alloc_mem, new_r_NoMem(irg));
+ set_Proj_proj(alloc_mem, pn_be_AddSP_M);
- curr_sp = new_alloc;
+ /* fix projnum of alloca res */
+ set_Proj_proj(alloc_res, pn_be_AddSP_res);
+
+ addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
+
+ /* Copy the address away, since it could be used after further stack pointer modifications. */
+ /* Let it point to curr_sp just for the moment, I'll reroute it in a second. */
+ copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+
+ /* Let all users of the Alloc() result now point to the copy. */
+ edges_reroute(alloc_res, copy, irg);
+
+ /* Rewire the copy appropriately. */
+ set_irn_n(copy, be_pos_Copy_op, addr);
+
+ curr_sp = alloc_res;
}
return curr_sp;
static int dependent_on(ir_node *n1, ir_node *n2)
{
ir_node *bl = get_nodes_block(n1);
- ir_graph *irg = get_irn_irg(bl);
assert(bl == get_nodes_block(n2));
*/
static void link_calls_in_block_walker(ir_node *irn, void *data)
{
- if(is_Call(irn)) {
+ if(is_Call(irn) || (get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)) {
be_abi_irg_t *env = data;
ir_node *bl = get_nodes_block(irn);
void *save = get_irn_link(bl);
- env->call->flags.bits.irg_is_leaf = 0;
+ if (is_Call(irn))
+ env->call->flags.bits.irg_is_leaf = 0;
set_irn_link(irn, save);
set_irn_link(bl, irn);
/* clear SP entry, since it has already been grown. */
pmap_insert(reg_map, (void *) isa->sp, NULL);
for(i = 0; i < n_res; ++i) {
- ir_node *res = get_Return_res(irn, i);
be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
in[n] = be_abi_reg_map_get(reg_map, arg->reg);
restore_optimization_state(&state);
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
- env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
-
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
/* Lower all call nodes in the IRG. */
process_calls(env);
+ /*
+ Beware: init backend abi call object after processing calls,
+ otherwise some information might be not yet available.
+ */
+ env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+
/* Process the IRG */
modify_irg(env);
arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
env->call->cb->done(env->cb);
- be_liveness(irg);
return env;
}
static void collect_stack_nodes_walker(ir_node *irn, void *data)
{
struct fix_stack_walker_info *info = data;
+ ir_mode *mode;
+
+ if (is_Block(irn))
+ return;
+
+ mode = get_irn_mode(irn);
- if(arch_irn_is(info->aenv, irn, modify_sp))
+ if (arch_irn_is(info->aenv, irn, modify_sp) && mode != mode_T && mode != mode_M)
pset_insert_ptr(info->nodes, irn);
}
-void be_abi_fix_stack_nodes(be_abi_irg_t *env)
+void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
{
dom_front_info_t *df;
pset *stack_nodes = pset_new_ptr(16);
df = be_compute_dominance_frontiers(env->birg->irg);
irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, &info);
pset_insert_ptr(stack_nodes, env->init_sp);
- be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
+ be_ssa_constr_set_phis(df, lv, stack_nodes, env->stack_phis);
del_pset(stack_nodes);
- /* Liveness could have changed due to Phi nodes. */
- be_liveness(env->birg->irg);
-
/* free these dominance frontiers */
be_free_dominance_frontiers(df);
}
return NULL;
}
+static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent)
+{
+}
+
static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
{
}
abi_classify,
abi_get_flags,
abi_get_frame_entity,
- abi_set_stack_bias
+ abi_set_frame_entity,
+ abi_set_stack_bias,
+ NULL, /* get_inverse */
+ NULL, /* get_op_estimated_cost */
+ NULL, /* possible_memory_operand */
+ NULL, /* perform_memory_operand */
};
static const arch_irn_handler_t abi_irn_handler = {