const ir_edge_t *edge;
ir_node *new_alloc;
+ ir_node *addr;
+ ir_node *copy;
foreach_out_edge(alloc, edge) {
ir_node *irn = get_edge_src_irn(edge);
/* Beware: currently Alloc nodes without a result might happen,
only escape analysis kills them and this phase runs only for object
oriented source. We kill the Alloc here. */
- if (alloc_res == NULL) {
+ if (alloc_res == NULL && alloc_mem) {
exchange(alloc_mem, get_Alloc_mem(alloc));
return curr_sp;
}
env->call->flags.bits.try_omit_fp = 0;
new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
- exchange(alloc, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
+ exchange(alloc, new_alloc);
if(alloc_mem != NULL)
- exchange(alloc_mem, new_r_NoMem(irg));
+ set_Proj_proj(alloc_mem, pn_be_AddSP_M);
/* fix projnum of alloca res */
- set_Proj_proj(alloc_res, 1);
+ set_Proj_proj(alloc_res, pn_be_AddSP_res);
+
+ addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
+
+ /* Copy the address away, since it could be used after further stack pointer modifications. */
+ /* Let it point to curr_sp just for the moment; it is rerouted below. */
+ copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+
+ /* Let all users of the Alloc() result now point to the copy. */
+ edges_reroute(alloc_res, copy, irg);
+
+ /* Rewire the copy appropriately. */
+ set_irn_n(copy, be_pos_Copy_op, addr);
curr_sp = alloc_res;
}
restore_optimization_state(&state);
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
- env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
-
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
/* Lower all call nodes in the IRG. */
process_calls(env);
+ /*
+ Beware: init backend abi call object after processing calls,
+ otherwise some information might be not yet available.
+ */
+ env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+
/* Process the IRG */
modify_irg(env);
/*
 * Graph walker callback: collect every node that modifies the stack
 * pointer into info->nodes.
 *
 * Blocks are skipped, and so are mode_T (tuple) and mode_M (memory)
 * nodes, so that only the value-producing stack-pointer nodes
 * themselves end up in the set.
 *
 * NOTE(review): this block contained leftover unified-diff markers
 * (an old `-` line alongside its `+` replacement); resolved here to
 * the post-change version so the file compiles.
 */
static void collect_stack_nodes_walker(ir_node *irn, void *data)
{
	struct fix_stack_walker_info *info = data;
	ir_mode *mode;

	if (is_Block(irn))
		return;

	mode = get_irn_mode(irn);

	if (arch_irn_is(info->aenv, irn, modify_sp) && mode != mode_T && mode != mode_M)
		pset_insert_ptr(info->nodes, irn);
}
return NULL;
}
+static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent)
+{
+}
+
/*
 * irn-ops callback: apply a stack bias to an ABI node.
 * Intentionally a no-op — presumably the nodes handled by this handler
 * need no bias adjustment (body deliberately empty; confirm against the
 * vtable users).
 */
static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
{
}
abi_classify,
abi_get_flags,
abi_get_frame_entity,
- abi_set_stack_bias
+ abi_set_frame_entity,
+ abi_set_stack_bias,
+ NULL, /* get_inverse */
+ NULL, /* get_op_estimated_cost */
+ NULL, /* possible_memory_operand */
+ NULL, /* perform_memory_operand */
};
static const arch_irn_handler_t abi_irn_handler = {