* @param curr_sp The stack pointer node to use.
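+ * @param alloca_copy A Copy of the last stack Alloc's result (or NULL); it is attached
+ *                    as a dependency to new stack-modifying nodes to keep them ordered.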
* @return The stack pointer after the call.
*/
-static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
+static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
{
ir_graph *irg = env->birg->irg;
const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
const arch_register_t *sp = arch_isa_sp(isa);
ir_mode *mach_mode = sp->reg_class->mode;
struct obstack *obst = &env->obst;
- ir_node *no_mem = get_irg_no_mem(irg);
int no_alloc = call->flags.bits.frame_is_setup_on_call;
ir_node *res_proj = NULL;
* moving the stack pointer along the stack's direction.
*/
if(stack_dir < 0 && !do_seq && !no_alloc) {
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
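+ /* IncSP no longer takes a memory input; a dependency edge keeps it behind the last stack Alloc. */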
+ if(alloca_copy) {
+ add_irn_dep(curr_sp, alloca_copy);
+ alloca_copy = NULL;
+ }
+ }
+
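+ /* In the non-sequential case the argument stores start from NoMem; the Call's
+ original memory is pushed onto the obstack to become part of the Sync below. */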
+ if(!do_seq) {
+ obstack_ptr_grow(obst, get_Call_mem(irn));
+ curr_mem = new_NoMem();
+ } else {
+ curr_mem = get_Call_mem(irn);
}
assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
*/
if (do_seq) {
curr_ofs = 0;
- addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, curr_mem,
- param_size + arg->space_before, be_stack_dir_expand);
+ addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
+ if(alloca_copy) {
+ add_irn_dep(curr_sp, alloca_copy);
+ alloca_copy = NULL;
+ }
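+ /* the dependency replaces the memory input the old IncSP constructor had */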
+ add_irn_dep(curr_sp, curr_mem);
}
else {
curr_ofs += arg->space_before;
/* Insert a store for primitive arguments. */
if (is_atomic_type(param_type)) {
- mem = new_r_Store(irg, bl, curr_mem, addr, param);
- mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
+ ir_node *store;
+ store = new_r_Store(irg, bl, curr_mem, addr, param);
+ mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
}
/* Make a mem copy for compound arguments. */
else {
+ ir_node *copy;
+
assert(mode_is_reference(get_irn_mode(param)));
- mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
- mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
+ copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
+ mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
}
curr_ofs += param_size;
in = (ir_node **) obstack_finish(obst);
/* We only need the Sync if we didn't build the stores sequentially. */
- if(!do_seq)
- curr_mem = new_r_Sync(irg, bl, n_pos, in);
+ if(!do_seq) {
+ if(n_pos >= 1) {
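+ /* arity is n_pos + 1: the store memories plus the Call memory pushed first */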
+ curr_mem = new_r_Sync(irg, bl, n_pos + 1, in);
+ } else {
+ curr_mem = get_Call_mem(irn);
+ }
+ }
obstack_free(obst, in);
}
}
}
- if(!mem_proj)
+ if(!mem_proj) {
mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
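+ /* keep the memory Proj alive even if nothing else uses it */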
+ keep_alive(mem_proj);
+ }
/* Clean up the stack frame if we allocated it */
- if(!no_alloc)
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
+ if(!no_alloc) {
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
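+ /* the shrinking IncSP must stay behind all memory effects of the call */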
+ add_irn_dep(curr_sp, mem_proj);
+ if(alloca_copy) {
+ add_irn_dep(curr_sp, alloca_copy);
+ alloca_copy = NULL;
+ }
+ }
}
be_abi_call_free(call);
* Adjust an alloca.
* The alloca is transformed into a back end alloca node and connected to the stack nodes.
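+ * The address of the allocated memory is copied away, since it may be used after
+ * further stack pointer modifications; the Copy is passed back through result_copy.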
*/
-static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
{
if (get_Alloc_where(alloc) == stack_alloc) {
ir_node *bl = get_nodes_block(alloc);
const ir_edge_t *edge;
ir_node *new_alloc;
+ ir_node *addr;
+ ir_node *copy;
foreach_out_edge(alloc, edge) {
ir_node *irn = get_edge_src_irn(edge);
/* Beware: currently Alloc nodes without a result can occur; only escape
analysis removes them, and that phase runs only for object-oriented
source. We kill such an Alloc here. */
- if (alloc_res == NULL) {
+ if (alloc_res == NULL && alloc_mem) {
exchange(alloc_mem, get_Alloc_mem(alloc));
return curr_sp;
}
env->call->flags.bits.try_omit_fp = 0;
new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
- exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
+ exchange(alloc, new_alloc);
if(alloc_mem != NULL)
- exchange(alloc_mem, new_r_NoMem(irg));
+ set_Proj_proj(alloc_mem, pn_be_AddSP_M);
+
+ /* fix the Proj number of the Alloc result */
+ set_Proj_proj(alloc_res, pn_be_AddSP_res);
- curr_sp = new_alloc;
+ addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
+
+ /* Copy the address away, since it could be used after further stack pointer modifications. */
+ /* Create the Copy pointing at curr_sp for now: rerouting the Alloc result's users
+ below must not redirect the Copy's own operand; the real address is set afterwards. */
+ *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+
+ /* Let all users of the Alloc() result now point to the copy. */
+ edges_reroute(alloc_res, copy, irg);
+
+ /* Rewire the copy appropriately. */
+ set_irn_n(copy, be_pos_Copy_op, addr);
+
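+ /* the result of the AddSP is the new stack pointer */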
+ curr_sp = alloc_res;
}
return curr_sp;
*/
static void link_calls_in_block_walker(ir_node *irn, void *data)
{
- if(is_Call(irn)) {
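+ /* stack Allocs modify the stack pointer as well, so they are linked here too */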
+ if(is_Call(irn) || (get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)) {
be_abi_irg_t *env = data;
ir_node *bl = get_nodes_block(irn);
void *save = get_irn_link(bl);
- env->call->flags.bits.irg_is_leaf = 0;
+ if (is_Call(irn))
+ env->call->flags.bits.irg_is_leaf = 0;
set_irn_link(irn, save);
set_irn_link(bl, irn);
if(n > 0) {
ir_node *keep;
ir_node **nodes;
+ ir_node *copy = NULL;
int i;
nodes = obstack_finish(&env->obst);
DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
switch(get_irn_opcode(irn)) {
case iro_Call:
- curr_sp = adjust_call(env, irn, curr_sp);
+ curr_sp = adjust_call(env, irn, curr_sp, copy);
break;
case iro_Alloc:
- curr_sp = adjust_alloc(env, irn, curr_sp);
+ curr_sp = adjust_alloc(env, irn, curr_sp, &copy);
break;
default:
break;
int stack_nr = get_Proj_proj(stack);
if(flags.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+ stack = be_new_IncSP(sp, irg, bl, stack, BE_STACK_FRAME_SIZE_EXPAND);
frame = stack;
}
arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
}
- stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+ stack = be_new_IncSP(sp, irg, bl, stack, BE_STACK_FRAME_SIZE_EXPAND);
+ add_irn_dep(stack, frame);
}
be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
pmap_entry *ent;
if(env->call->flags.bits.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+ stack = be_new_IncSP(sp, irg, bl, stack, BE_STACK_FRAME_SIZE_SHRINK);
+ add_irn_dep(stack, ret_mem);
}
else {
ir_graph *irg = env->birg->irg;
ir_node *bl = get_irg_start_block(irg);
ir_node *end = get_irg_end_block(irg);
- ir_node *no_mem = get_irg_no_mem(irg);
ir_node *mem = get_irg_initial_mem(irg);
ir_type *method_type = get_entity_type(get_irg_entity(irg));
pset *dont_save = pset_new_ptr(8);
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+ env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
barrier = create_barrier(env, bl, &mem, env->regs, 0);
restore_optimization_state(&state);
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
- env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
-
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
/* Lower all call nodes in the IRG. */
process_calls(env);
+ /*
+ Beware: initialize the backend ABI call object only after processing calls,
+ otherwise some information might not be available yet.
+ */
+ env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+
/* Process the IRG */
modify_irg(env);
{
struct fix_stack_walker_info *info = data;
- if(arch_irn_is(info->aenv, irn, modify_sp))
+ if (is_Block(irn))
+ return;
+
+ if (arch_irn_is(info->aenv, irn, modify_sp)) {
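+ /* a stack pointer modifier must yield a data value, never a memory or tuple result */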
+ assert(get_irn_mode(irn) != mode_M && get_irn_mode(irn) != mode_T);
pset_insert_ptr(info->nodes, irn);
+ }
}
void be_abi_fix_stack_nodes(be_abi_irg_t *env, be_lv_t *lv)
be_free_dominance_frontiers(df);
}
-/**
- * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
- * into -1 or 1, respectively.
- * @param irn The node.
- * @return 1, if the direction of the IncSP was along, -1 if against.
- */
-static int get_dir(ir_node *irn)
-{
- return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_shrink);
-}
-
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
{
- const arch_env_t *aenv = env->birg->main_env->arch_env;
+ const arch_env_t *arch_env = env->birg->main_env->arch_env;
int omit_fp = env->call->flags.bits.try_omit_fp;
ir_node *irn;
sched_foreach(bl, irn) {
/*
- If the node modifies the stack pointer by a constant offset,
- record that in the bias.
- */
- if(be_is_IncSP(irn)) {
- int ofs = be_get_IncSP_offset(irn);
- int dir = get_dir(irn);
-
- if(ofs == BE_STACK_FRAME_SIZE) {
- ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
- be_set_IncSP_offset(irn, ofs);
- }
-
- if(omit_fp)
- bias += dir * ofs;
+ Check whether the node relates to an entity on the stack frame.
+ If so, set the true offset (including the bias) for that
+ node.
+ */
+ entity *ent = arch_get_frame_entity(arch_env, irn);
+ if(ent) {
+ int offset = get_stack_entity_offset(env->frame, ent, bias);
+ arch_set_frame_offset(arch_env, irn, offset);
+ DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
}
/*
- Else check, if the node relates to an entity on the stack frame.
- If so, set the true offset (including the bias) for that
- node.
- */
- else {
- entity *ent = arch_get_frame_entity(aenv, irn);
- if(ent) {
- int offset = get_stack_entity_offset(env->frame, ent, bias);
- arch_set_frame_offset(aenv, irn, offset);
- DBG((env->dbg, LEVEL_2, "%F has offset %d\n", ent, offset));
+ If the node modifies the stack pointer by a constant offset,
+ record that in the bias.
+ */
+ if(arch_irn_is(arch_env, irn, modify_sp)) {
+ int ofs = arch_get_sp_bias(arch_env, irn);
+
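+ /* IncSPs may still carry a symbolic frame size; substitute the real size now */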
+ if(be_is_IncSP(irn)) {
+ if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
+ ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
+ be_set_IncSP_offset(irn, ofs);
+ } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
+ ofs = -get_type_size_bytes(get_irg_frame_type(env->birg->irg));
+ be_set_IncSP_offset(irn, ofs);
+ }
}
+
+ if(omit_fp)
+ bias += ofs;
}
}
return NULL;
}
-static void abi_set_stack_bias(const void *_self, ir_node *irn, int bias)
+static void abi_set_frame_entity(const void *_self, ir_node *irn, entity *ent)
+{
+}
+
+static void abi_set_frame_offset(const void *_self, ir_node *irn, int bias)
{
}
+static int abi_get_sp_bias(const void *self, const ir_node *irn)
+{
+ return 0;
+}
+
static const arch_irn_ops_if_t abi_irn_ops = {
abi_get_irn_reg_req,
abi_set_irn_reg,
abi_classify,
abi_get_flags,
abi_get_frame_entity,
- abi_set_stack_bias
+ abi_set_frame_entity,
+ abi_set_frame_offset,
+ abi_get_sp_bias,
+ NULL, /* get_inverse */
+ NULL, /* get_op_estimated_cost */
+ NULL, /* possible_memory_operand */
+ NULL, /* perform_memory_operand */
};
static const arch_irn_handler_t abi_irn_handler = {