const arch_register_t *reg;
entity *stack_ent;
unsigned alignment;
+ unsigned space_before;
+ unsigned space_after;
} be_abi_call_arg_t;
struct _be_abi_call_t {
call->cb = cb;
}
-void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment)
+/**
+ * Record that parameter @p arg_pos is passed on the stack.
+ * @param alignment     Required alignment of the argument slot (must be > 0).
+ * @param space_before  Extra bytes reserved before the argument.
+ * @param space_after   Extra bytes reserved after the argument.
+ */
+void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
{
+	be_abi_call_arg_t *arg;
+
+	/* Validate before storing anything, so an invalid alignment never
+	   ends up in the arg record. */
+	assert(alignment > 0 && "Alignment must be greater than 0");
+
-	be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
-	arg->on_stack = 1;
-	arg->alignment = alignment;
+	arg               = get_or_set_call_arg(call, 0, arg_pos, 1);
+	arg->on_stack     = 1;
+	arg->alignment    = alignment;
+	arg->space_before = space_before;
+	arg->space_after  = space_after;
-	assert(alignment > 0 && "Alignment must be greater than 0");
}
ir_mode *mach_mode = sp->reg_class->mode;
struct obstack *obst = &env->obst;
ir_node *no_mem = get_irg_no_mem(irg);
+ int no_alloc = call->flags.bits.frame_is_setup_on_call;
ir_node *res_proj = NULL;
int curr_res_proj = pn_Call_max;
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
assert(arg);
if(arg->on_stack) {
+ stack_size += arg->space_before;
+ stack_size = round_up2(stack_size, arg->alignment);
stack_size += get_type_size_bytes(get_method_param_type(mt, i));
+ stack_size += arg->space_after;
obstack_int_grow(obst, i);
n_pos++;
}
/* If there are some parameters which shall be passed on the stack. */
if(n_pos > 0) {
int curr_ofs = 0;
- int do_seq = call->flags.bits.store_args_sequential;
+ int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
/* Reverse list of stack parameters if call arguments are from left to right */
if(call->flags.bits.left_to_right) {
/*
- * If the stack is decreasing and we do not want to store sequentially,
+ * If the stack is decreasing, we do not want to store sequentially,
+ * and no one else has already allocated the call frame,
- * we allocate as much space on the stack all parameters need, by
+ * we allocate as much space on the stack as all parameters need, by
 * moving the stack pointer along the stack's direction.
*/
- if(stack_dir < 0 && !do_seq) {
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
+ if(stack_dir < 0 && !do_seq && !no_alloc) {
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_expand);
}
assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
for(i = 0; i < n_pos; ++i) {
- int p = pos[i];
- ir_node *param = get_Call_param(irn, p);
- ir_node *addr = curr_sp;
- ir_node *mem = NULL;
- type *param_type = get_method_param_type(mt, p);
- int param_size = get_type_size_bytes(param_type);
+ int p = pos[i];
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
+ ir_node *param = get_Call_param(irn, p);
+ ir_node *addr = curr_sp;
+ ir_node *mem = NULL;
+ type *param_type = get_method_param_type(mt, p);
+ int param_size = get_type_size_bytes(param_type) + arg->space_after;
+
+ curr_ofs += arg->space_before;
+ curr_ofs = round_up2(curr_ofs, arg->alignment);
/* Make the expression to compute the argument's offset. */
if(curr_ofs > 0) {
mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
}
- /* Make a memcopy for compound arguments. */
+ /* Make a mem copy for compound arguments. */
else {
assert(mode_is_reference(get_irn_mode(param)));
mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
*/
if(do_seq) {
curr_ofs = 0;
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_expand);
curr_mem = mem;
}
}
unspeakable Proj_T from the Call. Therefore, all real argument
Proj numbers must be increased by pn_Call_max
*/
- proj += pn_Call_max;
+ proj += pn_Call_max;
set_Proj_proj(res, proj);
obstack_ptr_grow(obst, res);
be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
}
- else
+ else
low_call = be_new_Call(irg, bl, curr_mem, curr_sp, call_ptr, curr_res_proj + pset_count(caller_save), n_low_args, in);
+ set_irn_dbg_info(low_call, get_irn_dbg_info(irn));
+
/*
TODO:
Set the register class of the call address to the same as the stack pointer's.
if(!mem_proj)
mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
- /* Make a Proj for the stack pointer. */
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_against);
+ /* Clean up the stack frame if we allocated it */
+ if(!no_alloc)
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_shrink);
}
be_abi_call_free(call);
*/
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
{
- if(get_Alloc_where(alloc) == stack_alloc) {
+ if (get_Alloc_where(alloc) == stack_alloc) {
ir_node *bl = get_nodes_block(alloc);
ir_graph *irg = get_irn_irg(bl);
ir_node *alloc_mem = NULL;
}
}
+ /* TODO: Beware: currently Alloc nodes without a result might happen,
+ only escape analysis kills them and this phase runs only for object
+ oriented source. So this must be fixed. */
assert(alloc_res != NULL);
exchange(alloc_res, env->isa->stack_dir < 0 ? new_alloc : curr_sp);
int stack_nr = get_Proj_proj(stack);
if(flags.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
+ stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
frame = stack;
}
arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
}
- stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_along);
+ stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
}
be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
pmap_entry *ent;
if(env->call->flags.bits.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
+ stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
}
else {
if(arg->on_stack) {
snprintf(buf, sizeof(buf), "param_%d", i);
arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
+ ofs += arg->space_before;
ofs = round_up2(ofs, arg->alignment);
set_entity_offset_bytes(arg->stack_ent, ofs);
+ ofs += arg->space_after;
ofs += get_type_size_bytes(param_type);
}
}
static void create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
{
ir_graph *irg = env->birg->irg;
- int i, n;
+ int n;
int n_regs = pmap_count(regs);
ir_node *irn;
ir_node **in;
rm = reg_map_to_arr(&env->obst, regs);
- for(i = 0, n = 0; i < n_regs; ++i, ++n)
- obstack_ptr_grow(&env->obst, rm[i].irn);
+ for(n = 0; n < n_regs; ++n)
+ obstack_ptr_grow(&env->obst, rm[n].irn);
if(mem) {
obstack_ptr_grow(&env->obst, *mem);
ir_node *proj;
const arch_register_t *reg = rm[n].reg;
- proj = new_r_Proj(env->birg->irg, bl, irn, get_irn_mode(rm[i].irn), n);
+ proj = new_r_Proj(env->birg->irg, bl, irn, get_irn_mode(rm[n].irn), n);
be_node_set_reg_class(irn, n, reg->reg_class);
if(in_req)
be_set_constr_single_reg(irn, n, reg);
create_barrier(env, bl, &mem, env->regs, 0);
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
+ env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
- frame_pointer = be_abi_reg_map_get(env->regs, sp);
+ frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
set_irg_frame(irg, frame_pointer);
/* Now, introduce stack param nodes for all parameters passed on the stack */
}
/* The in array for the new back end return is now ready. */
- ret = be_new_Return(irg, bl, n, in);
+ ret = be_new_Return(get_irn_dbg_info(irn), irg, bl, n, in);
/* Set the register classes of the return's parameter accordingly. */
for(i = 0; i < n; ++i)
}
}
+ del_pset(dont_save);
obstack_free(&env->obst, args);
}
env->dce_survivor = new_survive_dce();
env->birg = birg;
- env->dbg = firm_dbg_register("firm.be.abi");
env->stack_phis = pset_new_ptr(16);
env->init_sp = dummy = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
+ FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
}
/**
- * Translates a direction of an IncSP node (either be_stack_dir_against, or ...along)
+ * Translates a direction of an IncSP node (either be_stack_dir_shrink, or ...expand)
* into -1 or 1, respectively.
* @param irn The node.
- * @return 1, if the direction of the IncSP was along, -1 if against.
+ * @return 1, if the direction of the IncSP was expand, -1 if shrink.
*/
static int get_dir(ir_node *irn)
{
-	return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
+	/* shrink maps to -1, expand maps to +1 */
+	return be_get_IncSP_direction(irn) == be_stack_dir_shrink ? -1 : 1;
}
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)