arch_register_req_t sp_req;
arch_register_req_t sp_cls_req;
- DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
+ DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
};
static heights_t *ir_heights;
/* Flag: if set, try to omit the frame pointer if called by the backend */
static int be_omit_fp = 1;
-static int be_pic = 0;
/*
_ ____ ___ ____ _ _ _ _
and the spills.
*/
-static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent, int bias)
+static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent,
+ int bias)
{
ir_type *t = get_entity_owner(ent);
int ofs = get_entity_offset(ent);
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
ir_graph *irg = env->birg->irg;
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
+ const arch_env_t *arch_env = &env->birg->main_env->arch_env;
const arch_isa_t *isa = arch_env->isa;
ir_type *call_tp = get_Call_type(irn);
ir_node *call_ptr = get_Call_ptr(irn);
struct obstack *obst = &env->obst;
int no_alloc = call->flags.bits.frame_is_setup_on_call;
int n_res = get_method_n_ress(call_tp);
+ int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
ir_node *res_proj = NULL;
int n_reg_params = 0;
}
reg_param_idxs = obstack_finish(obst);
+ /*
+ * If the stack is decreasing and we do not want to store sequentially,
+ * or someone else allocated the call frame
+ * we allocate as much space on the stack as all parameters need, by
+ * moving the stack pointer along the stack's direction.
+ *
+ * Note: we also have to do this for stack_size == 0, because we may have
+ * to adjust stack alignment for the call.
+ */
+ if (stack_dir < 0 && !do_seq && !no_alloc) {
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size, 1);
+ }
+
/* If there are some parameters which shall be passed on the stack. */
if (n_stack_params > 0) {
int curr_ofs = 0;
- int do_seq = call->flags.bits.store_args_sequential && !no_alloc;
/*
* Reverse list of stack parameters if call arguments are from left to right.
}
}
- /*
- * If the stack is decreasing and we do not want to store sequentially,
- * or someone else allocated the call frame
- * we allocate as much space on the stack all parameters need, by
- * moving the stack pointer along the stack's direction.
- */
- if (stack_dir < 0 && !do_seq && !no_alloc) {
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
- }
-
curr_mem = get_Call_mem(irn);
if (! do_seq) {
obstack_ptr_grow(obst, curr_mem);
*/
if (do_seq) {
curr_ofs = 0;
- addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
+ addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before, 0);
add_irn_dep(curr_sp, curr_mem);
}
else {
mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_be_Call_M_regular);
keep_alive(mem_proj);
}
-
- /* Clean up the stack frame if we allocated it */
- if (! no_alloc) {
- curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
- }
+ }
+ /* Clean up the stack frame or revert alignment fixes if we allocated it */
+ if (! no_alloc) {
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size, 0);
}
be_abi_call_free(call);
ir_graph *irg, ir_node *block, dbg_info *dbg)
{
if (stack_alignment > 1) {
- ir_mode *mode = get_irn_mode(size);
- tarval *tv = new_tarval_from_long(stack_alignment-1, mode);
- ir_node *mask = new_r_Const(irg, block, mode, tv);
+ ir_mode *mode;
+ tarval *tv;
+ ir_node *mask;
+
+ assert(is_po2(stack_alignment));
+ mode = get_irn_mode(size);
+ tv = new_tarval_from_long(stack_alignment-1, mode);
+ mask = new_r_Const(irg, block, mode, tv);
size = new_rd_Add(dbg, irg, block, size, mask, mode);
tv = new_tarval_from_long(-(long)stack_alignment, mode);
We cannot omit it. */
env->call->flags.bits.try_omit_fp = 0;
- /* FIXME: size must be here round up for the stack alignment, but
- this must be transmitted from the backend. */
- stack_alignment = 4;
+ stack_alignment = env->isa->stack_alignment;
size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
new_alloc = be_new_AddSP(env->isa->sp, irg, block, curr_sp, size);
set_irn_dbg_info(new_alloc, dbg);
size = get_Free_size(free);
}
- /* FIXME: size must be here round up for the stack alignment, but
- this must be transmitted from the backend. */
- stack_alignment = 4;
- size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
+ stack_alignment = env->isa->stack_alignment;
+ size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
/* The stack pointer will be modified in an unknown manner.
We cannot omit it. */
static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, ir_entity ***param_map)
{
int dir = env->call->flags.bits.left_to_right ? 1 : -1;
- int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
+ int inc = env->birg->main_env->arch_env.isa->stack_dir * dir;
int n = get_method_n_params(method_type);
int curr = inc > 0 ? 0 : n - 1;
int ofs = 0;
be_set_constr_single_reg(irn, n, reg);
be_set_constr_single_reg(irn, pos, reg);
be_node_set_reg_class(irn, pos, reg->reg_class);
- arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
+ arch_set_irn_register(&env->birg->main_env->arch_env, proj, reg);
/* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
- if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
+ if(arch_register_type_is(reg, ignore) || arch_irn_is(&env->birg->main_env->arch_env, in[n], ignore))
flags |= arch_irn_flags_ignore;
- if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
+ if(arch_irn_is(&env->birg->main_env->arch_env, in[n], modify_sp))
flags |= arch_irn_flags_modify_sp;
be_node_set_flags(irn, pos, flags);
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
ir_node *mem, int n_res)
{
- be_abi_call_t *call = env->call;
- const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+ be_abi_call_t *call = env->call;
+ const arch_isa_t *isa = env->birg->main_env->arch_env.isa;
dbg_info *dbgi;
pmap *reg_map = pmap_create();
ir_node *keep = pmap_get(env->keep_map, bl);
static void modify_irg(be_abi_irg_t *env)
{
be_abi_call_t *call = env->call;
- const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+ const arch_isa_t *isa = env->birg->main_env->arch_env.isa;
const arch_register_t *sp = arch_isa_sp(isa);
ir_graph *irg = env->birg->irg;
ir_node *bl = get_irg_start_block(irg);
proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
pmap_insert(env->regs, (void *) reg, proj);
be_set_constr_single_reg(env->reg_params, pos, reg);
- arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
+ arch_set_irn_register(&env->birg->main_env->arch_env, proj, reg);
/*
* If the register is an ignore register,
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
+ env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
create_barrier(env, bl, &mem, env->regs, 0);
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
+ arch_set_irn_register(&env->birg->main_env->arch_env, env->init_sp, sp);
frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
set_irg_frame(irg, frame_pointer);
arity = get_irn_arity(call);
- /* the statereg inputs are the last n inputs of the calls */
+ /* the state reg inputs are the last n inputs of the calls */
for(s = 0; s < n_states; ++s) {
int inp = arity - n_states + s;
const arch_register_t *reg = stateregs[s];
}
}
+/**
+ * Create a trampoline entity for the given method.
+ */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
ir_type *type = get_entity_type(method);
return ent;
}
+/**
+ * Returns the trampoline entity for the given method.
+ */
+static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
+{
+ ir_entity *result = pmap_get(env->ent_trampoline_map, method);
+ if (result == NULL) {
+ result = create_trampoline(env, method);
+ pmap_insert(env->ent_trampoline_map, method, result);
+ }
+
+ return result;
+}
+
+/**
+ * Returns non-zero if a given entity can be accessed using a relative address.
+ */
static int can_address_relative(ir_entity *entity)
{
return get_entity_variability(entity) == variability_initialized
continue;
dbgi = get_irn_dbg_info(pred);
- trampoline = create_trampoline(be, entity);
+ trampoline = get_trampoline(be, entity);
trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code, trampoline, NULL);
set_irn_n(node, i, trampoline_const);
continue;
unsigned *limited_bitset;
be_omit_fp = birg->main_env->options->omit_fp;
- be_pic = birg->main_env->options->pic;
obstack_init(&env->obst);
- env->isa = birg->main_env->arch_env->isa;
+ env->isa = birg->main_env->arch_env.isa;
env->method_type = get_entity_type(get_irg_entity(irg));
env->call = be_abi_call_new(env->isa->sp->reg_class);
arch_isa_get_call_abi(env->isa, env->method_type, env->call);
env->calls = NEW_ARR_F(ir_node*, 0);
- if (be_pic) {
+ if (birg->main_env->options->pic) {
irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
}
Beware: init backend abi call object after processing calls,
otherwise some information might be not yet available.
*/
- env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+ env->cb = env->call->cb->init(env->call, &birg->main_env->arch_env, irg);
/* Process the IRG */
modify_irg(env);
arch_isa_t *isa;
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
- walker_env.arch_env = birg->main_env->arch_env;
+ walker_env.arch_env = &birg->main_env->arch_env;
isa = walker_env.arch_env->isa;
irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
DEL_ARR_F(walker_env.sp_nodes);
}
-static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
+static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
- int omit_fp = env->call->flags.bits.try_omit_fp;
- ir_node *irn;
+ const arch_env_t *arch_env = &env->birg->main_env->arch_env;
+ int omit_fp = env->call->flags.bits.try_omit_fp;
+ ir_node *irn;
+ int wanted_bias = real_bias;
sched_foreach(bl, irn) {
int ofs;
*/
ir_entity *ent = arch_get_frame_entity(arch_env, irn);
if(ent) {
+ int bias = omit_fp ? real_bias : 0;
int offset = get_stack_entity_offset(env->frame, ent, bias);
arch_set_frame_offset(arch_env, irn, offset);
- DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
+ DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
+ ent, offset, bias));
}
- if(omit_fp || be_is_IncSP(irn)) {
- /*
- * If the node modifies the stack pointer by a constant offset,
- * record that in the bias.
- */
- ofs = arch_get_sp_bias(arch_env, irn);
-
- if(be_is_IncSP(irn)) {
- if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
- ofs = (int)get_type_size_bytes(get_irg_frame_type(env->birg->irg));
- be_set_IncSP_offset(irn, ofs);
- } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
- ofs = - (int)get_type_size_bytes(get_irg_frame_type(env->birg->irg));
- be_set_IncSP_offset(irn, ofs);
+ /*
+ * If the node modifies the stack pointer by a constant offset,
+ * record that in the bias.
+ */
+ ofs = arch_get_sp_bias(arch_env, irn);
+
+ if(be_is_IncSP(irn)) {
+ /* fill in real stack frame size */
+ if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
+ ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ofs = (int) get_type_size_bytes(frame_type);
+ be_set_IncSP_offset(irn, ofs);
+ } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
+ ir_type *frame_type = get_irg_frame_type(env->birg->irg);
+ ofs = - (int)get_type_size_bytes(frame_type);
+ be_set_IncSP_offset(irn, ofs);
+ } else {
+ if (be_get_IncSP_align(irn)) {
+ /* patch IncSP to produce an aligned stack pointer */
+ ir_type *between_type = env->frame->between_type;
+ int between_size = get_type_size_bytes(between_type);
+ int alignment = env->isa->stack_alignment;
+ int delta = (real_bias + ofs + between_size) % env->isa->stack_alignment;
+ assert(ofs >= 0);
+ if (delta > 0) {
+ be_set_IncSP_offset(irn, ofs + alignment - delta);
+ real_bias += alignment - delta;
+ }
+ } else {
+ /* adjust so real_bias corresponds with wanted_bias */
+ int delta = wanted_bias - real_bias;
+ assert(delta <= 0);
+ if(delta != 0) {
+ be_set_IncSP_offset(irn, ofs + delta);
+ real_bias += delta;
+ }
}
}
-
- if(omit_fp)
- bias += ofs;
}
+
+ real_bias += ofs;
+ wanted_bias += ofs;
}
- return bias;
+ assert(real_bias == wanted_bias);
+ return real_bias;
}
/**
*/
struct bias_walk {
be_abi_irg_t *env; /**< The ABI irg environment. */
- int start_block_bias; /**< The bias at the end of the start block. */
- ir_node *start_block; /**< The start block of the current graph. */
+ int start_block_bias; /**< The bias at the end of the start block. */
+ int between_size;
+ ir_node *start_block; /**< The start block of the current graph. */
};
/**
/* Determine the stack bias at the end of the start block. */
bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
+ bw.between_size = get_type_size_bytes(env->frame->between_type);
/* fix the bias is all other blocks */
bw.env = env;