* @author Sebastian Hack, Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include "obst.h"
#include "offset.h"
struct _be_abi_irg_t {
struct obstack obst;
- be_stack_layout_t *frame; /**< The stack frame model. */
be_irg_t *birg; /**< The back end IRG. */
const arch_env_t *arch_env;
survive_dce_t *dce_survivor;
pmap *keep_map; /**< mapping blocks to keep nodes. */
pset *ignore_regs; /**< Additional registers which shall be ignored. */
- ir_node **calls; /**< flexible array containing all be_Call nodes */
+ ir_node **calls; /**< flexible array containing all be_Call nodes */
+
+ arch_register_req_t sp_req;
+ arch_register_req_t sp_cls_req;
- arch_register_req_t sp_req;
- arch_register_req_t sp_cls_req;
+ be_stack_layout_t frame; /**< The stack frame model. */
DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
};
*/
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
{
- be_abi_call_t *call = xmalloc(sizeof(call[0]));
- memset(call, 0, sizeof(call[0]));
+ be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
call->flags.val = 0;
call->params = new_set(cmp_call_arg, 16);
ir_type *t = get_entity_owner(ent);
int ofs = get_entity_offset(ent);
- int i, index;
+ int index;
/* Find the type the entity is contained in. */
- for(index = 0; index < N_FRAME_TYPES; ++index) {
- if(frame->order[index] == t)
+ for (index = 0; index < N_FRAME_TYPES; ++index) {
+ if (frame->order[index] == t)
break;
+ /* Add the size of all the types below the one of the entity to the entity's offset */
+ ofs += get_type_size_bytes(frame->order[index]);
}
- /* Add the size of all the types below the one of the entity to the entity's offset */
- for(i = 0; i < index; ++i)
- ofs += get_type_size_bytes(frame->order[i]);
-
/* correct the offset by the initial position of the frame pointer */
ofs -= frame->initial_offset;
for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
ir_entity *ent = get_compound_member(t, i);
- if(get_entity_offset(ent) == offset)
+ if (get_entity_offset(ent) == offset)
return ent;
}
frame->between_type = between;
frame->frame_type = locals;
frame->initial_offset = 0;
+ frame->initial_bias = 0;
frame->stack_dir = stack_dir;
frame->order[1] = between;
frame->param_map = param_map;
}
/** TODO: this is not correct for cases where return values are passed
- * on the stack, but no known ABI does this currentl...
+ * on the stack, but no known ABI does this currently...
*/
n_reg_results = n_res;
curr_sp = new_r_Proj(irg, bl, low_call, get_irn_mode(curr_sp),
pn_be_Call_sp);
be_set_constr_single_reg(low_call, BE_OUT_POS(pn_be_Call_sp), sp);
- arch_set_irn_register(arch_env, curr_sp, sp);
+ arch_set_irn_register(curr_sp, sp);
be_node_set_flags(low_call, BE_OUT_POS(pn_be_Call_sp),
arch_irn_flags_ignore | arch_irn_flags_modify_sp);
assert(arg->in_reg);
be_set_constr_single_reg(low_call, BE_OUT_POS(pn), arg->reg);
- arch_set_irn_register(arch_env, proj, arg->reg);
+ arch_set_irn_register(proj, arg->reg);
}
obstack_free(obst, in);
exchange(irn, low_call);
/* kill the ProjT node */
if (res_proj != NULL) {
- be_kill_node(res_proj);
+ kill_node(res_proj);
}
/* Make additional projs for the caller save registers
/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
- arch_set_irn_register(arch_env, proj, reg);
+ arch_set_irn_register(proj, reg);
/* a call can produce ignore registers, in this case set the flag and register for the Proj */
if (arch_register_type_is(reg, ignore)) {
for(i = 0; i < n_reg_results; ++i) {
ir_node *proj = res_projs[i];
- const arch_register_t *reg = arch_get_irn_register(arch_env, proj);
+ const arch_register_t *reg = arch_get_irn_register(proj);
set_irn_link(proj, (void*) reg);
obstack_ptr_grow(obst, proj);
}
obstack_init(&obst);
/* Create a Perm after the RegParams node to delimit it. */
- for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
+ for (i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
ir_node *perm;
ir_node **in;
int n_regs;
- for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
+ for (n_regs = 0, j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = &cls->regs[j];
ir_node *irn = pmap_get(regs, (void *) reg);
obstack_ptr_grow(&obst, NULL);
in = obstack_finish(&obst);
- if(n_regs > 0) {
+ if (n_regs > 0) {
perm = be_new_Perm(cls, irg, bl, n_regs, in);
- for(j = 0; j < n_regs; ++j) {
+ for (j = 0; j < n_regs; ++j) {
ir_node *arg = in[j];
arch_register_t *reg = get_irn_link(arg);
pmap_insert(regs, reg, arg);
rm = reg_map_to_arr(&env->obst, regs);
- for(n = 0; n < n_regs; ++n)
+ for (n = 0; n < n_regs; ++n)
obstack_ptr_grow(&env->obst, rm[n].irn);
- if(mem) {
+ if (mem) {
obstack_ptr_grow(&env->obst, *mem);
n++;
}
proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
be_node_set_reg_class(irn, n, reg->reg_class);
- if(in_req)
+ if (in_req)
be_set_constr_single_reg(irn, n, reg);
be_set_constr_single_reg(irn, pos, reg);
be_node_set_reg_class(irn, pos, reg->reg_class);
- arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
+ arch_set_irn_register(proj, reg);
/* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
- if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
+ if (arch_register_type_is(reg, ignore) || arch_irn_is(in[n], ignore))
flags |= arch_irn_flags_ignore;
- if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
+ if (arch_irn_is(in[n], modify_sp))
flags |= arch_irn_flags_modify_sp;
be_node_set_flags(irn, pos, flags);
pmap_insert(regs, (void *) reg, proj);
}
- if(mem) {
+ if (mem) {
*mem = new_r_Proj(irg, bl, irn, mode_M, n);
}
stack = be_abi_reg_map_get(env->regs, arch_env->sp);
if (keep) {
stack = get_irn_n(keep, 0);
- be_kill_node(keep);
+ kill_node(keep);
remove_End_keepalive(get_irg_end(env->birg->irg), keep);
}
/* Insert results for Return into the register map. */
- for(i = 0; i < n_res; ++i) {
+ for (i = 0; i < n_res; ++i) {
ir_node *res = get_Return_res(irn, i);
be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
assert(arg->in_reg && "return value must be passed in register");
/* clear SP entry, since it has already been grown. */
pmap_insert(reg_map, (void *) arch_env->sp, NULL);
- for(i = 0; i < n_res; ++i) {
+ for (i = 0; i < n_res; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
in[n] = be_abi_reg_map_get(reg_map, arg->reg);
/* grow the rest of the stuff. */
foreach_pmap(reg_map, ent) {
- if(ent->value) {
+ if (ent->value) {
in[n] = ent->value;
regs[n++] = ent->key;
}
}
/* The in array for the new back end return is now ready. */
- if(irn != NULL) {
+ if (irn != NULL) {
dbgi = get_irn_dbg_info(irn);
} else {
dbgi = NULL;
ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
- for(i = 0; i < n; ++i)
- if(regs[i])
+ for (i = 0; i < n; ++i)
+ if (regs[i])
be_node_set_reg_class(ret, i, regs[i]->reg_class);
/* Free the space of the Epilog's in array and the register <-> proj map. */
const arch_env_t *arch_env= env->birg->main_env->arch_env;
const arch_register_t *sp = arch_env_sp(arch_env);
ir_graph *irg = env->birg->irg;
- ir_node *bl = get_irg_start_block(irg);
- ir_node *end = get_irg_end_block(irg);
- ir_node *old_mem = get_irg_initial_mem(irg);
+ ir_node *start_bl;
+ ir_node *end;
+ ir_node *old_mem;
ir_node *new_mem_proj;
ir_node *mem;
ir_type *method_type = get_entity_type(get_irg_entity(irg));
- pset *dont_save = pset_new_ptr(8);
int n_params;
int i, n;
DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
+ /* Must fetch memory here, otherwise the start Barrier gets the wrong
+ * memory, which leads to loops in the DAG. */
+ old_mem = get_irg_initial_mem(irg);
+
/* set the links of all frame entities to NULL, we use it
to detect if an entity is already linked in the value_param_list */
tp = get_method_value_param_type(method_type);
/* value_param_base anchor is not needed anymore now */
value_param_base = get_irg_value_param_base(irg);
- be_kill_node(value_param_base);
+ kill_node(value_param_base);
set_irg_value_param_base(irg, new_r_Bad(irg));
- env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
env->regs = pmap_create();
used_proj_nr = bitset_alloca(1024);
arg_type = compute_arg_type(env, call, method_type, ¶m_map);
bet_type = call->cb->get_between_type(env->cb);
- stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
+ stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
/* Count the register params and add them to the number of Projs for the RegParams node */
- for(i = 0; i < n_params; ++i) {
+ for (i = 0; i < n_params; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
- if(arg->in_reg && args[i]) {
+ if (arg->in_reg && args[i]) {
assert(arg->reg != sp && "cannot use stack pointer as parameter register");
assert(i == get_Proj_proj(args[i]));
}
/* Collect all callee-save registers */
- for(i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
+ for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
- for(j = 0; j < cls->n_regs; ++j) {
+ for (j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = &cls->regs[j];
- if(arch_register_type_is(reg, callee_save) ||
+ if (arch_register_type_is(reg, callee_save) ||
arch_register_type_is(reg, state)) {
pmap_insert(env->regs, (void *) reg, NULL);
}
*/
rm = reg_map_to_arr(&env->obst, env->regs);
- for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
+ for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
arch_register_t *reg = (void *) rm[i].reg;
ir_mode *mode = reg->reg_class->mode;
long nr = i;
proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
pmap_insert(env->regs, (void *) reg, proj);
be_set_constr_single_reg(env->reg_params, pos, reg);
- arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
+ arch_set_irn_register(proj, reg);
/*
* If the register is an ignore register,
* The Proj for that register shall also be ignored during register allocation.
*/
- if(arch_register_type_is(reg, ignore))
+ if (arch_register_type_is(reg, ignore))
flags |= arch_irn_flags_ignore;
- if(reg == sp)
+ if (reg == sp)
flags |= arch_irn_flags_modify_sp;
be_node_set_flags(env->reg_params, pos, flags);
mem = new_mem_proj;
/* Generate the Prologue */
- fp_reg = call->cb->prologue(env->cb, &mem, env->regs);
+ fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
+ start_bl = get_irg_start_block(irg);
+ env->init_sp = be_new_IncSP(sp, irg, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
- create_barrier(env, bl, &mem, env->regs, 0);
+ create_barrier(env, start_bl, &mem, env->regs, 0);
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
+ arch_set_irn_register(env->init_sp, sp);
frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
set_irg_frame(irg, frame_pointer);
set_irg_initial_mem(irg, mem);
/* Now, introduce stack param nodes for all parameters passed on the stack */
- for(i = 0; i < n_params; ++i) {
+ for (i = 0; i < n_params; ++i) {
ir_node *arg_proj = args[i];
ir_node *repl = NULL;
- if(arg_proj != NULL) {
+ if (arg_proj != NULL) {
be_abi_call_arg_t *arg;
ir_type *param_type;
int nr = get_Proj_proj(arg_proj);
if (arg->in_reg) {
repl = pmap_get(env->regs, (void *) arg->reg);
- } else if(arg->on_stack) {
+ } else if (arg->on_stack) {
ir_node *addr = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
/* For atomic parameters which are actually used, we create a Load node. */
/* the arg proj is not needed anymore now and should be only used by the anchor */
assert(get_irn_n_edges(arg_tuple) == 1);
- be_kill_node(arg_tuple);
+ kill_node(arg_tuple);
set_irg_args(irg, new_rd_Bad(irg));
/* All Return nodes hang on the End node, so look for them there. */
+ end = get_irg_end_block(irg);
for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
ir_node *irn = get_Block_cfgpred(end, i);
/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
the code is dead and will never be executed. */
- del_pset(dont_save);
obstack_free(&env->obst, args);
/* handle start block here (place a jump in the block) */
/* Collect caller save registers */
n = arch_env_get_n_reg_class(arch_env);
- for(i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
unsigned j;
const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
- for(j = 0; j < cls->n_regs; ++j) {
+ for (j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = arch_register_for_index(cls, j);
- if(arch_register_type_is(reg, state)) {
+ if (arch_register_type_is(reg, state)) {
ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
}
}
n = ARR_LEN(env->calls);
n_states = ARR_LEN(stateregs);
- for(i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
int s, arity;
ir_node *call = env->calls[i];
arity = get_irn_arity(call);
/* the state reg inputs are the last n inputs of the calls */
- for(s = 0; s < n_states; ++s) {
+ for (s = 0; s < n_states; ++s) {
int inp = arity - n_states + s;
const arch_register_t *reg = stateregs[s];
ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
set_irn_n(call, inp, regnode);
}
}
+
+ DEL_ARR_F(stateregs);
}
/**
return result;
}
+/**
+ * Create a hidden indirection entity ("L<name>$non_lazy_ptr") for a global
+ * entity, placed in the backend's PIC symbols segment.  Position independent
+ * code then reaches the real entity through one extra pointer load.
+ * NOTE(review): the "$non_lazy_ptr" mangling looks Mach-O specific — confirm
+ * this is only emitted for targets that use non-lazy symbol pointers.
+ *
+ * @param be      backend main environment; provides the pic_symbols_type
+ *                compound the new entity is appended to
+ * @param entity  the global entity to build an indirection for
+ * @return the new pointer entity (local visibility, uninitialized — the
+ *         linker/loader fills in the address)
+ */
+static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
+{
+ ident *old_id = get_entity_ld_ident(entity);
+ ident *id = mangle3("L", old_id, "$non_lazy_ptr");
+ ir_type *e_type = get_entity_type(entity);
+ ir_type *type = new_type_pointer(id, e_type, mode_P_data);
+ ir_type *parent = be->pic_symbols_type;
+ ir_entity *ent = new_entity(parent, old_id, type);
+ set_entity_ld_ident(ent, id);
+ set_entity_visibility(ent, visibility_local);
+ set_entity_variability(ent, variability_uninitialized);
+
+ return ent;
+}
+
+/**
+ * Return the PIC indirection entity for @p entity, creating it on first
+ * use and caching it in env->ent_pic_symbol_map so each global gets
+ * exactly one "$non_lazy_ptr" slot.
+ *
+ * @param env     backend main environment holding the entity -> PIC-symbol map
+ * @param entity  the global entity to look up
+ * @return the (possibly newly created) indirection entity
+ */
+static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
+{
+ ir_entity *result = pmap_get(env->ent_pic_symbol_map, entity);
+ if (result == NULL) {
+ result = create_pic_symbol(env, entity);
+ pmap_insert(env->ent_pic_symbol_map, entity, result);
+ }
+
+ return result;
+}
+
+
+
/**
* Returns non-zero if a given entity can be accessed using a relative address.
*/
arity = get_irn_arity(node);
for (i = 0; i < arity; ++i) {
+ dbg_info *dbgi;
ir_node *pred = get_irn_n(node, i);
ir_entity *entity;
+ ir_entity *pic_symbol;
+ ir_node *pic_symconst;
+
if (!is_SymConst(pred))
continue;
/* calls can jump to relative addresses, so we can directly jump to
the (relatively) known call address or the trampoline */
if (is_Call(node) && i == 1) {
- dbg_info *dbgi;
ir_entity *trampoline;
ir_node *trampoline_const;
dbgi = get_irn_dbg_info(pred);
trampoline = get_trampoline(be, entity);
- trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code, trampoline, NULL);
+ trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
+ trampoline, NULL);
set_irn_n(node, i, trampoline_const);
continue;
}
mode = get_irn_mode(pred);
unknown = new_r_Unknown(irg, mode);
pic_base = arch_code_generator_get_pic_base(env->birg->cg);
- add = new_r_Add(irg, block, pic_base, pred, mode);
-
- /* make sure the walker doesn't visit this add again */
- mark_irn_visited(add);
/* all ok now for locally constructed stuff */
if (can_address_relative(entity)) {
+ ir_node *add = new_r_Add(irg, block, pic_base, pred, mode);
+
+ /* make sure the walker doesn't visit this add again */
+ mark_irn_visited(add);
set_irn_n(node, i, add);
continue;
}
+ /* get entry from pic symbol segment */
+ dbgi = get_irn_dbg_info(pred);
+ pic_symbol = get_pic_symbol(be, entity);
+ pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
+ pic_symbol, NULL);
+ add = new_r_Add(irg, block, pic_base, pic_symconst, mode);
+ mark_irn_visited(add);
+
/* we need an extra indirection for global data outside our current
module. The loads are always safe and can therefore float
and need no memory input */
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
{
- be_abi_irg_t *env = xmalloc(sizeof(env[0]));
+ be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
ir_node *old_frame = get_irg_frame(birg->irg);
ir_graph *irg = birg->irg;
bitset_set(bs, reg->index);
}
+/**
+ * Fill a raw bitset with the allocatable registers of a register class:
+ * first set every register of @p cls that is not statically marked as
+ * ignore, then clear the additional per-ABI ignore registers collected in
+ * abi->ignore_regs (only those belonging to @p cls).
+ *
+ * @param abi         the ABI environment supplying the extra ignore set
+ * @param cls         the register class to scan
+ * @param raw_bitset  output bitset, indexed by register index within cls;
+ *                    caller must provide it zero-initialized and large
+ *                    enough for cls->n_regs bits
+ */
+void be_abi_set_non_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, unsigned *raw_bitset)
+{
+ unsigned i;
+ arch_register_t *reg;
+
+ for (i = 0; i < cls->n_regs; ++i) {
+ if (arch_register_type_is(&cls->regs[i], ignore))
+ continue;
+
+ rbitset_set(raw_bitset, i);
+ }
+
+ /* remove the registers the ABI additionally decided to ignore */
+ for (reg = pset_first(abi->ignore_regs); reg != NULL;
+ reg = pset_next(abi->ignore_regs)) {
+ if (reg->reg_class != cls)
+ continue;
+
+ rbitset_clear(raw_bitset, reg->index);
+ }
+}
+
+
/* Returns the stack layout from a abi environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
- return abi->frame;
+ return &abi->frame;
}
/*
typedef struct fix_stack_walker_env_t {
node_array sp_nodes;
- const arch_env_t *arch_env;
} fix_stack_walker_env_t;
/**
{
fix_stack_walker_env_t *env = data;
- if (arch_irn_is(env->arch_env, node, modify_sp)) {
+ if (arch_irn_is(node, modify_sp)) {
assert(get_irn_mode(node) != mode_M && get_irn_mode(node) != mode_T);
ARR_APP1(ir_node*, env->sp_nodes, node);
}
fix_stack_walker_env_t walker_env;
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
- walker_env.arch_env = birg->main_env->arch_env;
irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
* now.
*/
len = ARR_LEN(walker_env.sp_nodes);
- if(len == 0) {
+ if (len == 0) {
DEL_ARR_F(walker_env.sp_nodes);
return;
}
len = ARR_LEN(phis);
for(i = 0; i < len; ++i) {
ir_node *phi = phis[i];
- be_set_phi_reg_req(walker_env.arch_env, phi, &env->sp_req);
- be_set_phi_flags(walker_env.arch_env, phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
- arch_set_irn_register(walker_env.arch_env, phi, env->arch_env->sp);
+ be_set_phi_reg_req(phi, &env->sp_req);
+ be_set_phi_flags(phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
+ arch_set_irn_register(phi, env->arch_env->sp);
}
be_ssa_construction_destroy(&senv);
DEL_ARR_F(walker_env.sp_nodes);
}
+/**
+ * Fix all stack accessing operations in the block bl.
+ *
+ * @param env the abi environment
+ * @param bl the block to process
+ * @param real_bias the bias value
+ *
+ * @return the bias at the end of this block
+ */
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
int omit_fp = env->call->flags.bits.try_omit_fp;
ir_node *irn;
int wanted_bias = real_bias;
If so, set the true offset (including the bias) for that
node.
*/
- ir_entity *ent = arch_get_frame_entity(arch_env, irn);
- if(ent) {
+ ir_entity *ent = arch_get_frame_entity(irn);
+ if (ent) {
int bias = omit_fp ? real_bias : 0;
- int offset = get_stack_entity_offset(env->frame, ent, bias);
- arch_set_frame_offset(arch_env, irn, offset);
+ int offset = get_stack_entity_offset(&env->frame, ent, bias);
+ arch_set_frame_offset(irn, offset);
DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
ent, offset, bias));
}
* If the node modifies the stack pointer by a constant offset,
* record that in the bias.
*/
- ofs = arch_get_sp_bias(arch_env, irn);
+ ofs = arch_get_sp_bias(irn);
- if(be_is_IncSP(irn)) {
+ if (be_is_IncSP(irn)) {
/* fill in real stack frame size */
- if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
+ if (ofs == BE_STACK_FRAME_SIZE_EXPAND) {
ir_type *frame_type = get_irg_frame_type(env->birg->irg);
ofs = (int) get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
- } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
+ } else if (ofs == BE_STACK_FRAME_SIZE_SHRINK) {
ir_type *frame_type = get_irg_frame_type(env->birg->irg);
ofs = - (int)get_type_size_bytes(frame_type);
be_set_IncSP_offset(irn, ofs);
} else {
if (be_get_IncSP_align(irn)) {
/* patch IncSP to produce an aligned stack pointer */
- ir_type *between_type = env->frame->between_type;
+ ir_type *between_type = env->frame.between_type;
int between_size = get_type_size_bytes(between_type);
int alignment = 1 << env->arch_env->stack_alignment;
int delta = (real_bias + ofs + between_size) & (alignment - 1);
/* adjust so real_bias corresponds with wanted_bias */
int delta = wanted_bias - real_bias;
assert(delta <= 0);
- if(delta != 0) {
+ if (delta != 0) {
be_set_IncSP_offset(irn, ofs + delta);
real_bias += delta;
}
};
/**
- * Block-Walker: fix all stack offsets
+ * Block-Walker: fix all stack offsets for all blocks
+ * except the start block
*/
static void stack_bias_walker(ir_node *bl, void *data)
{
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
- ir_graph *irg = env->birg->irg;
- struct bias_walk bw;
+ ir_graph *irg = env->birg->irg;
+ struct bias_walk bw;
- stack_frame_compute_initial_offset(env->frame);
- // stack_layout_dump(stdout, env->frame);
+ stack_frame_compute_initial_offset(&env->frame);
+ // stack_layout_dump(stdout, &env->frame);
/* Determine the stack bias at the end of the start block. */
- bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
- bw.between_size = get_type_size_bytes(env->frame->between_type);
+ bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
+ bw.between_size = get_type_size_bytes(env->frame.between_type);
/* fix the bias is all other blocks */
bw.env = env;