* @author Sebastian Hack, Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include "obst.h"
#include "offset.h"
} be_abi_call_arg_t;
struct _be_abi_call_t {
- be_abi_call_flags_t flags;
- int pop;
+ be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
+ int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
const be_abi_callbacks_t *cb;
ir_type *between_type;
set *params;
- const arch_register_class_t *cls_addr;
+ const arch_register_class_t *cls_addr; /**< register class of the call address */
};
+/**
+ * The ABI information for the current birg.
+ */
struct _be_abi_irg_t {
struct obstack obst;
- be_stack_layout_t *frame; /**< The stack frame model. */
be_irg_t *birg; /**< The back end IRG. */
const arch_env_t *arch_env;
survive_dce_t *dce_survivor;
pmap *keep_map; /**< mapping blocks to keep nodes. */
pset *ignore_regs; /**< Additional registers which shall be ignored. */
- ir_node **calls; /**< flexible array containing all be_Call nodes */
+ ir_node **calls; /**< flexible array containing all be_Call nodes */
- arch_register_req_t sp_req;
- arch_register_req_t sp_cls_req;
+ arch_register_req_t sp_req;
+ arch_register_req_t sp_cls_req;
+
+ be_stack_layout_t frame; /**< The stack frame model. */
DEBUG_ONLY(firm_dbg_module_t *dbg;) /**< The debugging module. */
};
}
/**
- * Get or set an ABI call object argument.
+ * Get an ABI call object argument.
*
* @param call the abi call
* @param is_res true for call results, false for call arguments
* @param pos position of the argument
- * @param do_insert true if the argument is set, false if it's retrieved
*/
-static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
+static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
{
be_abi_call_arg_t arg;
unsigned hash;
hash = is_res * 128 + pos;
- return do_insert
- ? set_insert(call->params, &arg, sizeof(arg), hash)
- : set_find(call->params, &arg, sizeof(arg), hash);
+ return set_find(call->params, &arg, sizeof(arg), hash);
}
/**
- * Retrieve an ABI call object argument.
+ * Set an ABI call object argument.
*
- * @param call the ABI call object
+ * @param call the abi call
* @param is_res true for call results, false for call arguments
* @param pos position of the argument
*/
-static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
+static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
{
- return get_or_set_call_arg(call, is_res, pos, 0);
+ be_abi_call_arg_t arg;
+ unsigned hash;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.is_res = is_res;
+ arg.pos = pos;
+
+ hash = is_res * 128 + pos;
+
+ return set_insert(call->params, &arg, sizeof(arg), hash);
}
/* Set the flags for a call. */
call->cb = cb;
}
+/* Sets the number of bytes the stack frame is shrunk by the callee on return */
void be_abi_call_set_pop(be_abi_call_t *call, int pop)
{
assert(pop >= 0);
void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
{
- be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
+ be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
arg->on_stack = 1;
arg->load_mode = load_mode;
arg->alignment = alignment;
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
{
- be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
+ be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
arg->in_reg = 1;
arg->reg = reg;
}
void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
{
- be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
+ be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);
arg->in_reg = 1;
arg->reg = reg;
}
/**
* Constructor for a new ABI call object.
*
+ * @param cls_addr register class of the call address
+ *
* @return the new ABI call object
*/
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
{
- be_abi_call_t *call = xmalloc(sizeof(call[0]));
- memset(call, 0, sizeof(call[0]));
+ be_abi_call_t *call = XMALLOCZ(be_abi_call_t);
call->flags.val = 0;
call->params = new_set(cmp_call_arg, 16);
* Returns non-zero if the call argument at given position
* is transferred on the stack.
*/
-static INLINE int is_on_stack(be_abi_call_t *call, int pos)
+static inline int is_on_stack(be_abi_call_t *call, int pos)
{
be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
return arg && !arg->in_reg;
int n_stack_params = 0;
int n_ins;
- ir_node *low_call;
- ir_node **in;
- ir_node **res_projs;
- int n_reg_results = 0;
- const arch_register_t *reg;
- const ir_edge_t *edge;
- int *reg_param_idxs;
- int *stack_param_idx;
- int i, n;
+ ir_node *low_call;
+ ir_node **in;
+ ir_node **res_projs;
+ int n_reg_results = 0;
+ const arch_register_t *reg;
+ const ir_edge_t *edge;
+ int *reg_param_idxs;
+ int *stack_param_idx;
+ int i;
+ int n;
+ dbg_info *dbgi;
/* Let the isa fill out the abi description for that call node. */
arch_env_get_call_abi(arch_env, call_tp, call);
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size, 1);
}
+ dbgi = get_irn_dbg_info(irn);
/* If there are some parameters which shall be passed on the stack. */
if (n_stack_params > 0) {
int curr_ofs = 0;
if (is_atomic_type(param_type)) {
ir_node *store;
ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
- store = new_r_Store(irg, bl, mem_input, addr, param);
+ store = new_rd_Store(dbgi, irg, bl, mem_input, addr, param);
mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
}
ir_node *copy;
assert(mode_is_reference(get_irn_mode(param)));
- copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
+ copy = new_rd_CopyB(dbgi, irg, bl, curr_mem, addr, param, param_type);
mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
}
/* search the greatest result proj number */
- res_projs = alloca(n_res * sizeof(res_projs[0]));
- memset(res_projs, 0, n_res * sizeof(res_projs[0]));
+ res_projs = ALLOCANZ(ir_node*, n_res);
foreach_out_edge(irn, edge) {
const ir_edge_t *res_edge;
if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
/* direct call */
- low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
- curr_sp, curr_sp,
+ low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
n_ins, in, get_Call_type(irn));
be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
} else {
/* indirect call */
- low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
- curr_sp, call_ptr,
+ low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
n_ins, in, get_Call_type(irn));
}
/* create new stack pointer */
curr_sp = new_r_Proj(irg, bl, low_call, get_irn_mode(curr_sp),
pn_be_Call_sp);
- be_set_constr_single_reg(low_call, BE_OUT_POS(pn_be_Call_sp), sp);
- arch_set_irn_register(arch_env, curr_sp, sp);
- be_node_set_flags(low_call, BE_OUT_POS(pn_be_Call_sp),
- arch_irn_flags_ignore | arch_irn_flags_modify_sp);
+ be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
+ arch_register_req_type_ignore | arch_register_req_type_produces_sp);
+ arch_set_irn_register(curr_sp, sp);
for(i = 0; i < n_res; ++i) {
int pn;
Set the register class of the call address to
the backend provided class (default: stack pointer class)
*/
- be_node_set_reg_class(low_call, be_pos_Call_ptr, call->cls_addr);
+ be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
assert(arg->reg != NULL);
- be_set_constr_single_reg(low_call, be_pos_Call_first_arg + i, arg->reg);
+ be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
+ arg->reg, 0);
}
/* Set the register constraints of the results. */
int pn = get_Proj_proj(proj);
assert(arg->in_reg);
- be_set_constr_single_reg(low_call, BE_OUT_POS(pn), arg->reg);
- arch_set_irn_register(arch_env, proj, arg->reg);
+ be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
+ arch_set_irn_register(proj, arg->reg);
}
obstack_free(obst, in);
exchange(irn, low_call);
curr_res_proj);
/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
- be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
- arch_set_irn_register(arch_env, proj, reg);
-
- /* a call can produce ignore registers, in this case set the flag and register for the Proj */
- if (arch_register_type_is(reg, ignore)) {
- be_node_set_flags(low_call, BE_OUT_POS(curr_res_proj),
- arch_irn_flags_ignore);
- }
+ be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
+ arch_set_irn_register(proj, reg);
set_irn_link(proj, (void*) reg);
obstack_ptr_grow(obst, proj);
for(i = 0; i < n_reg_results; ++i) {
ir_node *proj = res_projs[i];
- const arch_register_t *reg = arch_get_irn_register(arch_env, proj);
+ const arch_register_t *reg = arch_get_irn_register(proj);
set_irn_link(proj, (void*) reg);
obstack_ptr_grow(obst, proj);
}
keep = be_new_Keep(NULL, irg, bl, n, in);
for (i = 0; i < n; ++i) {
const arch_register_t *reg = get_irn_link(in[i]);
- be_node_set_reg_class(keep, i, reg->reg_class);
+ be_node_set_reg_class_in(keep, i, reg->reg_class);
}
obstack_free(obst, in);
}
return res;
}
-#if 0
-static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
-{
- int i, j, n;
- struct obstack obst;
-
- obstack_init(&obst);
-
- /* Create a Perm after the RegParams node to delimit it. */
- for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
- const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
- ir_node *perm;
- ir_node **in;
- int n_regs;
-
- for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
- const arch_register_t *reg = &cls->regs[j];
- ir_node *irn = pmap_get(regs, (void *) reg);
-
- if(irn && !arch_register_type_is(reg, ignore)) {
- n_regs++;
- obstack_ptr_grow(&obst, irn);
- set_irn_link(irn, (void *) reg);
- }
- }
-
- obstack_ptr_grow(&obst, NULL);
- in = obstack_finish(&obst);
- if(n_regs > 0) {
- perm = be_new_Perm(cls, irg, bl, n_regs, in);
- for(j = 0; j < n_regs; ++j) {
- ir_node *arg = in[j];
- arch_register_t *reg = get_irn_link(arg);
- pmap_insert(regs, reg, arg);
- be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
- }
- }
- obstack_free(&obst, in);
- }
-
- obstack_free(&obst, NULL);
-}
-#endif
-
typedef struct {
const arch_register_t *reg;
ir_node *irn;
rm = reg_map_to_arr(&env->obst, regs);
- for(n = 0; n < n_regs; ++n)
+ for (n = 0; n < n_regs; ++n)
obstack_ptr_grow(&env->obst, rm[n].irn);
- if(mem) {
+ if (mem) {
obstack_ptr_grow(&env->obst, *mem);
n++;
}
obstack_free(&env->obst, in);
for(n = 0; n < n_regs; ++n) {
- const arch_register_t *reg = rm[n].reg;
- int flags = 0;
- int pos = BE_OUT_POS(n);
- ir_node *proj;
-
- proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
- be_node_set_reg_class(irn, n, reg->reg_class);
- if(in_req)
- be_set_constr_single_reg(irn, n, reg);
- be_set_constr_single_reg(irn, pos, reg);
- be_node_set_reg_class(irn, pos, reg->reg_class);
- arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
-
- /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
- if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
- flags |= arch_irn_flags_ignore;
-
- if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
- flags |= arch_irn_flags_modify_sp;
+ ir_node *pred = rm[n].irn;
+ const arch_register_t *reg = rm[n].reg;
+ arch_register_type_t add_type = 0;
+ ir_node *proj;
+
+ /* stupid workaround for now... as not all nodes report register
+ * requirements. */
+ if (!is_Phi(pred)) {
+ const arch_register_req_t *ireq = arch_get_register_req_out(pred);
+ if (ireq->type & arch_register_req_type_ignore)
+ add_type |= arch_register_req_type_ignore;
+ if (ireq->type & arch_register_req_type_produces_sp)
+ add_type |= arch_register_req_type_produces_sp;
+ }
- be_node_set_flags(irn, pos, flags);
+ proj = new_r_Proj(irg, bl, irn, get_irn_mode(pred), n);
+ be_node_set_reg_class_in(irn, n, reg->reg_class);
+ if (in_req)
+ be_set_constr_single_reg_in(irn, n, reg, 0);
+ be_set_constr_single_reg_out(irn, n, reg, add_type);
+ arch_set_irn_register(proj, reg);
pmap_insert(regs, (void *) reg, proj);
}
- if(mem) {
+ if (mem) {
*mem = new_r_Proj(irg, bl, irn, mode_M, n);
}
}
/* Insert results for Return into the register map. */
- for(i = 0; i < n_res; ++i) {
+ for (i = 0; i < n_res; ++i) {
ir_node *res = get_Return_res(irn, i);
be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
assert(arg->in_reg && "return value must be passed in register");
/* clear SP entry, since it has already been grown. */
pmap_insert(reg_map, (void *) arch_env->sp, NULL);
- for(i = 0; i < n_res; ++i) {
+ for (i = 0; i < n_res; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
in[n] = be_abi_reg_map_get(reg_map, arg->reg);
/* grow the rest of the stuff. */
foreach_pmap(reg_map, ent) {
- if(ent->value) {
+ if (ent->value) {
in[n] = ent->value;
regs[n++] = ent->key;
}
}
/* The in array for the new back end return is now ready. */
- if(irn != NULL) {
+ if (irn != NULL) {
dbgi = get_irn_dbg_info(irn);
} else {
dbgi = NULL;
ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
- for(i = 0; i < n; ++i)
- if(regs[i])
- be_node_set_reg_class(ret, i, regs[i]->reg_class);
+ for (i = 0; i < n; ++i) {
+ if (regs[i] == NULL)
+ continue;
+
+ be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
+ }
/* Free the space of the Epilog's in array and the register <-> proj map. */
obstack_free(&env->obst, in);
/* check, if it's a param sel and if have not seen this entity before */
if (ptr == param_base &&
- ent != ctx->value_param_tail &&
- get_entity_link(ent) == NULL) {
+ ent != ctx->value_param_tail &&
+ get_entity_link(ent) == NULL) {
set_entity_link(ent, ctx->value_param_list);
ctx->value_param_list = ent;
if (ctx->value_param_tail == NULL) ctx->value_param_tail = ent;
const arch_env_t *arch_env= env->birg->main_env->arch_env;
const arch_register_t *sp = arch_env_sp(arch_env);
ir_graph *irg = env->birg->irg;
- ir_node *bl = get_irg_start_block(irg);
- ir_node *end = get_irg_end_block(irg);
- ir_node *old_mem = get_irg_initial_mem(irg);
+ ir_node *start_bl;
+ ir_node *end;
+ ir_node *old_mem;
ir_node *new_mem_proj;
ir_node *mem;
ir_type *method_type = get_entity_type(get_irg_entity(irg));
- pset *dont_save = pset_new_ptr(8);
int n_params;
int i, n;
DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
+ /* Must fetch memory here, otherwise the start Barrier gets the wrong
+ * memory, which leads to loops in the DAG. */
+ old_mem = get_irg_initial_mem(irg);
+
+ irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
/* set the links of all frame entities to NULL, we use it
to detect if an entity is already linked in the value_param_list */
tp = get_method_value_param_type(method_type);
kill_node(value_param_base);
set_irg_value_param_base(irg, new_r_Bad(irg));
- env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
env->regs = pmap_create();
used_proj_nr = bitset_alloca(1024);
* a backing store into the first block.
*/
fix_address_of_parameter_access(env, ctx.value_param_list);
+ irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
/* Fill the argument vector */
arg_tuple = get_irg_args(irg);
arg_type = compute_arg_type(env, call, method_type, ¶m_map);
bet_type = call->cb->get_between_type(env->cb);
- stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
+ stack_frame_init(&env->frame, arg_type, bet_type, get_irg_frame_type(irg), arch_env->stack_dir, param_map);
/* Count the register params and add them to the number of Projs for the RegParams node */
- for(i = 0; i < n_params; ++i) {
+ for (i = 0; i < n_params; ++i) {
be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
- if(arg->in_reg && args[i]) {
+ if (arg->in_reg && args[i]) {
assert(arg->reg != sp && "cannot use stack pointer as parameter register");
assert(i == get_Proj_proj(args[i]));
}
/* Collect all callee-save registers */
- for(i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
+ for (i = 0, n = arch_env_get_n_reg_class(arch_env); i < n; ++i) {
const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
- for(j = 0; j < cls->n_regs; ++j) {
+ for (j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = &cls->regs[j];
- if(arch_register_type_is(reg, callee_save) ||
+ if (arch_register_type_is(reg, callee_save) ||
arch_register_type_is(reg, state)) {
pmap_insert(env->regs, (void *) reg, NULL);
}
*/
rm = reg_map_to_arr(&env->obst, env->regs);
- for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
- arch_register_t *reg = (void *) rm[i].reg;
- ir_mode *mode = reg->reg_class->mode;
- long nr = i;
- int pos = BE_OUT_POS((int) nr);
- int flags = 0;
+ for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
+ arch_register_t *reg = (void *) rm[i].reg;
+ ir_mode *mode = reg->reg_class->mode;
+ long nr = i;
+ arch_register_req_type_t add_type = 0;
+ ir_node *proj;
- ir_node *proj;
+ if (reg == sp)
+ add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
assert(nr >= 0);
bitset_set(used_proj_nr, nr);
proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
pmap_insert(env->regs, (void *) reg, proj);
- be_set_constr_single_reg(env->reg_params, pos, reg);
- arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
-
- /*
- * If the register is an ignore register,
- * The Proj for that register shall also be ignored during register allocation.
- */
- if(arch_register_type_is(reg, ignore))
- flags |= arch_irn_flags_ignore;
-
- if(reg == sp)
- flags |= arch_irn_flags_modify_sp;
-
- be_node_set_flags(env->reg_params, pos, flags);
+ be_set_constr_single_reg_out(env->reg_params, nr, reg, add_type);
+ arch_set_irn_register(proj, reg);
DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
}
mem = new_mem_proj;
/* Generate the Prologue */
- fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame->initial_bias);
+ fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
+ start_bl = get_irg_start_block(irg);
+ env->init_sp = be_new_IncSP(sp, irg, start_bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND, 0);
be_abi_reg_map_set(env->regs, sp, env->init_sp);
- create_barrier(env, bl, &mem, env->regs, 0);
+ create_barrier(env, start_bl, &mem, env->regs, 0);
env->init_sp = be_abi_reg_map_get(env->regs, sp);
- arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
+ arch_set_irn_register(env->init_sp, sp);
frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
set_irg_frame(irg, frame_pointer);
set_irg_initial_mem(irg, mem);
/* Now, introduce stack param nodes for all parameters passed on the stack */
- for(i = 0; i < n_params; ++i) {
+ for (i = 0; i < n_params; ++i) {
ir_node *arg_proj = args[i];
ir_node *repl = NULL;
- if(arg_proj != NULL) {
+ if (arg_proj != NULL) {
be_abi_call_arg_t *arg;
ir_type *param_type;
int nr = get_Proj_proj(arg_proj);
if (arg->in_reg) {
repl = pmap_get(env->regs, (void *) arg->reg);
- } else if(arg->on_stack) {
+ } else if (arg->on_stack) {
ir_node *addr = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
/* For atomic parameters which are actually used, we create a Load node. */
/* the arg proj is not needed anymore now and should be only used by the anchor */
assert(get_irn_n_edges(arg_tuple) == 1);
kill_node(arg_tuple);
- set_irg_args(irg, new_rd_Bad(irg));
+ set_irg_args(irg, new_r_Bad(irg));
/* All Return nodes hang on the End node, so look for them there. */
+ end = get_irg_end_block(irg);
for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
ir_node *irn = get_Block_cfgpred(end, i);
/* if we have endless loops here, n might be <= 0. Do NOT create a be_Return then,
the code is dead and will never be executed. */
- del_pset(dont_save);
obstack_free(&env->obst, args);
/* handle start block here (place a jump in the block) */
/* Collect caller save registers */
n = arch_env_get_n_reg_class(arch_env);
- for(i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
unsigned j;
const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
- for(j = 0; j < cls->n_regs; ++j) {
+ for (j = 0; j < cls->n_regs; ++j) {
const arch_register_t *reg = arch_register_for_index(cls, j);
- if(arch_register_type_is(reg, state)) {
+ if (arch_register_type_is(reg, state)) {
ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
}
}
n = ARR_LEN(env->calls);
n_states = ARR_LEN(stateregs);
- for(i = 0; i < n; ++i) {
+ for (i = 0; i < n; ++i) {
int s, arity;
ir_node *call = env->calls[i];
arity = get_irn_arity(call);
/* the state reg inputs are the last n inputs of the calls */
- for(s = 0; s < n_states; ++s) {
+ for (s = 0; s < n_states; ++s) {
int inp = arity - n_states + s;
const arch_register_t *reg = stateregs[s];
ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
/* calls can jump to relative addresses, so we can directly jump to
the (relatively) known call address or the trampoline */
- if (is_Call(node) && i == 1) {
+ if (i == 1 && is_Call(node)) {
ir_entity *trampoline;
ir_node *trampoline_const;
mode = get_irn_mode(pred);
unknown = new_r_Unknown(irg, mode);
pic_base = arch_code_generator_get_pic_base(env->birg->cg);
- add = new_r_Add(irg, block, pic_base, pred, mode);
-
- /* make sure the walker doesn't visit this add again */
- mark_irn_visited(add);
/* all ok now for locally constructed stuff */
if (can_address_relative(entity)) {
+ ir_node *add = new_r_Add(irg, block, pic_base, pred, mode);
+
+ /* make sure the walker doesn't visit this add again */
+ mark_irn_visited(add);
set_irn_n(node, i, add);
continue;
}
pic_symbol = get_pic_symbol(be, entity);
pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
pic_symbol, NULL);
- set_Add_right(add, pic_symconst);
+ add = new_r_Add(irg, block, pic_base, pic_symconst, mode);
+ mark_irn_visited(add);
/* we need an extra indirection for global data outside our current
module. The loads are always safe and can therefore float
be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
{
- be_abi_irg_t *env = xmalloc(sizeof(env[0]));
+ be_abi_irg_t *env = XMALLOC(be_abi_irg_t);
ir_node *old_frame = get_irg_frame(birg->irg);
ir_graph *irg = birg->irg;
limited_bitset = rbitset_obstack_alloc(&env->obst, env->sp_req.cls->n_regs);
rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
env->sp_req.limited = limited_bitset;
+ if (env->arch_env->sp->type & arch_register_type_ignore) {
+ env->sp_req.type |= arch_register_req_type_ignore;
+ }
env->sp_cls_req.type = arch_register_req_type_normal;
env->sp_cls_req.cls = arch_register_get_class(env->arch_env->sp);
set_optimize(0);
env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
restore_optimization_state(&state);
+
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
env->calls = NEW_ARR_F(ir_node*, 0);
/* Returns the stack layout from a abi environment. */
const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
- return abi->frame;
+ return &abi->frame;
}
/*
typedef struct fix_stack_walker_env_t {
node_array sp_nodes;
- const arch_env_t *arch_env;
} fix_stack_walker_env_t;
/**
*/
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
- fix_stack_walker_env_t *env = data;
+ fix_stack_walker_env_t *env = data;
+ const arch_register_req_t *req;
- if (arch_irn_is(env->arch_env, node, modify_sp)) {
- assert(get_irn_mode(node) != mode_M && get_irn_mode(node) != mode_T);
- ARR_APP1(ir_node*, env->sp_nodes, node);
- }
+ if (get_irn_mode(node) == mode_T)
+ return;
+
+ req = arch_get_register_req_out(node);
+ if (! (req->type & arch_register_req_type_produces_sp))
+ return;
+
+ ARR_APP1(ir_node*, env->sp_nodes, node);
}
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
fix_stack_walker_env_t walker_env;
walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
- walker_env.arch_env = birg->main_env->arch_env;
irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
* now.
*/
len = ARR_LEN(walker_env.sp_nodes);
- if(len == 0) {
+ if (len == 0) {
DEL_ARR_F(walker_env.sp_nodes);
return;
}
be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
ARR_LEN(walker_env.sp_nodes));
be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
- ARR_LEN(walker_env.sp_nodes));
+ ARR_LEN(walker_env.sp_nodes));
if(lv != NULL) {
len = ARR_LEN(walker_env.sp_nodes);
len = ARR_LEN(phis);
for(i = 0; i < len; ++i) {
ir_node *phi = phis[i];
- be_set_phi_reg_req(walker_env.arch_env, phi, &env->sp_req);
- be_set_phi_flags(walker_env.arch_env, phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
- arch_set_irn_register(walker_env.arch_env, phi, env->arch_env->sp);
+ be_set_phi_reg_req(phi, &env->sp_req, arch_register_req_type_produces_sp);
+ arch_set_irn_register(phi, env->arch_env->sp);
}
be_ssa_construction_destroy(&senv);
*/
static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int real_bias)
{
- const arch_env_t *arch_env = env->birg->main_env->arch_env;
int omit_fp = env->call->flags.bits.try_omit_fp;
ir_node *irn;
int wanted_bias = real_bias;
If so, set the true offset (including the bias) for that
node.
*/
- ir_entity *ent = arch_get_frame_entity(arch_env, irn);
+ ir_entity *ent = arch_get_frame_entity(irn);
if (ent) {
int bias = omit_fp ? real_bias : 0;
- int offset = get_stack_entity_offset(env->frame, ent, bias);
- arch_set_frame_offset(arch_env, irn, offset);
+ int offset = get_stack_entity_offset(&env->frame, ent, bias);
+ arch_set_frame_offset(irn, offset);
DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n",
ent, offset, bias));
}
* If the node modifies the stack pointer by a constant offset,
* record that in the bias.
*/
- ofs = arch_get_sp_bias(arch_env, irn);
+ ofs = arch_get_sp_bias(irn);
if (be_is_IncSP(irn)) {
/* fill in real stack frame size */
} else {
if (be_get_IncSP_align(irn)) {
/* patch IncSP to produce an aligned stack pointer */
- ir_type *between_type = env->frame->between_type;
+ ir_type *between_type = env->frame.between_type;
int between_size = get_type_size_bytes(between_type);
int alignment = 1 << env->arch_env->stack_alignment;
int delta = (real_bias + ofs + between_size) & (alignment - 1);
/* adjust so real_bias corresponds with wanted_bias */
int delta = wanted_bias - real_bias;
assert(delta <= 0);
- if(delta != 0) {
+ if (delta != 0) {
be_set_IncSP_offset(irn, ofs + delta);
real_bias += delta;
}
void be_abi_fix_stack_bias(be_abi_irg_t *env)
{
- be_stack_layout_t *frame = env->frame;
ir_graph *irg = env->birg->irg;
struct bias_walk bw;
- stack_frame_compute_initial_offset(frame);
+ stack_frame_compute_initial_offset(&env->frame);
// stack_layout_dump(stdout, frame);
/* Determine the stack bias at the end of the start block. */
- bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), frame->initial_bias);
- bw.between_size = get_type_size_bytes(frame->between_type);
+ bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), env->frame.initial_bias);
+ bw.between_size = get_type_size_bytes(env->frame.between_type);
/* fix the bias is all other blocks */
bw.env = env;