* @author Sebastian Hack, Michael Beck
* @version $Id$
*/
-#ifdef HAVE_CONFIG_H
-# include "config.h"
-#endif
+#include "config.h"
#include "obst.h"
#include "offset.h"
} be_abi_call_arg_t;
struct _be_abi_call_t {
- be_abi_call_flags_t flags;
- int pop;
+ be_abi_call_flags_t flags; /**< Flags describing the ABI behavior on calls */
+ int pop; /**< number of bytes the stack frame is shrunk by the callee on return. */
const be_abi_callbacks_t *cb;
ir_type *between_type;
set *params;
- const arch_register_class_t *cls_addr;
+ const arch_register_class_t *cls_addr; /**< register class of the call address */
};
+/**
+ * The ABI information for the current birg.
+ */
struct _be_abi_irg_t {
struct obstack obst;
be_irg_t *birg; /**< The back end IRG. */
}
/**
- * Get or set an ABI call object argument.
+ * Get an ABI call object argument.
*
* @param call the abi call
* @param is_res true for call results, false for call arguments
* @param pos position of the argument
- * @param do_insert true if the argument is set, false if it's retrieved
*/
-static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
+static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
{
be_abi_call_arg_t arg;
unsigned hash;
hash = is_res * 128 + pos;
- return do_insert
- ? set_insert(call->params, &arg, sizeof(arg), hash)
- : set_find(call->params, &arg, sizeof(arg), hash);
+ return set_find(call->params, &arg, sizeof(arg), hash);
}
/**
- * Retrieve an ABI call object argument.
+ * Create an ABI call object argument.
*
- * @param call the ABI call object
+ * @param call the abi call
* @param is_res true for call results, false for call arguments
* @param pos position of the argument
*/
-static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
+static be_abi_call_arg_t *create_call_arg(be_abi_call_t *call, int is_res, int pos)
{
- return get_or_set_call_arg(call, is_res, pos, 0);
+ be_abi_call_arg_t arg;
+ unsigned hash;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.is_res = is_res;
+ arg.pos = pos;
+
+ hash = is_res * 128 + pos;
+
+ return set_insert(call->params, &arg, sizeof(arg), hash);
}
/* Set the flags for a call. */
call->cb = cb;
}
+/* Sets the number of bytes the stack frame is shrunk by the callee on return */
void be_abi_call_set_pop(be_abi_call_t *call, int pop)
{
assert(pop >= 0);
void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, ir_mode *load_mode, unsigned alignment, unsigned space_before, unsigned space_after)
{
- be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
+ be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
arg->on_stack = 1;
arg->load_mode = load_mode;
arg->alignment = alignment;
void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
{
- be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
+ be_abi_call_arg_t *arg = create_call_arg(call, 0, arg_pos);
arg->in_reg = 1;
arg->reg = reg;
}
void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
{
- be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
+ be_abi_call_arg_t *arg = create_call_arg(call, 1, arg_pos);
arg->in_reg = 1;
arg->reg = reg;
}
/**
* Constructor for a new ABI call object.
*
+ * @param cls_addr register class of the call address
+ *
* @return the new ABI call object
*/
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
* Returns non-zero if the call argument at given position
* is transfered on the stack.
*/
-static INLINE int is_on_stack(be_abi_call_t *call, int pos)
+static inline int is_on_stack(be_abi_call_t *call, int pos)
{
be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
return arg && !arg->in_reg;
int n_stack_params = 0;
int n_ins;
- ir_node *low_call;
- ir_node **in;
- ir_node **res_projs;
- int n_reg_results = 0;
- const arch_register_t *reg;
- const ir_edge_t *edge;
- int *reg_param_idxs;
- int *stack_param_idx;
- int i, n;
+ ir_node *low_call;
+ ir_node **in;
+ ir_node **res_projs;
+ int n_reg_results = 0;
+ const arch_register_t *reg;
+ const ir_edge_t *edge;
+ int *reg_param_idxs;
+ int *stack_param_idx;
+ int i;
+ int n;
+ dbg_info *dbgi;
/* Let the isa fill out the abi description for that call node. */
arch_env_get_call_abi(arch_env, call_tp, call);
curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size, 1);
}
+ dbgi = get_irn_dbg_info(irn);
/* If there are some parameters which shall be passed on the stack. */
if (n_stack_params > 0) {
int curr_ofs = 0;
if (is_atomic_type(param_type)) {
ir_node *store;
ir_node *mem_input = do_seq ? curr_mem : new_NoMem();
- store = new_r_Store(irg, bl, mem_input, addr, param);
+ store = new_rd_Store(dbgi, irg, bl, mem_input, addr, param);
mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
}
ir_node *copy;
assert(mode_is_reference(get_irn_mode(param)));
- copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
+ copy = new_rd_CopyB(dbgi, irg, bl, curr_mem, addr, param, param_type);
mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
}
/* search the greatest result proj number */
- res_projs = alloca(n_res * sizeof(res_projs[0]));
- memset(res_projs, 0, n_res * sizeof(res_projs[0]));
+ res_projs = ALLOCANZ(ir_node*, n_res);
foreach_out_edge(irn, edge) {
const ir_edge_t *res_edge;
if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
/* direct call */
- low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
- curr_sp, curr_sp,
+ low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, curr_sp,
n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
n_ins, in, get_Call_type(irn));
be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
} else {
/* indirect call */
- low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
- curr_sp, call_ptr,
+ low_call = be_new_Call(dbgi, irg, bl, curr_mem, curr_sp, call_ptr,
n_reg_results + pn_be_Call_first_res + pset_count(caller_save),
n_ins, in, get_Call_type(irn));
}
/* create new stack pointer */
curr_sp = new_r_Proj(irg, bl, low_call, get_irn_mode(curr_sp),
pn_be_Call_sp);
- be_set_constr_single_reg(low_call, BE_OUT_POS(pn_be_Call_sp), sp);
+ be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
+ arch_register_req_type_ignore | arch_register_req_type_produces_sp);
arch_set_irn_register(curr_sp, sp);
- be_node_set_flags(low_call, BE_OUT_POS(pn_be_Call_sp),
- arch_irn_flags_ignore | arch_irn_flags_modify_sp);
for(i = 0; i < n_res; ++i) {
int pn;
Set the register class of the call address to
the backend provided class (default: stack pointer class)
*/
- be_node_set_reg_class(low_call, be_pos_Call_ptr, call->cls_addr);
+ be_node_set_reg_class_in(low_call, be_pos_Call_ptr, call->cls_addr);
DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
assert(arg->reg != NULL);
- be_set_constr_single_reg(low_call, be_pos_Call_first_arg + i, arg->reg);
+ be_set_constr_single_reg_in(low_call, be_pos_Call_first_arg + i,
+ arg->reg, 0);
}
/* Set the register constraints of the results. */
int pn = get_Proj_proj(proj);
assert(arg->in_reg);
- be_set_constr_single_reg(low_call, BE_OUT_POS(pn), arg->reg);
+ be_set_constr_single_reg_out(low_call, pn, arg->reg, 0);
arch_set_irn_register(proj, arg->reg);
}
obstack_free(obst, in);
curr_res_proj);
/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
- be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
+ be_set_constr_single_reg_out(low_call, curr_res_proj, reg, 0);
arch_set_irn_register(proj, reg);
- /* a call can produce ignore registers, in this case set the flag and register for the Proj */
- if (arch_register_type_is(reg, ignore)) {
- be_node_set_flags(low_call, BE_OUT_POS(curr_res_proj),
- arch_irn_flags_ignore);
- }
-
set_irn_link(proj, (void*) reg);
obstack_ptr_grow(obst, proj);
curr_res_proj++;
keep = be_new_Keep(NULL, irg, bl, n, in);
for (i = 0; i < n; ++i) {
const arch_register_t *reg = get_irn_link(in[i]);
- be_node_set_reg_class(keep, i, reg->reg_class);
+ be_node_set_reg_class_in(keep, i, reg->reg_class);
}
obstack_free(obst, in);
}
return res;
}
-#if 0
-static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
-{
- int i, j, n;
- struct obstack obst;
-
- obstack_init(&obst);
-
- /* Create a Perm after the RegParams node to delimit it. */
- for (i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
- const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
- ir_node *perm;
- ir_node **in;
- int n_regs;
-
- for (n_regs = 0, j = 0; j < cls->n_regs; ++j) {
- const arch_register_t *reg = &cls->regs[j];
- ir_node *irn = pmap_get(regs, (void *) reg);
-
- if(irn && !arch_register_type_is(reg, ignore)) {
- n_regs++;
- obstack_ptr_grow(&obst, irn);
- set_irn_link(irn, (void *) reg);
- }
- }
-
- obstack_ptr_grow(&obst, NULL);
- in = obstack_finish(&obst);
- if (n_regs > 0) {
- perm = be_new_Perm(cls, irg, bl, n_regs, in);
- for (j = 0; j < n_regs; ++j) {
- ir_node *arg = in[j];
- arch_register_t *reg = get_irn_link(arg);
- pmap_insert(regs, reg, arg);
- be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
- }
- }
- obstack_free(&obst, in);
- }
-
- obstack_free(&obst, NULL);
-}
-#endif
-
typedef struct {
const arch_register_t *reg;
ir_node *irn;
obstack_free(&env->obst, in);
for(n = 0; n < n_regs; ++n) {
- const arch_register_t *reg = rm[n].reg;
- int flags = 0;
- int pos = BE_OUT_POS(n);
- ir_node *proj;
+ ir_node *pred = rm[n].irn;
+ const arch_register_t *reg = rm[n].reg;
+ arch_register_type_t add_type = 0;
+ ir_node *proj;
+
+ /* stupid workaround for now... as not all nodes report register
+ * requirements. */
+ if (!is_Phi(pred)) {
+ const arch_register_req_t *ireq = arch_get_register_req_out(pred);
+ if (ireq->type & arch_register_req_type_ignore)
+ add_type |= arch_register_req_type_ignore;
+ if (ireq->type & arch_register_req_type_produces_sp)
+ add_type |= arch_register_req_type_produces_sp;
+ }
- proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
- be_node_set_reg_class(irn, n, reg->reg_class);
+ proj = new_r_Proj(irg, bl, irn, get_irn_mode(pred), n);
+ be_node_set_reg_class_in(irn, n, reg->reg_class);
if (in_req)
- be_set_constr_single_reg(irn, n, reg);
- be_set_constr_single_reg(irn, pos, reg);
- be_node_set_reg_class(irn, pos, reg->reg_class);
+ be_set_constr_single_reg_in(irn, n, reg, 0);
+ be_set_constr_single_reg_out(irn, n, reg, add_type);
arch_set_irn_register(proj, reg);
- /* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
- if (arch_register_type_is(reg, ignore) || arch_irn_is(in[n], ignore))
- flags |= arch_irn_flags_ignore;
-
- if (arch_irn_is(in[n], modify_sp))
- flags |= arch_irn_flags_modify_sp;
-
- be_node_set_flags(irn, pos, flags);
-
pmap_insert(regs, (void *) reg, proj);
}
ret = be_new_Return(dbgi, env->birg->irg, bl, n_res, pop, n, in);
/* Set the register classes of the return's parameter accordingly. */
- for (i = 0; i < n; ++i)
- if (regs[i])
- be_node_set_reg_class(ret, i, regs[i]->reg_class);
+ for (i = 0; i < n; ++i) {
+ if (regs[i] == NULL)
+ continue;
+
+ be_node_set_reg_class_in(ret, i, regs[i]->reg_class);
+ }
/* Free the space of the Epilog's in array and the register <-> proj map. */
obstack_free(&env->obst, in);
/* check, if it's a param sel and if have not seen this entity before */
if (ptr == param_base &&
- ent != ctx->value_param_tail &&
- get_entity_link(ent) == NULL) {
+ ent != ctx->value_param_tail &&
+ get_entity_link(ent) == NULL) {
set_entity_link(ent, ctx->value_param_list);
ctx->value_param_list = ent;
if (ctx->value_param_tail == NULL) ctx->value_param_tail = ent;
* memory, which leads to loops in the DAG. */
old_mem = get_irg_initial_mem(irg);
+ irp_reserve_resources(irp, IR_RESOURCE_ENTITY_LINK);
/* set the links of all frame entities to NULL, we use it
to detect if an entity is already linked in the value_param_list */
tp = get_method_value_param_type(method_type);
* a backing store into the first block.
*/
fix_address_of_parameter_access(env, ctx.value_param_list);
+ irp_free_resources(irp, IR_RESOURCE_ENTITY_LINK);
/* Fill the argument vector */
arg_tuple = get_irg_args(irg);
rm = reg_map_to_arr(&env->obst, env->regs);
for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
- arch_register_t *reg = (void *) rm[i].reg;
- ir_mode *mode = reg->reg_class->mode;
- long nr = i;
- int pos = BE_OUT_POS((int) nr);
- int flags = 0;
+ arch_register_t *reg = (void *) rm[i].reg;
+ ir_mode *mode = reg->reg_class->mode;
+ long nr = i;
+ arch_register_req_type_t add_type = 0;
+ ir_node *proj;
- ir_node *proj;
+ if (reg == sp)
+ add_type |= arch_register_req_type_produces_sp | arch_register_req_type_ignore;
assert(nr >= 0);
bitset_set(used_proj_nr, nr);
proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
pmap_insert(env->regs, (void *) reg, proj);
- be_set_constr_single_reg(env->reg_params, pos, reg);
+ be_set_constr_single_reg_out(env->reg_params, nr, reg, add_type);
arch_set_irn_register(proj, reg);
- /*
- * If the register is an ignore register,
- * The Proj for that register shall also be ignored during register allocation.
- */
- if (arch_register_type_is(reg, ignore))
- flags |= arch_irn_flags_ignore;
-
- if (reg == sp)
- flags |= arch_irn_flags_modify_sp;
-
- be_node_set_flags(env->reg_params, pos, flags);
-
DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
}
obstack_free(&env->obst, rm);
mem = new_mem_proj;
/* Generate the Prologue */
- fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
+ fp_reg = call->cb->prologue(env->cb, &mem, env->regs, &env->frame.initial_bias);
/* do the stack allocation BEFORE the barrier, or spill code
might be added before it */
/* the arg proj is not needed anymore now and should be only used by the anchor */
assert(get_irn_n_edges(arg_tuple) == 1);
kill_node(arg_tuple);
- set_irg_args(irg, new_rd_Bad(irg));
+ set_irg_args(irg, new_r_Bad(irg));
/* All Return nodes hang on the End node, so look for them there. */
end = get_irg_end_block(irg);
/* calls can jump to relative addresses, so we can directly jump to
the (relatively) known call address or the trampoline */
- if (is_Call(node) && i == 1) {
+ if (i == 1 && is_Call(node)) {
ir_entity *trampoline;
ir_node *trampoline_const;
limited_bitset = rbitset_obstack_alloc(&env->obst, env->sp_req.cls->n_regs);
rbitset_set(limited_bitset, arch_register_get_index(env->arch_env->sp));
env->sp_req.limited = limited_bitset;
+ if (env->arch_env->sp->type & arch_register_type_ignore) {
+ env->sp_req.type |= arch_register_req_type_ignore;
+ }
env->sp_cls_req.type = arch_register_req_type_normal;
env->sp_cls_req.cls = arch_register_get_class(env->arch_env->sp);
set_optimize(0);
env->init_sp = dummy = new_r_Unknown(irg, env->arch_env->sp->reg_class->mode);
restore_optimization_state(&state);
+
FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
env->calls = NEW_ARR_F(ir_node*, 0);
*/
static void collect_stack_nodes_walker(ir_node *node, void *data)
{
- fix_stack_walker_env_t *env = data;
+ fix_stack_walker_env_t *env = data;
+ const arch_register_req_t *req;
- if (arch_irn_is(node, modify_sp)) {
- assert(get_irn_mode(node) != mode_M && get_irn_mode(node) != mode_T);
- ARR_APP1(ir_node*, env->sp_nodes, node);
- }
+ if (get_irn_mode(node) == mode_T)
+ return;
+
+ req = arch_get_register_req_out(node);
+ if (! (req->type & arch_register_req_type_produces_sp))
+ return;
+
+ ARR_APP1(ir_node*, env->sp_nodes, node);
}
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
ARR_LEN(walker_env.sp_nodes));
be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
- ARR_LEN(walker_env.sp_nodes));
+ ARR_LEN(walker_env.sp_nodes));
if(lv != NULL) {
len = ARR_LEN(walker_env.sp_nodes);
len = ARR_LEN(phis);
for(i = 0; i < len; ++i) {
ir_node *phi = phis[i];
- be_set_phi_reg_req(phi, &env->sp_req);
- be_set_phi_flags(phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
+ be_set_phi_reg_req(phi, &env->sp_req, arch_register_req_type_produces_sp);
arch_set_irn_register(phi, env->arch_env->sp);
}
be_ssa_construction_destroy(&senv);