/**
* ABI lowering.
*
- *
- *
+ * @author Sebastian Hack
+ * @date 7.3.2005
*/
#ifdef HAVE_CONFIG_H
struct _be_abi_irg_t {
struct obstack obst;
- firm_dbg_module_t *dbg; /**< The debugging module. */
- be_stack_frame_t *frame;
- const be_irg_t *birg;
+ firm_dbg_module_t *dbg; /**< The debugging module. */
+ be_stack_frame_t *frame; /**< The stack frame model. */
+ const be_irg_t *birg; /**< The back end IRG. */
const arch_isa_t *isa; /**< The isa. */
survive_dce_t *dce_survivor;
- be_abi_call_t *call;
- type *method_type;
+ be_abi_call_t *call; /**< The ABI call information. */
+ type *method_type; /**< The type of the method of the IRG. */
ir_node *init_sp; /**< The node representing the stack pointer
at the start of the function. */
- ir_node *reg_params;
- pmap *regs;
- pset *stack_phis;
- int start_block_bias;
+ ir_node *reg_params; /**< The reg params node. */
+ pmap *regs; /**< A map of all callee-save and ignore regs to
+ their Projs to the RegParams node. */
+
+ pset *stack_phis; /**< The set of all Phi nodes inserted due to
+ stack pointer modifying nodes. */
+
+ int start_block_bias; /**< The stack bias at the end of the start block. */
arch_irn_handler_t irn_handler;
arch_irn_ops_t irn_ops;
return arg && !arg->in_reg;
}
+/*
+ ____ _ _
+ / ___|__ _| | |___
+ | | / _` | | / __|
+ | |__| (_| | | \__ \
+ \____\__,_|_|_|___/
+
+ Adjustment of the calls inside a graph.
+
+*/
+
/**
* Transform a call node.
* @param env The ABI environment for the current irg.
- * @param irn THe call node.
+ * @param irn The call node.
+ * @param curr_sp The stack pointer node to use.
+ * @return The stack pointer after the call.
*/
-static void adjust_call(be_abi_irg_t *env, ir_node *irn)
+static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
ir_graph *irg = env->birg->irg;
const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
be_abi_call_t *call = be_abi_call_new();
ir_type *mt = get_Call_type(irn);
+ ir_node *call_ptr = get_Call_ptr(irn);
int n_params = get_method_n_params(mt);
- ir_node *curr_sp = env->init_sp;
ir_node *curr_mem = get_Call_mem(irn);
ir_node *bl = get_nodes_block(irn);
pset *results = pset_new_ptr(8);
ir_node *no_mem = get_irg_no_mem(irg);
ir_node *res_proj = NULL;
- int curr_res_proj = -1;
+ int curr_res_proj = pn_Call_max;
int n_low_args = 0;
int n_pos = 0;
ir_node *low_call;
ir_node **in;
- ir_node *sp_proj;
+ ir_node **res_projs;
const ir_edge_t *edge;
int *low_args;
int *pos;
ir_node *res = get_edge_src_irn(res_edge);
assert(is_Proj(res));
+
proj = get_Proj_proj(res);
arg = get_call_arg(call, 1, proj);
+
+ /*
+ shift the proj number to the right, since we will drop the
+ unspeakable Proj_T from the Call. Therefore, all real argument
+ Proj numbers must be increased by pn_Call_max
+ */
+ proj += pn_Call_max;
+ set_Proj_proj(res, proj);
+ obstack_ptr_grow(obst, res);
+
if(proj > curr_res_proj)
curr_res_proj = proj;
if(arg->in_reg)
}
}
curr_res_proj++;
+ obstack_ptr_grow(obst, NULL);
+ res_projs = obstack_finish(obst);
/* make the back end call node and set its register requirements. */
for(i = 0; i < n_low_args; ++i)
obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
in = obstack_finish(obst);
- low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
+ if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
+ low_call = be_new_Call(irg, bl, curr_mem, curr_sp, curr_sp, curr_res_proj, n_low_args, in);
+ be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
+ }
+
+ else
+ low_call = be_new_Call(irg, bl, curr_mem, curr_sp, call_ptr, curr_res_proj, n_low_args, in);
+
+ /* Set the register classes and constraints of the Call parameters. */
+ for(i = 0; i < n_low_args; ++i) {
+ int index = low_args[i];
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
+ assert(arg->reg != NULL);
+ be_set_constr_single_reg(low_call, index, arg->reg);
+ }
+
+ /* Set the register constraints of the results. */
+ for(i = 0; res_projs[i]; ++i) {
+ ir_node *irn = res_projs[i];
+ int proj = get_Proj_proj(irn);
+
+ /* Correct Proj number since it has been adjusted! (see above) */
+ const be_abi_call_arg_t *arg = get_call_arg(call, 1, proj - pn_Call_max);
+
+ assert(arg->in_reg);
+ be_set_constr_single_reg(low_call, BE_OUT_POS(proj), arg->reg);
+ }
obstack_free(obst, in);
exchange(irn, low_call);
+ /* redirect the result projs to the lowered call instead of the Proj_T */
+ for(i = 0; res_projs[i]; ++i)
+ set_Proj_pred(res_projs[i], low_call);
+
/* Make additional projs for the caller save registers
and the Keep node which keeps them alive. */
if(pset_count(caller_save) > 0) {
ir_node **in, *keep;
int i, n;
- if(!res_proj)
- res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);
-
for(reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
- ir_node *proj = new_r_Proj(irg, bl, res_proj, reg->reg_class->mode, curr_res_proj++);
+ ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj++);
/* memorize the register in the link field. we need afterwards to set the register class of the keep correctly. */
set_irn_link(proj, (void *) reg);
/* Clean up the stack. */
if(stack_size > 0) {
- ir_node *last_inc_sp;
+ ir_node *mem_proj = NULL;
+
+ foreach_out_edge(low_call, edge) {
+ ir_node *irn = get_edge_src_irn(edge);
+ if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
+ mem_proj = irn;
+ break;
+ }
+ }
- /* Get the result ProjT */
- if(!res_proj)
- res_proj = new_r_Proj(irg, bl, low_call, mode_T, pn_Call_T_result);
+ if(!mem_proj)
+ mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
/* Make a Proj for the stack pointer. */
- sp_proj = new_r_Proj(irg, bl, res_proj, sp->reg_class->mode, curr_res_proj++);
- last_inc_sp = be_new_IncSP(sp, irg, bl, sp_proj, no_mem, stack_size, be_stack_dir_against);
+ curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, mem_proj, stack_size, be_stack_dir_against);
}
be_abi_call_free(call);
obstack_free(obst, pos);
del_pset(results);
del_pset(caller_save);
+
+ return curr_sp;
+}
+
+/**
+ * Adjust an alloca.
+ * The alloca is transformed into a back end alloca node and connected to the stack nodes.
+ * Allocations not on the stack (get_Alloc_where() != stack_alloc) are left untouched.
+ * @param env     The ABI environment for the current irg.
+ * @param alloc   The Alloc node to adjust.
+ * @param curr_sp The stack pointer node before this allocation.
+ * @return The stack pointer node to use after this allocation.
+ */
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
+{
+	if(get_Alloc_where(alloc) == stack_alloc) {
+		ir_node *new_alloc;
+
+		ir_node *bl = get_nodes_block(alloc);
+		ir_graph *irg = get_irn_irg(bl);
+
+		new_alloc = be_new_Alloca(env->isa->sp, irg, bl, get_Alloc_mem(alloc), curr_sp, get_Alloc_size(alloc));
+		exchange(alloc, new_alloc);
+		/* The result Proj of the back end Alloca becomes the new stack pointer. */
+		curr_sp = new_r_Proj(irg, bl, new_alloc, mode_P, pn_Alloc_res);
+	}
+
+	return curr_sp;
+}
+
+/**
+ * Walker for dependent_on().
+ * This function searches a node tgt recursively from a given node
+ * but is restricted to the given block.
+ * @param curr       The node the search starts from.
+ * @param tgt        The node searched for.
+ * @param bl         The block the search may not leave.
+ * @param visited_nr The visited number marking nodes already seen in this search.
+ * @return 1 if tgt was reachable from curr, 0 if not.
+ */
+static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl, unsigned long visited_nr)
+{
+	int n, i;
+
+	/* Already visited in this search: nothing new reachable from here. */
+	if(get_irn_visited(curr) >= visited_nr)
+		return 0;
+
+	set_irn_visited(curr, visited_nr);
+	/* Restrict the search to the given block. */
+	if(get_nodes_block(curr) != bl)
+		return 0;
+
+	if(curr == tgt)
+		return 1;
+
+	/* Recurse into all operands of the current node. */
+	for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
+		if(check_dependence(get_irn_n(curr, i), tgt, bl, visited_nr))
+			return 1;
+	}
+
+	return 0;
+}
+
<br>
+/**
+ * Check if a node is somehow data dependent on another one.
+ * Both nodes must be in the same basic block.
+ * Note: this advances the graph's visited number as a side effect.
+ * @param n1 The first node.
+ * @param n2 The second node.
+ * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
+ */
+static int dependent_on(ir_node *n1, ir_node *n2)
+{
+	ir_node *bl = get_nodes_block(n1);
+	ir_graph *irg = get_irn_irg(bl);
+	long vis_nr = get_irg_visited(irg) + 1;
+
+	assert(bl == get_nodes_block(n2));
+	/* Use a fresh visited number for this search. */
+	set_irg_visited(irg, vis_nr);
+	return check_dependence(n1, n2, bl, vis_nr);
+}
+
+/**
+ * qsort() comparison function for two nodes of the same block:
+ * orders a node before another node it is data dependent on.
+ */
+static int cmp_call_dependecy(const void *c1, const void *c2)
+{
+	ir_node *n1 = *(ir_node **) c1;
+	ir_node *n2 = *(ir_node **) c2;
+
+	/*
+		Classical qsort() comparison function behavior:
+		0  if both elements are equal
+		1  if second is "smaller" than first
+		-1 if first is "smaller" than second
+	*/
+	return n1 == n2 ? 0 : (dependent_on(n1, n2) ? -1 : 1);
+}
+
+/**
+ * Walker which chains the stack-modifying nodes of each block into a list.
+ * The block's link field points to the head; the nodes are chained through
+ * their own link fields.
+ * Alloc nodes must be linked as well as Calls, since process_calls_in_block()
+ * dispatches on iro_Alloc too — otherwise stack Allocs would never be adjusted.
+ */
+static void link_calls_in_block_walker(ir_node *irn, void *data)
+{
+	int opc = get_irn_opcode(irn);
+
+	if(opc == iro_Call || opc == iro_Alloc) {
+		ir_node *bl = get_nodes_block(irn);
+		void *save = get_irn_link(bl);
+
+		set_irn_link(irn, save);
+		set_irn_link(bl, irn);
+	}
+}
+
+/**
+ * Process all call nodes inside a basic block.
+ * Note that the link field of the block must contain a linked list of all
+ * Call nodes inside the block. We first order this list according to data dependency
+ * and then connect the calls together.
+ */
+static void process_calls_in_block(ir_node *bl, void *data)
+{
+	be_abi_irg_t *env = data;
+	ir_node *curr_sp = env->init_sp;
+	ir_node *irn;
+	int n;
+
+	/* Collect the nodes linked to this block (see link_calls_in_block_walker). */
+	for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
+		obstack_ptr_grow(&env->obst, irn);
+
+	/* If there were call nodes in the block. */
+	if(n > 0) {
+		ir_node **nodes;
+		int i;
+
+		nodes = obstack_finish(&env->obst);
+
+		/* order the call nodes according to data dependency */
+		qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
+
+		/* Walk from the last node to the first, threading the stack pointer
+		   through all calls and allocas. */
+		for(i = n - 1; i >= 0; --i) {
+			ir_node *irn = nodes[i];
+
+			switch(get_irn_opcode(irn)) {
+			case iro_Call:
+				curr_sp = adjust_call(env, irn, curr_sp);
+				break;
+			case iro_Alloc:
+				curr_sp = adjust_alloc(env, irn, curr_sp);
+				break;
+			default:
+				break;
+			}
+		}
+
+		/* Keep the last stack state in the block by tying it to a Keep node.
+		   The nodes array is reused as the Keep's in array, so it must only
+		   be freed afterwards — freeing it first would access freed memory. */
+		nodes[0] = curr_sp;
+		be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
+
+		obstack_free(&env->obst, nodes);
+	}
+
+	set_irn_link(bl, curr_sp);
 }
-static void adjust_call_walker(ir_node *irn, void *data)
+/**
+ * Adjust all call nodes in the graph to the ABI conventions.
+ */
+static void process_calls(be_abi_irg_t *env)
{
- if(get_irn_opcode(irn) == iro_Call)
- adjust_call(data, irn);
+ ir_graph *irg = env->birg->irg;
+
+ irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, NULL);
+ irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
}
+#if 0
/**
* Walker to implement alloca-style allocations.
* They are implemented using an add to the stack pointer
res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
}
+#endif
static void collect_return_walker(ir_node *irn, void *data)
{
const arch_register_t *sp = isa->sp;
const arch_register_t *bp = isa->bp;
ir_graph *irg = env->birg->irg;
- ir_node *no_mem = get_irg_no_mem(irg);
+ ir_node *ret_mem = get_Return_mem(ret);
ir_node *frame = get_irg_frame(irg);
- ir_node *stack = env->init_sp;
ir_node *bl = get_nodes_block(ret);
+ ir_node *stack = get_irn_link(bl);
pmap_entry *ent;
if(env->call->flags.bits.try_omit_fp) {
- stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
+ stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
}
else {
- stack = be_new_SetSP(sp, irg, bl, stack, frame, get_Return_mem(ret));
+ stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
be_set_constr_single_reg(stack, -1, sp);
be_node_set_flags(stack, -1, arch_irn_flags_ignore);
}
return res;
}
-static type *get_bp_type(const arch_register_t *bp)
-{
- static type *bp_type = NULL;
- if(!bp_type) {
- bp_type = new_type_primitive(new_id_from_str("bp_type"), bp->reg_class->mode);
- set_type_size_bytes(bp_type, get_mode_size_bytes(bp->reg_class->mode));
- }
- return bp_type;
-}
-
/**
* Modify the irg itself and the frame type.
*/
{
be_abi_irg_t *env = malloc(sizeof(env[0]));
- int i;
- ir_node **stack_allocs;
- pmap_entry *ent;
+ ir_node *dummy;
env->isa = birg->main_env->arch_env->isa;
env->method_type = get_entity_type(get_irg_entity(birg->irg));
env->call = be_abi_call_new();
arch_isa_get_call_abi(env->isa, env->method_type, env->call);
- env->dce_survivor = new_survive_dce();
- env->birg = birg;
- env->dbg = firm_dbg_register("firm.be.abi");
- env->stack_phis = pset_new_ptr_default();
+ env->dce_survivor = new_survive_dce();
+ env->birg = birg;
+ env->dbg = firm_dbg_register("firm.be.abi");
+ env->stack_phis = pset_new_ptr(16);
+ env->init_sp = dummy = new_r_Unknown(birg->irg, env->isa->sp->reg_class->mode);
+
obstack_init(&env->obst);
memcpy(&env->irn_handler, &abi_irn_handler, sizeof(abi_irn_handler));
env->irn_ops.impl = &abi_irn_ops;
- /* search for stack allocation nodes and record them */
- irg_walk_graph(env->birg->irg, collect_alloca_walker, NULL, env);
- obstack_ptr_grow(&env->obst, NULL);
- stack_allocs = obstack_finish(&env->obst);
-
- /* If there are stack allocations in the irg, we need a frame pointer */
- if(stack_allocs[0] != NULL)
- env->call->flags.bits.try_omit_fp = 0;
+ /* Lower all call nodes in the IRG. */
+ process_calls(env);
+ /* Process the IRG */
modify_irg(env);
+ /* reroute the stack origin of the calls to the true stack origin. */
+ exchange(dummy, env->init_sp);
+
/* Make some important node pointers survive the dead node elimination. */
survive_dce_register_irn(env->dce_survivor, &env->init_sp);
- for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs))
- survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
-
- /* Fix the alloca-style allocations */
- for(i = 0; stack_allocs[i] != NULL; ++i)
- implement_stack_alloc(env, stack_allocs[i]);
-
- /* Lower all call nodes in the IRG. */
- irg_walk_graph(env->birg->irg, NULL, adjust_call_walker, env);
+ survive_dce_register_pmap(env->dce_survivor, env->regs);
arch_env_push_irn_handler(env->birg->main_env->arch_env, &env->irn_handler);
return env;
}
-static void collect_stack_nodes(ir_node *irn, void *data)
+/**
+ * Free an ABI environment and all resources it holds:
+ * the DCE survivor info, the stack Phi set, the register map and the
+ * obstack. Also pops the ABI irn handler from the arch environment again.
+ */
+void be_abi_free(be_abi_irg_t *env)
+{
+	free_survive_dce(env->dce_survivor);
+	del_pset(env->stack_phis);
+	pmap_destroy(env->regs);
+	obstack_free(&env->obst, NULL);
+	arch_env_pop_irn_handler(env->birg->main_env->arch_env);
+	free(env);
+}
+
+
+/*
+
+ _____ _ ____ _ _
+ | ___(_)_ __ / ___|| |_ __ _ ___| | __
+ | |_ | \ \/ / \___ \| __/ _` |/ __| |/ /
+ | _| | |> < ___) | || (_| | (__| <
+ |_| |_/_/\_\ |____/ \__\__,_|\___|_|\_\
+
+*/
+
+static void collect_stack_nodes_walker(ir_node *irn, void *data)
{
pset *s = data;
- switch(be_get_irn_opcode(irn)) {
- case beo_IncSP:
- case beo_AddSP:
- case beo_SetSP:
+ if((is_Proj(irn) && be_is_Alloca(get_Proj_pred(irn)) && get_Proj_proj(irn) == pn_Alloc_res)
+ || be_is_IncSP(irn)
+ || be_is_SetSP(irn))
pset_insert_ptr(s, irn);
- default:
- break;
- }
}
void be_abi_fix_stack_nodes(be_abi_irg_t *env)
{
dom_front_info_t *df;
- pset *stack_ops;
+ pset *stack_nodes;
/* We need dominance frontiers for fix up */
df = be_compute_dominance_frontiers(env->birg->irg);
-
- stack_ops = pset_new_ptr_default();
- pset_insert_ptr(stack_ops, env->init_sp);
- irg_walk_graph(env->birg->irg, collect_stack_nodes, NULL, stack_ops);
- be_ssa_constr_set_phis(df, stack_ops, env->stack_phis);
- del_pset(stack_ops);
+ stack_nodes = pset_new_ptr(16);
+ pset_insert_ptr(stack_nodes, env->init_sp);
+ irg_walk_graph(env->birg->irg, collect_stack_nodes_walker, NULL, stack_nodes);
+ be_ssa_constr_set_phis(df, stack_nodes, env->stack_phis);
+ del_pset(stack_nodes);
/* free these dominance frontiers */
be_free_dominance_frontiers(df);
struct bias_walk bw;
stack_frame_compute_initial_offset(env->frame);
- stack_frame_dump(stdout, env->frame);
+ // stack_frame_dump(stdout, env->frame);
/* Determine the stack bias at the and of the start block. */
bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
}
-void be_abi_free(be_abi_irg_t *env)
-{
- free_survive_dce(env->dce_survivor);
- del_pset(env->stack_phis);
- pmap_destroy(env->regs);
- obstack_free(&env->obst, NULL);
- arch_env_pop_irn_handler(env->birg->main_env->arch_env);
- free(env);
-}
-
ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
{
assert(arch_register_type_is(reg, callee_save));
fixed on the SP register of the ISA.
*/
-static const arch_irn_ops_t *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
+static const void *abi_get_irn_ops(const arch_irn_handler_t *handler, const ir_node *irn)
{
const be_abi_irg_t *abi = get_abi_from_handler(handler);
return is_Phi(irn) && pset_find_ptr(abi->stack_phis, (void *) irn) != NULL ? &abi->irn_ops : NULL;
}
-static void be_abi_limited(bitset_t *bs, void *data)
+static void be_abi_limited(void *data, bitset_t *bs)
{
be_abi_irg_t *abi = data;
bitset_clear_all(bs);