+
+ return curr_sp;
+}
+
+/**
+ * Adjust an alloca.
+ * The alloca is transformed into a back end alloca node and connected to the stack nodes.
+ *
+ * @param env     the ABI environment
+ * @param alloc   the Alloc node to transform
+ * @param curr_sp the current stack pointer value
+ * @return the stack pointer value after the alloca (the Alloc result itself)
+ */
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
+{
+	if (get_Alloc_where(alloc) == stack_alloc) {
+		ir_node *bl = get_nodes_block(alloc);
+		ir_graph *irg = get_irn_irg(bl);
+		ir_node *alloc_mem = NULL;
+		ir_node *alloc_res = NULL;
+
+		const ir_edge_t *edge;
+		ir_node *new_alloc;
+		ir_node *addr;
+		ir_node *copy;
+
+		/* Find the memory and result Projs of the Alloc. */
+		foreach_out_edge(alloc, edge) {
+			ir_node *irn = get_edge_src_irn(edge);
+
+			assert(is_Proj(irn));
+			switch(get_Proj_proj(irn)) {
+			case pn_Alloc_M:
+				alloc_mem = irn;
+				break;
+			case pn_Alloc_res:
+				alloc_res = irn;
+				break;
+			default:
+				break;
+			}
+		}
+
+		/* Beware: currently Alloc nodes without a result might happen,
+		   only escape analysis kills them and this phase runs only for object
+		   oriented source. We kill the Alloc here.
+		   Also guard against a completely Proj-less Alloc: the code below
+		   dereferences alloc_res unconditionally, so bail out whenever the
+		   result Proj is missing (previously only the "mem present, result
+		   missing" case returned early and the other crashed). */
+		if (alloc_res == NULL) {
+			if (alloc_mem != NULL)
+				exchange(alloc_mem, get_Alloc_mem(alloc));
+			return curr_sp;
+		}
+
+		/* The stack pointer will be modified in an unknown manner.
+		   We cannot omit it. */
+		env->call->flags.bits.try_omit_fp = 0;
+		new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
+
+		exchange(alloc, new_alloc);
+
+		if(alloc_mem != NULL)
+			set_Proj_proj(alloc_mem, pn_be_AddSP_M);
+
+		/* fix projnum of alloca res */
+		set_Proj_proj(alloc_res, pn_be_AddSP_res);
+
+		addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
+
+		/* copy the address away, since it could be used after further stack pointer modifictions. */
+		/* Let it point curr_sp just for the moment, I'll reroute it in a second. */
+		copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+
+		/* Let all users of the Alloc() result now point to the copy. */
+		edges_reroute(alloc_res, copy, irg);
+
+		/* Rewire the copy appropriately. */
+		set_irn_n(copy, be_pos_Copy_op, addr);
+
+		curr_sp = alloc_res;
+	}
+
+	return curr_sp;
+}
+
+/* the following function is replaced by the usage of the heights module */
+#if 0
+/* NOTE(review): dead code, kept for reference only; dependent_on() below now
+   queries the heights module instead. */
+/**
+ * Walker for dependent_on().
+ * This function searches a node tgt recursively from a given node
+ * but is restricted to the given block.
+ * @param curr the node the search starts from
+ * @param tgt  the node searched for
+ * @param bl   the basic block the search is confined to
+ * @return 1 if tgt was reachable from curr, 0 if not.
+ */
+static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
+{
+ int n, i;
+
+ if (get_nodes_block(curr) != bl)
+ return 0;
+
+ if (curr == tgt)
+ return 1;
+
+ /* Phi functions stop the recursion inside a basic block */
+ if (! is_Phi(curr)) {
+ /* plain DFS over all operands without a visited marker -- potentially
+    exponential, one reason this was replaced by the heights module */
+ for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
+ if (check_dependence(get_irn_n(curr, i), tgt, bl))
+ return 1;
+ }
+ }
+
+ return 0;
+}
+#endif /* if 0 */
+
+/**
+ * Check if a node is somehow data dependent on another one.
+ * Both nodes must live in the same basic block.
+ * @param n1 The first node.
+ * @param n2 The second node.
+ * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
+ */
+static int dependent_on(ir_node *n1, ir_node *n2)
+{
+	assert(get_nodes_block(n1) == get_nodes_block(n2));
+
+	/* answered by the heights module, which replaced the old
+	   hand-written check_dependence() walker above */
+	return heights_reachable_in_block(ir_heights, n1, n2);
+}
+
+/**
+ * qsort() comparator ordering call/alloca nodes by data dependence:
+ * -1 when the first node depends on the second (so it sorts earlier),
+ *  1 for the converse, 0 when the two nodes are independent.
+ */
+static int cmp_call_dependecy(const void *c1, const void *c2)
+{
+	ir_node *n1 = *(ir_node **) c1;
+	ir_node *n2 = *(ir_node **) c2;
+
+	if (dependent_on(n1, n2))
+		return -1;
+
+	return dependent_on(n2, n1) ? 1 : 0;
+}
+
+/**
+ * Walker: chains every Call node and every stack Alloc node into a
+ * list hanging off the link field of the containing block (most
+ * recently visited node first).
+ */
+static void link_calls_in_block_walker(ir_node *irn, void *data)
+{
+	int is_call  = is_Call(irn);
+	int is_stack_alloc = get_irn_opcode(irn) == iro_Alloc
+	                  && get_Alloc_where(irn) == stack_alloc;
+
+	if (is_call || is_stack_alloc) {
+		be_abi_irg_t *env = data;
+		ir_node *bl = get_nodes_block(irn);
+
+		/* a graph containing a Call cannot be a leaf */
+		if (is_call)
+			env->call->flags.bits.irg_is_leaf = 0;
+
+		/* prepend irn to the block's list */
+		set_irn_link(irn, get_irn_link(bl));
+		set_irn_link(bl, irn);
+	}
+}
+
+/**
+ * Block-walker:
+ * Process all Call nodes inside a basic block.
+ * Note that the link field of the block must contain a linked list of all
+ * Call nodes inside the Block. We first order this list according to data dependency
+ * and that connect the calls together.
+ */
+static void process_calls_in_block(ir_node *bl, void *data)
+{
+	be_abi_irg_t *env = data;
+	ir_node *curr_sp = env->init_sp;
+	ir_node *irn;
+	int n;
+
+	/* collect the linked Call/Alloc nodes of this block on the obstack */
+	for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
+		obstack_ptr_grow(&env->obst, irn);
+
+	/* If there were call nodes in the block. */
+	if(n > 0) {
+		ir_node *keep;
+		ir_node **nodes;
+		int i;
+
+		nodes = obstack_finish(&env->obst);
+
+		/* order the call nodes according to data dependency */
+		qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
+
+		for(i = n - 1; i >= 0; --i) {
+			ir_node *irn = nodes[i];
+
+			DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
+			switch(get_irn_opcode(irn)) {
+			case iro_Call:
+				curr_sp = adjust_call(env, irn, curr_sp);
+				break;
+			case iro_Alloc:
+				curr_sp = adjust_alloc(env, irn, curr_sp);
+				break;
+			default:
+				break;
+			}
+		}
+
+		/* Keep the last stack state in the block by tying it to Keep node.
+		   BUGFIX: this must happen before the obstack memory backing nodes[]
+		   is released -- the previous code called obstack_free() first and
+		   then wrote nodes[0] / passed nodes to be_new_Keep(), i.e. accessed
+		   memory the obstack was free to reuse. */
+		nodes[0] = curr_sp;
+		keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
+		pmap_insert(env->keep_map, bl, keep);
+
+		obstack_free(&env->obst, nodes);
+	}
+
+	set_irn_link(bl, curr_sp);
+}
+
+/**
+ * Adjust all call nodes in the graph to the ABI conventions.
+ */
+static void process_calls(be_abi_irg_t *env)
+{
+	ir_graph *irg = env->birg->irg;
+
+	/* assume a leaf graph until link_calls_in_block_walker sees a Call */
+	env->call->flags.bits.irg_is_leaf = 1;
+	irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
+
+	/* the heights module answers the data-dependence queries used when
+	   ordering the calls of a block */
+	ir_heights = heights_new(irg);
+	irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
+	heights_free(ir_heights);
+}
+
+/**
+ * Walker: collects all Return nodes of the graph on the given obstack.
+ */
+static void collect_return_walker(ir_node *irn, void *data)
+{
+	struct obstack *obst = data;
+
+	if (get_irn_opcode(irn) != iro_Return)
+		return;
+
+	obstack_ptr_grow(obst, irn);
+}
+
+#if 0 /*
+NOTE(review): everything below is disabled twice over - by "#if 0" and by
+this enclosing block comment. Kept for reference only.
+static ir_node *setup_frame(be_abi_irg_t *env)
+{
+ const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+ const arch_register_t *sp = isa->sp;
+ const arch_register_t *bp = isa->bp;
+ be_abi_call_flags_bits_t flags = env->call->flags.bits;
+ ir_graph *irg = env->birg->irg;
+ ir_node *bl = get_irg_start_block(irg);
+ ir_node *no_mem = get_irg_no_mem(irg);
+ ir_node *old_frame = get_irg_frame(irg);
+ ir_node *stack = pmap_get(env->regs, (void *) sp);
+ ir_node *frame = pmap_get(env->regs, (void *) bp);
+
+ int stack_nr = get_Proj_proj(stack);
+
+ if(flags.try_omit_fp) {
+ stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+ frame = stack;
+ }
+
+ else {
+ frame = be_new_Copy(bp->reg_class, irg, bl, stack);
+
+ be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
+ if(!flags.fp_free) {
+ be_set_constr_single_reg(frame, -1, bp);
+ be_node_set_flags(frame, -1, arch_irn_flags_ignore);
+ arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
+ }
+
+ stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE, be_stack_dir_expand);
+ }
+
+ be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
+ env->init_sp = stack;
+ set_irg_frame(irg, frame);
+ edges_reroute(old_frame, frame, irg);
+
+ return frame;
+}
+
+static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
+{
+ const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+ const arch_register_t *sp = isa->sp;
+ const arch_register_t *bp = isa->bp;
+ ir_graph *irg = env->birg->irg;
+ ir_node *ret_mem = get_Return_mem(ret);
+ ir_node *frame = get_irg_frame(irg);
+ ir_node *bl = get_nodes_block(ret);
+ ir_node *stack = get_irn_link(bl);
+
+ pmap_entry *ent;
+
+ if(env->call->flags.bits.try_omit_fp) {
+ stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, BE_STACK_FRAME_SIZE, be_stack_dir_shrink);
+ }
+
+ else {
+ stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
+ be_set_constr_single_reg(stack, -1, sp);
+ be_node_set_flags(stack, -1, arch_irn_flags_ignore);
+ }
+
+ pmap_foreach(env->regs, ent) {
+ const arch_register_t *reg = ent->key;
+ ir_node *irn = ent->value;
+
+NOTE(review): the sp branch below grows env->obst while the bp and
+callee-save branches grow the obst parameter - looks inconsistent;
+verify before ever reviving this code.
+ if(reg == sp)
+ obstack_ptr_grow(&env->obst, stack);
+ else if(reg == bp)
+ obstack_ptr_grow(&env->obst, frame);
+ else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
+ obstack_ptr_grow(obst, irn);
+ }
+}
+*/
+#endif
+
+/**
+ * Computes the stack argument layout type.
+ * Changes a possibly allocated value param type by moving
+ * entities to the stack layout type.
+ *
+ * @param env the ABI environment
+ * @param call the current call ABI
+ * @param method_type the method type
+ *
+ * @return the stack argument layout type
+ */
+static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type)
+{
+ /* curr walks the parameters in stack-layout order: upward from 0 or
+    downward from n-1, depending on stack growth direction and whether
+    the convention pushes left-to-right */
+ int dir = env->call->flags.bits.left_to_right ? 1 : -1;
+ int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
+ int n = get_method_n_params(method_type);
+ int curr = inc > 0 ? 0 : n - 1;
+ int ofs = 0;
+
+ char buf[128];
+ ir_type *res;
+ int i;
+ ir_type *val_param_tp = get_method_value_param_type(method_type);
+ ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
+
+ /* the layout type is named after the function it belongs to */
+ res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
+ for (i = 0; i < n; ++i, curr += inc) {
+ ir_type *param_type = get_method_param_type(method_type, curr);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
+
+ if (arg->on_stack) {
+ if (val_param_tp) {
+ /* the entity was already created, move it to the param type */
+ /* NOTE(review): looked up with the loop counter i while the parameter
+    type above uses curr; when inc < 0 these indices differ -- verify
+    this mismatch is intended */
+ arg->stack_ent = get_method_value_param_ent(method_type, i);
+ remove_struct_member(val_param_tp, arg->stack_ent);
+ set_entity_owner(arg->stack_ent, res);
+ add_struct_member(res, arg->stack_ent);
+ /* must be automatic to set a fixed layout */
+ set_entity_allocation(arg->stack_ent, allocation_automatic);
+ }
+ else {
+ snprintf(buf, sizeof(buf), "param_%d", i);
+ arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
+ }
+ /* place the entity at the aligned running offset; ofs then advances
+    past the argument (space_after is added before the size, which is
+    equivalent since only the sum matters for the next entity) */
+ ofs += arg->space_before;
+ ofs = round_up2(ofs, arg->alignment);
+ set_entity_offset_bytes(arg->stack_ent, ofs);
+ ofs += arg->space_after;
+ ofs += get_type_size_bytes(param_type);
+ }
+ }
+ set_type_size_bytes(res, ofs);
+ set_type_state(res, layout_fixed);
+ return res;
+}
+
+/**
+ * Create a Perm node per register class over all non-ignore registers
+ * present in the regs map, constraining each Perm output to its register.
+ *
+ * @param isa  the ISA (supplies the register classes)
+ * @param irg  the graph the Perms are created in
+ * @param bl   the block the Perms are placed in
+ * @param regs map from arch_register_t* to the node holding that register
+ */
+static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
+{
+ int i, j, n;
+ struct obstack obst;
+
+ obstack_init(&obst);
+
+ /* Create a Perm after the RegParams node to delimit it. */
+ for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
+ const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+ ir_node *perm;
+ ir_node **in;
+ int n_regs;
+
+ /* gather the nodes of all non-ignore registers of this class;
+    each node's link field remembers its register for the loop below */
+ for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
+ const arch_register_t *reg = &cls->regs[j];
+ ir_node *irn = pmap_get(regs, (void *) reg);
+
+ if(irn && !arch_register_type_is(reg, ignore)) {
+ n_regs++;
+ obstack_ptr_grow(&obst, irn);
+ set_irn_link(irn, (void *) reg);
+ }
+ }
+
+ /* NULL sentinel so in is a valid (possibly empty) array to free */
+ obstack_ptr_grow(&obst, NULL);
+ in = obstack_finish(&obst);
+ if(n_regs > 0) {
+ perm = be_new_Perm(cls, irg, bl, n_regs, in);
+ for(j = 0; j < n_regs; ++j) {
+ ir_node *arg = in[j];
+ arch_register_t *reg = get_irn_link(arg); /* cast drops const */
+ /* NOTE(review): re-inserts the pre-Perm node under its register key;
+    presumably the Perm's Projs are wired up elsewhere - verify */
+ pmap_insert(regs, reg, arg);
+ be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
+ }
+ }
+ obstack_free(&obst, in);
+ }
+
+ obstack_free(&obst, NULL);
+}
+
+/** Associates an architecture register with the IR node holding its value. */
+typedef struct {
+ const arch_register_t *reg; /**< the register */
+ ir_node *irn; /**< the node carrying the register's value */
+} reg_node_map_t;
+
+static int cmp_regs(const void *a, const void *b)
+{
+ const reg_node_map_t *p = a;
+ const reg_node_map_t *q = b;
+
+ if(p->reg->reg_class == q->reg->reg_class)
+ return p->reg->index - q->reg->index;
+ else
+ return p->reg->reg_class - q->reg->reg_class;