+
+ return curr_sp;
+}
+
+/**
+ * Adjust an alloca.
+ * The alloca is transformed into a back end alloca node and connected to the stack nodes.
+ */
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
+{
+ if (get_Alloc_where(alloc) == stack_alloc) {
+ ir_node *bl = get_nodes_block(alloc);
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *alloc_mem = NULL;
+ ir_node *alloc_res = NULL;
+
+ const ir_edge_t *edge;
+ ir_node *new_alloc;
+ ir_node *addr;
+ ir_node *copy;
+ ir_node *ins[2];
+
+ foreach_out_edge(alloc, edge) {
+ ir_node *irn = get_edge_src_irn(edge);
+
+ assert(is_Proj(irn));
+ switch(get_Proj_proj(irn)) {
+ case pn_Alloc_M:
+ alloc_mem = irn;
+ break;
+ case pn_Alloc_res:
+ alloc_res = irn;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Beware: currently Alloc nodes without a result might happen,
+ only escape analysis kills them and this phase runs only for object
+ oriented source. We kill the Alloc here. */
+ if (alloc_res == NULL && alloc_mem) {
+ exchange(alloc_mem, get_Alloc_mem(alloc));
+ return curr_sp;
+ }
+
+ /* The stack pointer will be modified in an unknown manner.
+ We cannot omit it. */
+ env->call->flags.bits.try_omit_fp = 0;
+ new_alloc = be_new_AddSP(env->isa->sp, irg, bl, curr_sp, get_Alloc_size(alloc));
+
+ if(alloc_mem != NULL) {
+ ir_node *addsp_mem;
+ ir_node *sync;
+
+ addsp_mem = new_r_Proj(irg, bl, new_alloc, mode_M, pn_be_AddSP_M);
+
+ // We need to sync the output mem of the AddSP with the input mem
+ // edge into the alloc node
+ ins[0] = get_Alloc_mem(alloc);
+ ins[1] = addsp_mem;
+ sync = new_r_Sync(irg, bl, 2, ins);
+
+ exchange(alloc_mem, sync);
+ }
+
+ exchange(alloc, new_alloc);
+
+ /* fix projnum of alloca res */
+ set_Proj_proj(alloc_res, pn_be_AddSP_res);
+
+ addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
+
+ /* copy the address away, since it could be used after further stack pointer modifications. */
+ /* Let it point curr_sp just for the moment, I'll reroute it in a second. */
+ *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, bl, curr_sp);
+
+ /* Let all users of the Alloc() result now point to the copy. */
+ edges_reroute(alloc_res, copy, irg);
+
+ /* Rewire the copy appropriately. */
+ set_irn_n(copy, be_pos_Copy_op, addr);
+
+ curr_sp = alloc_res;
+ }
+ return curr_sp;
+} /* adjust_alloc */
+
+/**
+ * Adjust a Free.
+ * The Free is transformed into a back end free node and connected to the stack nodes.
+ */
+static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
+{
+ if (get_Free_where(free) == stack_alloc) {
+ ir_node *bl = get_nodes_block(free);
+ ir_graph *irg = get_irn_irg(bl);
+ ir_node *addsp, *mem, *res;
+
+ /* The stack pointer will be modified in an unknown manner.
+ We cannot omit it. */
+ env->call->flags.bits.try_omit_fp = 0;
+ addsp = be_new_SubSP(env->isa->sp, irg, bl, curr_sp, get_Free_size(free));
+
+ mem = new_r_Proj(irg, bl, addsp, mode_M, pn_be_SubSP_M);
+ res = new_r_Proj(irg, bl, addsp, mode_P_data, pn_be_SubSP_res);
+
+ exchange(free, mem);
+ curr_sp = res;
+ }
+ return curr_sp;
+} /* adjust_free */
+
/* the following function is replaced by the usage of the heights module */
#if 0
/**
 * Walker for dependent_on().
 * This function searches a node tgt recursively from a given node
 * but is restricted to the given block.
 * @return 1 if tgt was reachable from curr, 0 if not.
 */
static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
{
	int n, i;

	/* Never leave the basic block we started in. */
	if (get_nodes_block(curr) != bl)
		return 0;

	/* Found the target node. */
	if (curr == tgt)
		return 1;

	/* Phi functions stop the recursion inside a basic block */
	if (! is_Phi(curr)) {
		/* Recurse over all data predecessors of curr. */
		for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
			if (check_dependence(get_irn_n(curr, i), tgt, bl))
				return 1;
		}
	}

	return 0;
}
#endif /* if 0 */
+
+/**
+ * Check if a node is somehow data dependent on another one.
+ * both nodes must be in the same basic block.
+ * @param n1 The first node.
+ * @param n2 The second node.
+ * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
+ */
+static int dependent_on(ir_node *n1, ir_node *n2)
+{
+ ir_node *bl = get_nodes_block(n1);
+
+ assert(bl == get_nodes_block(n2));
+
+ return heights_reachable_in_block(ir_heights, n1, n2);
+ //return check_dependence(n1, n2, bl);
+}
+
+static int cmp_call_dependecy(const void *c1, const void *c2)
+{
+ ir_node *n1 = *(ir_node **) c1;
+ ir_node *n2 = *(ir_node **) c2;
+
+ /*
+ Classical qsort() comparison function behavior:
+ 0 if both elements are equal
+ 1 if second is "smaller" that first
+ -1 if first is "smaller" that second
+ */
+ if (dependent_on(n1, n2))
+ return -1;
+
+ if (dependent_on(n2, n1))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Walker: links all Call/alloc/Free nodes to the Block they are contained.
+ */
+static void link_calls_in_block_walker(ir_node *irn, void *data)
+{
+ opcode code = get_irn_opcode(irn);
+
+ if (code == iro_Call ||
+ (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
+ (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
+ be_abi_irg_t *env = data;
+ ir_node *bl = get_nodes_block(irn);
+ void *save = get_irn_link(bl);
+
+ if (code == iro_Call)
+ env->call->flags.bits.irg_is_leaf = 0;
+
+ set_irn_link(irn, save);
+ set_irn_link(bl, irn);
+ }
+}
+
+/**
+ * Block-walker:
+ * Process all Call nodes inside a basic block.
+ * Note that the link field of the block must contain a linked list of all
+ * Call nodes inside the Block. We first order this list according to data dependency
+ * and that connect the calls together.
+ */
+static void process_calls_in_block(ir_node *bl, void *data)
+{
+ be_abi_irg_t *env = data;
+ ir_node *curr_sp = env->init_sp;
+ ir_node *irn;
+ int n;
+
+ for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
+ obstack_ptr_grow(&env->obst, irn);
+
+ /* If there were call nodes in the block. */
+ if(n > 0) {
+ ir_node *keep;
+ ir_node **nodes;
+ ir_node *copy = NULL;
+ int i;
+
+ nodes = obstack_finish(&env->obst);
+
+ /* order the call nodes according to data dependency */
+ qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
+
+ for(i = n - 1; i >= 0; --i) {
+ ir_node *irn = nodes[i];
+
+ DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
+ switch(get_irn_opcode(irn)) {
+ case iro_Call:
+ curr_sp = adjust_call(env, irn, curr_sp, copy);
+ break;
+ case iro_Alloc:
+ curr_sp = adjust_alloc(env, irn, curr_sp, ©);
+ break;
+ case iro_Free:
+ curr_sp = adjust_free(env, irn, curr_sp);
+ break;
+ default:
+ break;
+ }
+ }
+
+ obstack_free(&env->obst, nodes);
+
+ /* Keep the last stack state in the block by tying it to Keep node */
+ nodes[0] = curr_sp;
+ keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
+ pmap_insert(env->keep_map, bl, keep);
+ }
+
+ set_irn_link(bl, curr_sp);
+} /* process_calls_in_block */
+
+/**
+ * Adjust all call nodes in the graph to the ABI conventions.
+ */
+static void process_calls(be_abi_irg_t *env)
+{
+ ir_graph *irg = env->birg->irg;
+
+ env->call->flags.bits.irg_is_leaf = 1;
+ irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
+
+ ir_heights = heights_new(env->birg->irg);
+ irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
+ heights_free(ir_heights);