+ ir_node *block;
+ ir_graph *irg;
+ ir_node *subsp, *mem, *res, *size, *sync;
+ ir_type *type;
+ ir_node *in[2];
+ ir_mode *sp_mode;
+ unsigned stack_alignment;
+ dbg_info *dbg;
+
+ if (get_Free_where(free) != stack_alloc) {
+ assert(0);
+ return free;
+ }
+
+ block = get_nodes_block(free);
+ irg = get_irn_irg(block);
+ type = get_Free_type(free);
+ sp_mode = env->isa->sp->reg_class->mode;
+ dbg = get_irn_dbg_info(free);
+
+ /* we might need to multiply the size with the element size */
+ if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
+ tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
+ ir_node *cnst = new_rd_Const(dbg, irg, block, mode_Iu, tv);
+ ir_node *mul = new_rd_Mul(dbg, irg, block, get_Free_size(free),
+ cnst, mode_Iu);
+ size = mul;
+ } else {
+ size = get_Free_size(free);
+ }
+
+ /* FIXME: size must be here round up for the stack alignment, but
+ this must be transmitted from the backend. */
+ stack_alignment = 4;
+ size = adjust_alloc_size(stack_alignment, size, irg, block, dbg);
+
+ /* The stack pointer will be modified in an unknown manner.
+ We cannot omit it. */
+ env->call->flags.bits.try_omit_fp = 0;
+ subsp = be_new_SubSP(env->isa->sp, irg, block, curr_sp, size);
+ set_irn_dbg_info(subsp, dbg);
+
+ mem = new_r_Proj(irg, block, subsp, mode_M, pn_be_SubSP_M);
+ res = new_r_Proj(irg, block, subsp, sp_mode, pn_be_SubSP_sp);
+
+ /* we need to sync the memory */
+ in[0] = get_Free_mem(free);
+ in[1] = mem;
+ sync = new_r_Sync(irg, block, 2, in);
+
+ /* and make the AddSP dependent on the former memory */
+ add_irn_dep(subsp, get_Free_mem(free));
+
+ /* kill the free */
+ exchange(free, sync);
+ curr_sp = res;
+
+ return curr_sp;
+} /* adjust_free */
+
+/* the following function is replaced by the usage of the heights module */
+#if 0
+/**
+ * Walker for dependent_on().
+ * Recursively looks for the node tgt starting from curr without ever
+ * leaving the basic block bl.
+ * @return 1 if tgt was reachable from curr, 0 if not.
+ */
+static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
+{
+	int i;
+
+	if (get_nodes_block(curr) != bl)
+		return 0;
+	if (curr == tgt)
+		return 1;
+
+	/* Phi functions stop the recursion inside a basic block */
+	if (is_Phi(curr))
+		return 0;
+
+	for (i = get_irn_arity(curr) - 1; i >= 0; --i) {
+		if (check_dependence(get_irn_n(curr, i), tgt, bl))
+			return 1;
+	}
+
+	return 0;
+}
+#endif /* if 0 */
+
+/**
+ * Tests whether one node is (transitively) data dependent on another.
+ * Both nodes have to lie in the same basic block.
+ *
+ * @param n1 The potentially dependent node.
+ * @param n2 The node n1 may depend on.
+ * @return 1 if n1 is data dependent (transitively) on n2, 0 otherwise.
+ */
+static int dependent_on(ir_node *n1, ir_node *n2)
+{
+	assert(get_nodes_block(n2) == get_nodes_block(n1));
+
+	/* delegate the block-local reachability query to the heights module */
+	return heights_reachable_in_block(ir_heights, n1, n2);
+}
+
+/**
+ * qsort() comparator for the Call/Alloc/Free scheduling array:
+ * a node sorts before the nodes it (transitively) depends on.
+ * Independent nodes compare equal.
+ */
+static int cmp_call_dependency(const void *c1, const void *c2)
+{
+	ir_node *n1 = *(ir_node **) c1;
+	ir_node *n2 = *(ir_node **) c2;
+
+	/* Standard qsort() contract: negative if the first element sorts
+	   before the second, positive if it sorts after, 0 if no order
+	   between the two is required. */
+	if (dependent_on(n1, n2))
+		return -1;
+	if (dependent_on(n2, n1))
+		return 1;
+	return 0;
+}
+
+/**
+ * Walker: links all Call nodes (and all Alloc/Free nodes that operate on
+ * the stack) into a list hanging off the link field of their block.
+ */
+static void link_calls_in_block_walker(ir_node *irn, void *data)
+{
+	ir_opcode code = get_irn_opcode(irn);
+	int interesting;
+
+	switch (code) {
+	case iro_Call:
+		interesting = 1;
+		break;
+	case iro_Alloc:
+		interesting = get_Alloc_where(irn) == stack_alloc;
+		break;
+	case iro_Free:
+		interesting = get_Free_where(irn) == stack_alloc;
+		break;
+	default:
+		interesting = 0;
+		break;
+	}
+
+	if (interesting) {
+		be_abi_irg_t *env = data;
+		ir_node *bl = get_nodes_block(irn);
+
+		/* the presence of a Call makes the graph a non-leaf */
+		if (code == iro_Call)
+			env->call->flags.bits.irg_is_leaf = 0;
+
+		/* prepend irn to the block's list of stack-relevant nodes */
+		set_irn_link(irn, get_irn_link(bl));
+		set_irn_link(bl, irn);
+	}
+}
+
+/**
+ * Block-walker:
+ * Process all Call/Alloc/Free nodes inside a basic block.
+ * Note that the link field of the block must contain a linked list of all
+ * these nodes inside the Block (built by link_calls_in_block_walker). We
+ * first order this list according to data dependency and then connect the
+ * nodes through the current stack pointer.
+ * On exit the block's link field holds the last stack pointer state.
+ */
+static void process_calls_in_block(ir_node *bl, void *data)
+{
+	be_abi_irg_t *env = data;
+	ir_node *curr_sp = env->init_sp;
+	ir_node *irn;
+	int n;
+
+	/* collect the linked nodes of this block on the obstack */
+	for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
+		obstack_ptr_grow(&env->obst, irn);
+
+	/* If there were call nodes in the block. */
+	if(n > 0) {
+		ir_node *keep;
+		ir_node **nodes;
+		int i;
+
+		nodes = obstack_finish(&env->obst);
+
+		/* order the call nodes according to data dependency */
+		qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependency);
+
+		/* walk backwards: the comparator sorts a node before the
+		   nodes it depends on, so dependencies are processed first */
+		for(i = n - 1; i >= 0; --i) {
+			ir_node *irn = nodes[i];
+
+			DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
+			switch(get_irn_opcode(irn)) {
+			case iro_Call:
+				curr_sp = adjust_call(env, irn, curr_sp);
+				break;
+			case iro_Alloc:
+				curr_sp = adjust_alloc(env, irn, curr_sp);
+				break;
+			case iro_Free:
+				curr_sp = adjust_free(env, irn, curr_sp);
+				break;
+			default:
+				panic("invalid call");
+				break;
+			}
+		}
+
+		/* Keep the last stack state in the block by tying it to Keep node */
+		if(curr_sp != env->init_sp) {
+			nodes[0] = curr_sp;
+			keep = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl),
+			                   bl, 1, nodes);
+			pmap_insert(env->keep_map, bl, keep);
+		}
+
+		/* BUGFIX: free the array only after its last use. The original
+		   code released it to the obstack before writing nodes[0] and
+		   handing it to be_new_Keep() -- a use after (obstack-)free. */
+		obstack_free(&env->obst, nodes);
+	}
+
+	set_irn_link(bl, curr_sp);
+} /* process_calls_in_block */
+
+/**
+ * Adjust all call nodes in the graph to the ABI conventions.
+ */
+static void process_calls(be_abi_irg_t *env)
+{
+	ir_graph *irg = env->birg->irg;
+
+	/* assume a leaf graph until the walker finds a Call node */
+	env->call->flags.bits.irg_is_leaf = 1;
+	irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
+
+	/* the heights module answers the data-dependency queries used
+	   while ordering the calls per block */
+	ir_heights = heights_new(irg);
+	irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
+	heights_free(ir_heights);
+}
+
+/**
+ * Computes the stack argument layout type.
+ * Changes a possibly allocated value param type by moving
+ * entities to the stack layout type.
+ *
+ * @param env the ABI environment
+ * @param call the current call ABI
+ * @param method_type the method type
+ * @param param_map an array mapping method arguments to the stack layout type
+ *
+ * @return the stack argument layout type
+ */
+static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, ir_entity ***param_map)
+{
+ /* walk the parameters forwards or backwards, depending on the calling
+    convention (left_to_right) and the stack growth direction */
+ int dir = env->call->flags.bits.left_to_right ? 1 : -1;
+ int inc = env->birg->main_env->arch_env->isa->stack_dir * dir;
+ int n = get_method_n_params(method_type);
+ int curr = inc > 0 ? 0 : n - 1;
+ int ofs = 0;
+
+ char buf[128];
+ ir_type *res;
+ int i;
+ ir_type *val_param_tp = get_method_value_param_type(method_type);
+ ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
+ ir_entity **map;
+
+ /* one map slot per parameter; slots of non-stack arguments stay NULL */
+ *param_map = map = obstack_alloc(&env->obst, n * sizeof(ir_entity *));
+ res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
+ for (i = 0; i < n; ++i, curr += inc) {
+ ir_type *param_type = get_method_param_type(method_type, curr);
+ be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
+
+ map[i] = NULL;
+ if (arg->on_stack) {
+ if (val_param_tp) {
+ /* the entity was already created, move it to the param type */
+ /* NOTE(review): the entity is looked up with i while the
+    parameter itself is addressed with curr; for reversed
+    walks (inc < 0) these indices differ -- confirm intended */
+ arg->stack_ent = get_method_value_param_ent(method_type, i);
+ remove_struct_member(val_param_tp, arg->stack_ent);
+ set_entity_owner(arg->stack_ent, res);
+ add_struct_member(res, arg->stack_ent);
+ /* must be automatic to set a fixed layout */
+ set_entity_allocation(arg->stack_ent, allocation_automatic);
+ }
+ else {
+ /* no value param type available: create a fresh entity */
+ snprintf(buf, sizeof(buf), "param_%d", i);
+ arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
+ }
+ /* place the entity: optional padding before, then align; afterwards
+    advance past space_after and the argument size */
+ ofs += arg->space_before;
+ ofs = round_up2(ofs, arg->alignment);
+ set_entity_offset(arg->stack_ent, ofs);
+ ofs += arg->space_after;
+ ofs += get_type_size_bytes(param_type);
+ map[i] = arg->stack_ent;
+ }
+ }
+ /* the accumulated offset is the total size of the argument area */
+ set_type_size_bytes(res, ofs);
+ set_type_state(res, layout_fixed);
+ return res;
+}
+
+#if 0
+/**
+ * For every register class: builds a Perm over all nodes found in the regs
+ * map (ignore registers excluded) and constrains each Perm output to its
+ * register. Currently disabled -- kept for reference only.
+ */
+static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
+{
+ int i, j, n;
+ struct obstack obst;
+
+ obstack_init(&obst);
+
+ /* Create a Perm after the RegParams node to delimit it. */
+ for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
+ const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+ ir_node *perm;
+ ir_node **in;
+ int n_regs;
+
+ /* collect the mapped, non-ignore registers of this class */
+ for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
+ const arch_register_t *reg = &cls->regs[j];
+ ir_node *irn = pmap_get(regs, (void *) reg);
+
+ if(irn && !arch_register_type_is(reg, ignore)) {
+ n_regs++;
+ obstack_ptr_grow(&obst, irn);
+ /* remember the register on the node for the loop below */
+ set_irn_link(irn, (void *) reg);
+ }
+ }
+
+ /* NULL-terminate the collected input array */
+ obstack_ptr_grow(&obst, NULL);
+ in = obstack_finish(&obst);
+ if(n_regs > 0) {
+ perm = be_new_Perm(cls, irg, bl, n_regs, in);
+ for(j = 0; j < n_regs; ++j) {
+ ir_node *arg = in[j];
+ arch_register_t *reg = get_irn_link(arg);
+ /* NOTE(review): this re-inserts the Perm *input* rather than a
+    Proj of the Perm -- looks suspicious, but the code is dead */
+ pmap_insert(regs, reg, arg);
+ be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
+ }
+ }
+ /* release this class' input array back to the local obstack */
+ obstack_free(&obst, in);
+ }
+
+ obstack_free(&obst, NULL);
+}
+#endif
+
+/** A (register, node) pair, used to flatten register pmaps into a
+    sortable array (see reg_map_to_arr()/cmp_regs()). */
+typedef struct {
+ const arch_register_t *reg; /**< the register */
+ ir_node *irn; /**< the node associated with the register */
+} reg_node_map_t;
+
+/**
+ * qsort() comparator for reg_node_map_t entries: orders primarily by
+ * register class, secondarily by register index within the class.
+ */
+static int cmp_regs(const void *a, const void *b)
+{
+	const reg_node_map_t *p = a;
+	const reg_node_map_t *q = b;
+
+	if(p->reg->reg_class != q->reg->reg_class)
+		return p->reg->reg_class - q->reg->reg_class;
+
+	return p->reg->index - q->reg->index;
+}
+
+/**
+ * Flattens a pmap (register -> node) into an array of reg_node_map_t
+ * pairs allocated on the given obstack, sorted with cmp_regs().
+ * The caller releases the array via obstack_free().
+ */
+static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
+{
+	int n = pmap_count(reg_map);
+	reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
+	reg_node_map_t *cur = res;
+	pmap_entry *ent;
+
+	pmap_foreach(reg_map, ent) {
+		cur->reg = ent->key;
+		cur->irn = ent->value;
+		++cur;
+	}
+
+	qsort(res, n, sizeof(res[0]), cmp_regs);
+	return res;
+}
+
+/**
+ * Creates a barrier.
+ *
+ * All nodes in the regs map (plus, optionally, the current memory) become
+ * inputs of the barrier; each value is re-created as a Proj of the barrier
+ * and the map is updated in place to point to the new Projs. Register
+ * constraints and ignore/modify_sp flags are copied to the barrier.
+ *
+ * @param env    the ABI environment
+ * @param bl     the block to place the barrier in
+ * @param mem    in/out: current memory; replaced by the barrier's memory
+ *               Proj (may be NULL if no memory shall pass the barrier)
+ * @param regs   map register -> node, updated to the barrier Projs
+ * @param in_req if non-zero, additionally constrain the barrier inputs to
+ *               their single registers
+ * @return the new barrier node
+ */
+static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
+{
+	ir_graph *irg = env->birg->irg;
+	int n_regs = pmap_count(regs);
+	int n;
+	ir_node *irn;
+	ir_node **in;
+	reg_node_map_t *rm;
+
+	rm = reg_map_to_arr(&env->obst, regs);
+
+	for(n = 0; n < n_regs; ++n)
+		obstack_ptr_grow(&env->obst, rm[n].irn);
+
+	if(mem) {
+		/* the memory is appended behind the register inputs */
+		obstack_ptr_grow(&env->obst, *mem);
+		n++;
+	}
+
+	in = (ir_node **) obstack_finish(&env->obst);
+	irn = be_new_Barrier(irg, bl, n, in);
+
+	for(n = 0; n < n_regs; ++n) {
+		const arch_register_t *reg = rm[n].reg;
+		int flags = 0;
+		int pos = BE_OUT_POS(n);
+		ir_node *proj;
+
+		proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
+		be_node_set_reg_class(irn, n, reg->reg_class);
+		if(in_req)
+			be_set_constr_single_reg(irn, n, reg);
+		be_set_constr_single_reg(irn, pos, reg);
+		be_node_set_reg_class(irn, pos, reg->reg_class);
+		arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
+
+		/* if the proj projects a ignore register or a node which is set to ignore, propagate this property. */
+		if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
+			flags |= arch_irn_flags_ignore;
+
+		if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
+			flags |= arch_irn_flags_modify_sp;
+
+		be_node_set_flags(irn, pos, flags);
+
+		pmap_insert(regs, (void *) reg, proj);
+	}
+
+	if(mem) {
+		/* n == n_regs here, the Proj number of the appended memory input */
+		*mem = new_r_Proj(irg, bl, irn, mode_M, n);
+	}
+
+	/* BUGFIX: the original code released in[] right after be_new_Barrier()
+	   and then still read in[n] inside the loop above -- a use after
+	   (obstack-)free. Freeing rm (allocated before in[] on the same
+	   obstack) releases the in[] array as well. */
+	obstack_free(&env->obst, rm);
+	return irn;
+}
+
+/**
+ * Creates a be_Return for a Return node.
+ *
+ * @param @env the abi environment
+ * @param irn the Return node or NULL if there was none
+ * @param bl the block where the be_Retun should be placed
+ * @param mem the current memory
+ * @param n_res number of return results
+ */
+static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
+ be_abi_call_t *call = env->call;