- Split bearch.h correctly into bearch.h and bearch_t.h
diff --git a/ir/be/beabi.c b/ir/be/beabi.c
index 886321f..0f62ac7 100644
@@ -1,14 +1,19 @@
 /**
  * ABI lowering.
  *
- *
- *
+ * @author Sebastian Hack
+ * @date   7.3.2005
+ * @cvsid  $Id$
  */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
 
-#include "firm_config.h"
 #include "obst.h"
+#include "offset.h"
 
 #include "type.h"
+#include "irgopt.h"
 
 #include "irgraph_t.h"
 #include "irnode_t.h"
 #include "iredges_t.h"
 #include "irgmod.h"
 #include "irgwalk.h"
+#include "irprintf_t.h"
+#include "irgopt.h"
+#include "irbitset.h"
+#include "height.h"
+#include "pdeq.h"
+#include "irtools.h"
+#include "raw_bitset.h"
 
 #include "be.h"
 #include "beabi.h"
-#include "bearch.h"
+#include "bearch_t.h"
 #include "benode_t.h"
+#include "belive_t.h"
 #include "besched_t.h"
-
-#define MAX(x, y) ((x) > (y) ? (x) : (y))
-#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#include "beirg_t.h"
+#include "bessaconstr.h"
 
 typedef struct _be_abi_call_arg_t {
-       unsigned is_res : 1;
-       unsigned in_reg : 1;
+       unsigned is_res   : 1;  /**< 1: the call argument is a return value. 0: it's a call parameter. */
+       unsigned in_reg   : 1;  /**< 1: this argument is transmitted in registers. */
+       unsigned on_stack : 1;  /**< 1: this argument is transmitted on the stack. */
 
        int pos;
        const arch_register_t *reg;
+       ir_entity *stack_ent;
+       unsigned alignment;
+       unsigned space_before;
+       unsigned space_after;
 } be_abi_call_arg_t;
 
 struct _be_abi_call_t {
-       be_abi_call_flags_t flags;
-       unsigned arg_gap;
-       set *params;
+       be_abi_call_flags_t         flags;
+       const be_abi_callbacks_t    *cb;
+       ir_type                     *between_type;
+       set                         *params;
+       const arch_register_class_t *cls_addr;
 };
 
 struct _be_abi_irg_t {
-       struct obstack      obst;
-       be_irg_t            *birg;
-       be_abi_call_t       *call;
-       type                *method_type;
+       struct obstack       obst;
+       be_stack_layout_t    *frame;        /**< The stack frame model. */
+       be_irg_t             *birg;         /**< The back end IRG. */
+       const arch_isa_t     *isa;          /**< The isa. */
+       survive_dce_t        *dce_survivor;
 
-       ir_node             *init_sp;      /**< The node representing the stack pointer
+       be_abi_call_t        *call;         /**< The ABI call information. */
+       ir_type              *method_type;  /**< The type of the method of the IRG. */
+
+       ir_node              *init_sp;      /**< The node representing the stack pointer
                                                                             at the start of the function. */
 
-       ir_node             *reg_params;
+       ir_node              *start_barrier; /**< The barrier of the start block */
+
+       ir_node              *reg_params;   /**< The reg params node. */
+       pmap                 *regs;         /**< A map of all callee-save and ignore regs to
+                                                                                       their Projs to the RegParams node. */
+
+       int                  start_block_bias;  /**< The stack bias at the end of the start block. */
+
+       void                 *cb;           /**< ABI Callback self pointer. */
 
-       pset                *stack_ops;    /**< Contains all nodes modifying the stack pointer. */
-       pmap                *regs;
+       pmap                 *keep_map;     /**< mapping blocks to keep nodes. */
+       pset                 *ignore_regs;  /**< Additional registers which shall be ignored. */
 
-       int start_block_bias;
+       ir_node             **calls;        /**< flexible array containing all be_Call nodes */
 
-       unsigned omit_fp : 1;
-       unsigned dedicated_fp : 1;
-       unsigned left_to_right : 1;
+       arch_register_req_t sp_req;
+       arch_register_req_t sp_cls_req;
 
-       firm_dbg_module_t *dbg;            /**< The debugging module. */
+       DEBUG_ONLY(firm_dbg_module_t    *dbg;)          /**< The debugging module. */
 };
 
+static heights_t *ir_heights;
+
+/* Flag: if set, try to omit the frame pointer if called by the backend */
+static int be_omit_fp = 1;
+
+/*
+     _    ____ ___    ____      _ _ _                _
+    / \  | __ )_ _|  / ___|__ _| | | |__   __ _  ___| | _____
+   / _ \ |  _ \| |  | |   / _` | | | '_ \ / _` |/ __| |/ / __|
+  / ___ \| |_) | |  | |__| (_| | | | |_) | (_| | (__|   <\__ \
+ /_/   \_\____/___|  \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/
+
+  These callbacks are used by the backend to set the parameters
+  for a specific call type.
+*/
+
+/**
+ * Set compare function: compares two ABI call object arguments.
+ */
 static int cmp_call_arg(const void *a, const void *b, size_t n)
 {
        const be_abi_call_arg_t *p = a, *q = b;
        return !(p->is_res == q->is_res && p->pos == q->pos);
 }
 
+/**
+ * Get or set an ABI call object argument.
+ *
+ * @param call      the abi call
+ * @param is_res    true for call results, false for call arguments
+ * @param pos       position of the argument
+ * @param do_insert true if the argument is set, false if it's retrieved
+ */
 static be_abi_call_arg_t *get_or_set_call_arg(be_abi_call_t *call, int is_res, int pos, int do_insert)
 {
        be_abi_call_arg_t arg;
        unsigned hash;
 
+       memset(&arg, 0, sizeof(arg));
        arg.is_res = is_res;
        arg.pos    = pos;
 
-       hash = is_res * 100 + pos;
+       hash = is_res * 128 + pos;
 
        return do_insert
                ? set_insert(call->params, &arg, sizeof(arg), hash)
                : set_find(call->params, &arg, sizeof(arg), hash);
 }
 
+/**
+ * Retrieve an ABI call object argument.
+ *
+ * @param call      the ABI call object
+ * @param is_res    true for call results, false for call arguments
+ * @param pos       position of the argument
+ */
 static INLINE be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos)
 {
        return get_or_set_call_arg(call, is_res, pos, 0);
 }
 
-void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, unsigned arg_gap)
+/* Set the flags for a call. */
+void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
+{
+       call->flags = flags;
+       call->cb    = cb;
+}
+
+
+/* Set register class for call address */
+void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
 {
-       call->flags   = flags;
-       call->arg_gap = arg_gap;
+       call->cls_addr = cls;
 }
 
-void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos)
+
+void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos, unsigned alignment, unsigned space_before, unsigned space_after)
 {
        be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
+       arg->on_stack     = 1;
+       arg->alignment    = alignment;
+       arg->space_before = space_before;
+       arg->space_after  = space_after;
+       assert(alignment > 0 && "Alignment must be greater than 0");
 }
 
 void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
 {
        be_abi_call_arg_t *arg = get_or_set_call_arg(call, 0, arg_pos, 1);
+       arg->in_reg = 1;
        arg->reg = reg;
 }
 
 void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg)
 {
        be_abi_call_arg_t *arg = get_or_set_call_arg(call, 1, arg_pos, 1);
+       arg->in_reg = 1;
        arg->reg = reg;
 }
 
-be_abi_call_t *be_abi_call_new(void)
+/* Get the flags of an ABI call object. */
+be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
+{
+       return call->flags;
+}
+
+/**
+ * Constructor for a new ABI call object.
+ *
+ * @return the new ABI call object
+ */
+static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
 {
-       be_abi_call_t *call = malloc(sizeof(call[0]));
-       call->flags  = BE_ABI_NONE;
-       call->params = new_set(cmp_call_arg, 16);
+       be_abi_call_t *call = xmalloc(sizeof(call[0]));
+
+       call->flags.val  = 0;
+       call->params     = new_set(cmp_call_arg, 16);
+       call->cb         = NULL;
+       call->cls_addr   = cls_addr;
+
+       call->flags.bits.try_omit_fp = be_omit_fp;
+
        return call;
 }
 
-void be_abi_call_free(be_abi_call_t *call)
+/**
+ * Destructor for an ABI call object.
+ */
+static void be_abi_call_free(be_abi_call_t *call)
 {
        del_set(call->params);
        free(call);
 }
 
+/*
+  _____                           _   _                 _ _ _
+ |  ___| __ __ _ _ __ ___   ___  | | | | __ _ _ __   __| | (_)_ __   __ _
+ | |_ | '__/ _` | '_ ` _ \ / _ \ | |_| |/ _` | '_ \ / _` | | | '_ \ / _` |
+ |  _|| | | (_| | | | | | |  __/ |  _  | (_| | | | | (_| | | | | | | (_| |
+ |_|  |_|  \__,_|_| |_| |_|\___| |_| |_|\__,_|_| |_|\__,_|_|_|_| |_|\__, |
+                                                                    |___/
+
+  Handling of the stack frame. It is composed of three types:
+  1) The type of the arguments which are pushed on the stack.
+  2) The "between type" which consists of stuff the call of the
+     function pushes on the stack (like the return address and
+        the old base pointer for ia32).
+  3) The Firm frame type which consists of all local variables
+     and the spills.
+*/
+
+static int get_stack_entity_offset(be_stack_layout_t *frame, ir_entity *ent, int bias)
+{
+       ir_type *t = get_entity_owner(ent);
+       int ofs    = get_entity_offset(ent);
+
+       int i, index;
+
+       /* Find the type the entity is contained in. */
+       for(index = 0; index < N_FRAME_TYPES; ++index) {
+               if(frame->order[index] == t)
+                       break;
+       }
+
+       /* Add the sizes of all the types below the entity's type to the entity's offset */
+       for(i = 0; i < index; ++i)
+               ofs += get_type_size_bytes(frame->order[i]);
+
+       /* correct the offset by the initial position of the frame pointer */
+       ofs -= frame->initial_offset;
+
+       /* correct the offset with the current bias. */
+       ofs += bias;
+
+       return ofs;
+}
+
+/**
+ * Retrieve the entity with given offset from a frame type.
+ */
+static ir_entity *search_ent_with_offset(ir_type *t, int offset)
+{
+       int i, n;
+
+       for(i = 0, n = get_compound_n_members(t); i < n; ++i) {
+               ir_entity *ent = get_compound_member(t, i);
+               if(get_entity_offset(ent) == offset)
+                       return ent;
+       }
+
+       return NULL;
+}
+
+static int stack_frame_compute_initial_offset(be_stack_layout_t *frame)
+{
+       ir_type  *base = frame->stack_dir < 0 ? frame->between_type : frame->frame_type;
+       ir_entity *ent = search_ent_with_offset(base, 0);
+
+       frame->initial_offset = ent ? get_stack_entity_offset(frame, ent, 0) : 0;
+
+       return frame->initial_offset;
+}
+
+/**
+ * Initializes the frame layout from parts
+ *
+ * @param frame     the stack layout that will be initialized
+ * @param args      the stack argument layout type
+ * @param between   the between layout type
+ * @param locals    the method frame type
+ * @param stack_dir the stack direction
+ * @param param_map an array mapping method argument positions to the stack argument type
+ *
+ * @return the initialized stack layout
+ */
+static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
+                                           ir_type *between, ir_type *locals, int stack_dir,
+                                           ir_entity *param_map[])
+{
+       frame->arg_type       = args;
+       frame->between_type   = between;
+       frame->frame_type     = locals;
+       frame->initial_offset = 0;
+       frame->stack_dir      = stack_dir;
+       frame->order[1]       = between;
+       frame->param_map      = param_map;
+
+       if(stack_dir > 0) {
+               frame->order[0] = args;
+               frame->order[2] = locals;
+       }
+       else {
+               frame->order[0] = locals;
+               frame->order[2] = args;
+       }
+       return frame;
+}
+
+#if 0
+/** Dumps the stack layout to file. */
+static void stack_layout_dump(FILE *file, be_stack_layout_t *frame)
+{
+       int i, j, n;
+
+       ir_fprintf(file, "initial offset: %d\n", frame->initial_offset);
+       for (j = 0; j < N_FRAME_TYPES; ++j) {
+               ir_type *t = frame->order[j];
+
+               ir_fprintf(file, "type %d: %F size: %d\n", j, t, get_type_size_bytes(t));
+               for (i = 0, n = get_compound_n_members(t); i < n; ++i) {
+                       ir_entity *ent = get_compound_member(t, i);
+                       ir_fprintf(file, "\t%F int ofs: %d glob ofs: %d\n", ent, get_entity_offset_bytes(ent), get_stack_entity_offset(frame, ent, 0));
+               }
+       }
+}
+#endif
+
+/**
+ * Returns non-zero if the call argument at the given position
+ * is transferred on the stack.
+ */
 static INLINE int is_on_stack(be_abi_call_t *call, int pos)
 {
        be_abi_call_arg_t *arg = get_call_arg(call, 0, pos);
        return arg && !arg->in_reg;
 }
 
-static void adjust_call(be_abi_irg_t *env, ir_node *irn)
+/*
+   ____      _ _
+  / ___|__ _| | |___
+ | |   / _` | | / __|
+ | |__| (_| | | \__ \
+  \____\__,_|_|_|___/
+
+  Adjustment of the calls inside a graph.
+
+*/
+
+/**
+ * Transform a call node.
+ * @param env The ABI environment for the current irg.
+ * @param irn The call node.
+ * @param curr_sp The stack pointer node to use.
+ * @return The stack pointer after the call.
+ */
+static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp, ir_node *alloca_copy)
 {
        ir_graph *irg             = env->birg->irg;
-       const arch_isa_t *isa     = env->birg->main_env->arch_env->isa;
-       be_abi_call_t *call       = be_abi_call_new();
+       const arch_env_t *arch_env = env->birg->main_env->arch_env;
+       const arch_isa_t *isa     = arch_env->isa;
        ir_type *mt               = get_Call_type(irn);
+       ir_node *call_ptr         = get_Call_ptr(irn);
        int n_params              = get_method_n_params(mt);
-       ir_node *curr_sp          = get_irg_frame(irg);
        ir_node *curr_mem         = get_Call_mem(irn);
        ir_node *bl               = get_nodes_block(irn);
        pset *results             = pset_new_ptr(8);
        pset *caller_save         = pset_new_ptr(8);
+       pset *states              = pset_new_ptr(2);
        int stack_size            = 0;
        int stack_dir             = arch_isa_stack_dir(isa);
        const arch_register_t *sp = arch_isa_sp(isa);
+       be_abi_call_t *call       = be_abi_call_new(sp->reg_class);
        ir_mode *mach_mode        = sp->reg_class->mode;
        struct obstack *obst      = &env->obst;
-       ir_node *no_mem           = get_irg_no_mem(irg);
+       int no_alloc              = call->flags.bits.frame_is_setup_on_call;
 
        ir_node *res_proj = NULL;
-       int curr_res_proj = -1;
+       int curr_res_proj = pn_Call_max;
        int n_low_args    = 0;
        int n_pos         = 0;
+       int count;
 
        ir_node *low_call;
        ir_node **in;
-       ir_node *sp_proj;
+       ir_node **res_projs;
+       const arch_register_t *reg;
        const ir_edge_t *edge;
        int *low_args;
        int *pos;
@@ -167,14 +417,17 @@ static void adjust_call(be_abi_irg_t *env, ir_node *irn)
        /* Let the isa fill out the abi description for that call node. */
        arch_isa_get_call_abi(isa, mt, call);
 
-       assert(get_method_variadicity(mt) == variadicity_non_variadic);
-
        /* Insert code to put the stack arguments on the stack. */
-       /* TODO: Vargargs */
-       for(i = 0, n = get_Call_n_params(irn); i < n; ++i) {
+       assert(get_Call_n_params(irn) == n_params);
+       for(i = 0; i < n_params; ++i) {
                be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
-               if(arg && !arg->in_reg) {
-                       stack_size += get_type_size_bytes(get_method_param_type(mt, i));
+               assert(arg);
+               if (arg->on_stack) {
+                       int arg_size = get_type_size_bytes(get_method_param_type(mt, i));
+
+                       stack_size += round_up2(arg->space_before, arg->alignment);
+                       stack_size += round_up2(arg_size, arg->alignment);
+                       stack_size += round_up2(arg->space_after, arg->alignment);
                        obstack_int_grow(obst, i);
                        n_pos++;
                }
@@ -191,14 +444,18 @@ static void adjust_call(be_abi_irg_t *env, ir_node *irn)
        }
        low_args = obstack_finish(obst);
 
-       /* If there are some parameters shich shall be passed on the stack. */
+       /* If there are some parameters which shall be passed on the stack. */
        if(n_pos > 0) {
                int curr_ofs      = 0;
-               int do_seq        = (call->flags & BE_ABI_USE_PUSH);
+               int do_seq        = call->flags.bits.store_args_sequential && !no_alloc;
 
-               /* Reverse list of stack parameters if call arguments are from left to right */
-               if(call->flags & BE_ABI_LEFT_TO_RIGHT) {
-                       for(i = 0; i < n_pos / 2; ++i) {
+               /*
+                * Reverse list of stack parameters if call arguments are from left to right.
+                * We must reverse them again if they are pushed (not stored) and the stack
+                * direction is downwards.
+                */
+               if (call->flags.bits.left_to_right ^ (do_seq && stack_dir < 0)) {
+                       for (i = 0; i < n_pos >> 1; ++i) {
                                int other  = n_pos - i - 1;
                                int tmp    = pos[i];
                                pos[i]     = pos[other];
@@ -208,71 +465,97 @@ static void adjust_call(be_abi_irg_t *env, ir_node *irn)
 
                /*
                 * If the stack is decreasing and we do not want to store sequentially,
+                * or someone else allocated the call frame
                 * we allocate as much space on the stack all parameters need, by
                 * moving the stack pointer along the stack's direction.
                 */
-               if(stack_dir < 0 && !do_seq) {
-                       curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, stack_size, be_stack_dir_along);
-                       pset_insert_ptr(env->stack_ops, curr_sp);
+               if(stack_dir < 0 && !do_seq && !no_alloc) {
+                       curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, stack_size);
+                       if(alloca_copy) {
+                               add_irn_dep(curr_sp, alloca_copy);
+                               alloca_copy = NULL;
+                       }
+               }
+
+               if(!do_seq) {
+                       obstack_ptr_grow(obst, get_Call_mem(irn));
+                       curr_mem = new_NoMem();
+               } else {
+                       curr_mem = get_Call_mem(irn);
                }
 
-               assert(mode_is_reference(mach_mode) && "machine mode must be pointer");
                for(i = 0; i < n_pos; ++i) {
-                       int p            = pos[i];
-                       ir_node *param   = get_Call_param(irn, p);
-                       ir_node *addr    = curr_sp;
-                       ir_node *mem     = NULL;
-                       type *param_type = get_method_param_type(mt, p);
-                       int param_size   = get_type_size_bytes(param_type);
-
-                       /* Make the expression to compute the argument's offset. */
-                       if(curr_ofs > 0) {
-                               addr = new_r_Const_long(irg, bl, mode_Is, curr_ofs);
-                               addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
+                       int p                  = pos[i];
+                       be_abi_call_arg_t *arg = get_call_arg(call, 0, p);
+                       ir_node *param         = get_Call_param(irn, p);
+                       ir_node *addr          = curr_sp;
+                       ir_node *mem           = NULL;
+                       ir_type *param_type    = get_method_param_type(mt, p);
+                       int param_size         = get_type_size_bytes(param_type) + arg->space_after;
+
+                       /*
+                        * If we wanted to build the arguments sequentially,
+                        * the stack pointer for the next argument must be incremented,
+                        * and the memory value propagated.
+                        */
+                       if (do_seq) {
+                               curr_ofs = 0;
+                               addr = curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, param_size + arg->space_before);
+                               if(alloca_copy) {
+                                       add_irn_dep(curr_sp, alloca_copy);
+                                       alloca_copy = NULL;
+                               }
+                               add_irn_dep(curr_sp, curr_mem);
+                       }
+                       else {
+                               curr_ofs += arg->space_before;
+                               curr_ofs =  round_up2(curr_ofs, arg->alignment);
+
+                               /* Make the expression to compute the argument's offset. */
+                               if(curr_ofs > 0) {
+                                       ir_mode *constmode = mach_mode;
+                                       if(mode_is_reference(mach_mode)) {
+                                               constmode = mode_Is;
+                                       }
+                                       addr = new_r_Const_long(irg, bl, constmode, curr_ofs);
+                                       addr = new_r_Add(irg, bl, curr_sp, addr, mach_mode);
+                               }
                        }
 
                        /* Insert a store for primitive arguments. */
-                       if(is_atomic_type(param_type)) {
-                               mem = new_r_Store(irg, bl, curr_mem, addr, param);
-                               mem = new_r_Proj(irg, bl, mem, mode_M, pn_Store_M);
+                       if (is_atomic_type(param_type)) {
+                               ir_node *store;
+                               store = new_r_Store(irg, bl, curr_mem, addr, param);
+                               mem = new_r_Proj(irg, bl, store, mode_M, pn_Store_M);
                        }
 
-                       /* Make a memcopy for compound arguments. */
+                       /* Make a mem copy for compound arguments. */
                        else {
+                               ir_node *copy;
+
                                assert(mode_is_reference(get_irn_mode(param)));
-                               mem = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
-                               mem = new_r_Proj(irg, bl, mem, mode_M, pn_CopyB_M_regular);
+                               copy = new_r_CopyB(irg, bl, curr_mem, addr, param, param_type);
+                               mem = new_r_Proj(irg, bl, copy, mode_M, pn_CopyB_M_regular);
                        }
 
-                       obstack_ptr_grow(obst, mem);
-
                        curr_ofs += param_size;
 
-                       /*
-                       * If we wanted to build the arguments sequentially,
-                       * the stack pointer for the next must be incremented,
-                       * and the memory value propagated.
-                       */
-                       if(do_seq) {
-                               curr_ofs = 0;
-                               curr_sp  = be_new_IncSP(sp, irg, bl, curr_sp, no_mem, param_size, be_stack_dir_along);
+                       if (do_seq)
                                curr_mem = mem;
-
-                               /*
-                                * only put the first IncSP to the stack fixup set since the other
-                                * ones are correctly connected to other nodes and do not need
-                                * to be fixed.
-                                */
-                               if(i == 0)
-                                       pset_insert_ptr(env->stack_ops, curr_sp);
-                       }
+                       else
+                               obstack_ptr_grow(obst, mem);
                }
 
                in = (ir_node **) obstack_finish(obst);
 
                /* We need the sync only, if we didn't build the stores sequentially. */
-               if(!do_seq)
-                       curr_mem = new_r_Sync(irg, bl, n_pos, in);
+               if(!do_seq) {
+                       if(n_pos >= 1) {
+                               curr_mem = new_r_Sync(irg, bl, n_pos + 1, in);
+                       } else {
+                               curr_mem = get_Call_mem(irn);
+                       }
+               }
                obstack_free(obst, in);
        }
 
@@ -282,17 +565,31 @@ static void adjust_call(be_abi_irg_t *env, ir_node *irn)
                const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
                for(j = 0; j < cls->n_regs; ++j) {
                        const arch_register_t *reg = arch_register_for_index(cls, j);
-                       if(arch_register_type_is(reg, caller_save))
+                       if(arch_register_type_is(reg, caller_save)) {
                                pset_insert_ptr(caller_save, (void *) reg);
+                       }
+                       if(arch_register_type_is(reg, state)) {
+                               pset_insert_ptr(caller_save, (void*) reg);
+                               pset_insert_ptr(states, (void*) reg);
+                       }
                }
        }
 
        /* search the greatest result proj number */
+
+       /* TODO: what if the result is NOT used? Currently there is
+        * no way to detect this later, especially there is no way to
+        * see this in the proj numbers.
+        * While this is ok for the register allocator, it is bad for
+        * backends which need to change the be_Call further (the x87 simulator,
+        * for instance; however, for this particular case the call_type is
+        * sufficient).
+        */
        foreach_out_edge(irn, edge) {
                const ir_edge_t *res_edge;
                ir_node *irn = get_edge_src_irn(edge);
 
-               if(is_Proj(irn) && get_irn_mode(irn) == mode_T) {
+               if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_T_result) {
                        res_proj = irn;
                        foreach_out_edge(irn, res_edge) {
                                int proj;
@@ -300,250 +597,1142 @@ static void adjust_call(be_abi_irg_t *env, ir_node *irn)
                                ir_node *res = get_edge_src_irn(res_edge);
 
                                assert(is_Proj(res));
+
                                proj = get_Proj_proj(res);
                                arg = get_call_arg(call, 1, proj);
+
+                               /*
+                                       shift the proj number to the right, since we will drop the
+                                       unspeakable Proj_T from the Call. Therefore, all real argument
+                                       Proj numbers must be increased by pn_be_Call_first_res
+                               */
+                               proj += pn_be_Call_first_res;
+                               set_Proj_proj(res, proj);
+                               obstack_ptr_grow(obst, res);
+
                                if(proj > curr_res_proj)
                                        curr_res_proj = proj;
-                               if(arg->in_reg)
+                               if(arg->in_reg) {
                                        pset_remove_ptr(caller_save, arg->reg);
+                                       //pmap_insert(arg_regs, arg->reg, INT_TO_PTR(proj + 1))
+                               }
                        }
                }
        }
+
        curr_res_proj++;
+       obstack_ptr_grow(obst, NULL);
+       res_projs = obstack_finish(obst);
+
+       /* make the back end call node and set its register requirements. */
+       for(i = 0; i < n_low_args; ++i) {
+               obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
+       }
+       foreach_pset(states, reg) {
+               const arch_register_class_t *cls = arch_register_get_class(reg);
+#if 0
+               ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
+               ir_fprintf(stderr, "Adding %+F\n", regnode);
+#endif
+               ir_node *regnode = new_rd_Unknown(irg, arch_register_class_mode(cls));
+               obstack_ptr_grow(obst, regnode);
+       }
+       count = n_low_args + pset_count(states);
+
+       in = obstack_finish(obst);
+
+       if(env->call->flags.bits.call_has_imm && get_irn_opcode(call_ptr) == iro_SymConst) {
+               low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
+                                      curr_sp, curr_sp,
+                                      curr_res_proj + pset_count(caller_save), count,
+                                      in, get_Call_type(irn));
+               be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
+       } else {
+               low_call = be_new_Call(get_irn_dbg_info(irn), irg, bl, curr_mem,
+                                      curr_sp, call_ptr,
+                                      curr_res_proj + pset_count(caller_save),
+                                      count, in, get_Call_type(irn));
+       }
+       ARR_APP1(ir_node*, env->calls, low_call);
+
+       /*
+               Set the register class of the call address to
+               the backend provided class (default: stack pointer class)
+       */
+       be_node_set_reg_class(low_call, be_pos_Call_ptr, call->cls_addr);
+
+       DBG((env->dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));
+
+       /* Set the register classes and constraints of the Call parameters. */
+       for(i = 0; i < n_low_args; ++i) {
+               int index = low_args[i];
+               be_abi_call_arg_t *arg = get_call_arg(call, 0, index);
+               assert(arg->reg != NULL);
+
+               be_set_constr_single_reg(low_call, be_pos_Call_first_arg + index, arg->reg);
+       }
+
+       /* Set the register constraints of the results. */
+       for (i = 0; res_projs[i]; ++i) {
+               int pn = get_Proj_proj(res_projs[i]);
+
+               /* Correct Proj number since it has been adjusted! (see above) */
+               const be_abi_call_arg_t *arg = get_call_arg(call, 1, pn - pn_Call_max);
+
+               /* Matze: we need the information about the real mode for later
+                * transforms (signed/unsigned compares, stores...), so leave the fixup
+                * for the backend transform phase... */
+#if 0
+               /* correct mode */
+               const arch_register_class_t *cls = arch_register_get_class(arg->reg);
+               ir_mode *mode = arch_register_class_mode(cls);
+               set_irn_mode(irn, mode);
+#endif
+
+               assert(arg->in_reg);
+               be_set_constr_single_reg(low_call, BE_OUT_POS(pn), arg->reg);
+               arch_set_irn_register(arch_env, res_projs[i], arg->reg);
+       }
+       obstack_free(obst, in);
+       exchange(irn, low_call);
+
+       /* redirect the result projs to the lowered call instead of the Proj_T */
+       for (i = 0; res_projs[i]; ++i)
+               set_Proj_pred(res_projs[i], low_call);
+
+       /* set the now unnecessary projT to bad */
+       if(res_proj != NULL) {
+               be_kill_node(res_proj);
+       }
 
        /* Make additional projs for the caller save registers
           and the Keep node which keeps them alive. */
-       if(pset_count(caller_save) > 0) {
+       if (pset_count(caller_save) > 0) {
                const arch_register_t *reg;
-               ir_node **in;
+               ir_node               **in, *keep;
+               int                   i, n;
 
-               if(!res_proj)
-                       res_proj = new_r_Proj(irg, bl, irn, mode_T, pn_Call_T_result);
+               for (reg = pset_first(caller_save), n = 0; reg; reg = pset_next(caller_save), ++n) {
+                       ir_node *proj = new_r_Proj(irg, bl, low_call, reg->reg_class->mode, curr_res_proj);
 
-               for(reg = pset_first(caller_save); reg; reg = pset_next(caller_save))
-                       obstack_ptr_grow(obst, new_r_Proj(irg, bl, res_proj, reg->reg_class->mode, curr_res_proj++));
+                       /* memorize the register in the link field; we need it afterwards to set the register class of the Keep correctly. */
+                       be_set_constr_single_reg(low_call, BE_OUT_POS(curr_res_proj), reg);
 
-               in = (ir_node **) obstack_finish(obst);
-               be_new_Keep(NULL, irg, bl, pset_count(caller_save), in);
+                       /* a call can produce ignore registers; in this case set the flag and register for the Proj */
+                       if (arch_register_type_is(reg, ignore)) {
+                               arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
+                               be_node_set_flags(low_call, BE_OUT_POS(curr_res_proj), arch_irn_flags_ignore);
+                       }
+
+                       set_irn_link(proj, (void *) reg);
+                       obstack_ptr_grow(obst, proj);
+                       curr_res_proj++;
+               }
+
+               /* create the Keep for the caller save registers */
+               in   = (ir_node **) obstack_finish(obst);
+               keep = be_new_Keep(NULL, irg, bl, n, in);
+               for (i = 0; i < n; ++i) {
+                       const arch_register_t *reg = get_irn_link(in[i]);
+                       be_node_set_reg_class(keep, i, reg->reg_class);
+               }
                obstack_free(obst, in);
        }
 
        /* Clean up the stack. */
        if(stack_size > 0) {
-               ir_node *last_inc_sp;
-
-               /* Get the result ProjT */
-               if(!res_proj)
-                       res_proj = new_r_Proj(irg, bl, irn, mode_T, pn_Call_T_result);
-
-               /* Make a Proj for the stack pointer. */
-               sp_proj     = new_r_Proj(irg, bl, res_proj, sp->reg_class->mode, curr_res_proj++);
-               last_inc_sp = be_new_IncSP(sp, irg, bl, sp_proj, no_mem, stack_size, be_stack_dir_against);
-               pset_insert_ptr(env->stack_ops, last_inc_sp);
-       }
+               ir_node *mem_proj = NULL;
 
-       /* at last make the backend call node and set its register requirements. */
-       for(i = 0; i < n_low_args; ++i)
-               obstack_ptr_grow(obst, get_Call_param(irn, low_args[i]));
+               foreach_out_edge(low_call, edge) {
+                       ir_node *irn = get_edge_src_irn(edge);
+                       if(is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
+                               mem_proj = irn;
+                               break;
+                       }
+               }
 
-       in = obstack_finish(obst);
-       low_call = be_new_Call(irg, bl, curr_mem, curr_sp, get_Call_ptr(irn), curr_res_proj, n_low_args, in);
-       obstack_free(obst, in);
+               if(!mem_proj) {
+                       mem_proj = new_r_Proj(irg, bl, low_call, mode_M, pn_Call_M);
+                       keep_alive(mem_proj);
+               }
 
-       exchange(irn, low_call);
+                /* Clean up the stack frame if we allocated it */
+               if(!no_alloc) {
+                       curr_sp = be_new_IncSP(sp, irg, bl, curr_sp, -stack_size);
+                       add_irn_dep(curr_sp, mem_proj);
+                       if(alloca_copy) {
+                               add_irn_dep(curr_sp, alloca_copy);
+                               alloca_copy = NULL;
+                       }
+               }
+       }
 
        be_abi_call_free(call);
        obstack_free(obst, pos);
        del_pset(results);
+       del_pset(states);
        del_pset(caller_save);
-}
 
-static void adjust_call_walker(ir_node *irn, void *data)
-{
-       if(get_irn_opcode(irn) == iro_Call)
-               adjust_call(data, irn);
+       return curr_sp;
 }
 
 /**
- * Walker to implement alloca-style allocations.
- * They are implemented using an add to the stack pointer
- * and a copy instruction.
+ * Adjust an alloca.
+ * The alloca is transformed into a back end alloca node and connected to the stack nodes.
  */
-static void implement_stack_alloc(be_abi_irg_t *env, ir_node *irn)
+static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp, ir_node **result_copy)
 {
-       const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
-       ir_node *bl           = get_nodes_block(irn);
-       ir_node *res          = env->init_sp;
+       ir_node *block;
+       ir_graph *irg;
+       ir_node *alloc_mem;
+       ir_node *alloc_res;
+       ir_type *type;
+
+       const ir_edge_t *edge;
+       ir_node *new_alloc;
        ir_node *size;
+       ir_node *addr;
+       ir_node *copy;
+       ir_node *ins[2];
 
-       assert(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc);
+       if (get_Alloc_where(alloc) != stack_alloc) {
+               assert(0);
+               return alloc;
+       }
 
-       size = get_Alloc_size(irn);
-       if(isa->stack_dir > 0)
-               res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
+       block = get_nodes_block(alloc);
+       irg = get_irn_irg(block);
+       alloc_mem = NULL;
+       alloc_res = NULL;
+       type = get_Alloc_type(alloc);
 
-       res = be_new_AddSP(isa->sp, env->birg->irg, bl, res, size);
-       pset_insert_ptr(env->stack_ops, res);
+       foreach_out_edge(alloc, edge) {
+               ir_node *irn = get_edge_src_irn(edge);
 
-       if(isa->stack_dir < 0)
-               res = be_new_Copy(isa->sp->reg_class, env->birg->irg, bl, res);
+               assert(is_Proj(irn));
+               switch(get_Proj_proj(irn)) {
+               case pn_Alloc_M:
+                       alloc_mem = irn;
+                       break;
+               case pn_Alloc_res:
+                       alloc_res = irn;
+                       break;
+               default:
+                       break;
+               }
+       }
 
-}
+       /* Beware: currently Alloc nodes without a result can occur;
+          only escape analysis removes them, and it runs only for
+          object-oriented source. We kill the Alloc here. */
+       if (alloc_res == NULL && alloc_mem) {
+               exchange(alloc_mem, get_Alloc_mem(alloc));
+               return curr_sp;
+       }
 
-static void collect_return_walker(ir_node *irn, void *data)
-{
-       if(get_irn_opcode(irn) == iro_Return) {
-               struct obstack *obst = data;
-               obstack_ptr_grow(obst, irn);
+       /* we might need to multiply the size by the element size */
+       if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
+               tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
+               ir_node *cnst = new_rd_Const(NULL, irg, block, mode_Iu, tv);
+               ir_node *mul = new_rd_Mul(NULL, irg, block, get_Alloc_size(alloc),
+                                         cnst, mode_Iu);
+               size = mul;
+       } else {
+               size = get_Alloc_size(alloc);
        }
-}
 
-static ir_node *setup_frame(be_abi_irg_t *env)
-{
-       const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
-       const arch_register_t *sp = isa->sp;
-       const arch_register_t *bp = isa->bp;
-       ir_graph *irg      = env->birg->irg;
-       ir_node *bl        = get_irg_start_block(irg);
-       ir_node *no_mem    = get_irg_no_mem(irg);
-       ir_node *old_frame = get_irg_frame(irg);
-       int store_old_fp   = 1;
-       int omit_fp        = env->omit_fp;
-       ir_node *stack     = pmap_get(env->regs, (void *) sp);
-       ir_node *frame     = pmap_get(env->regs, (void *) bp);
+       /* The stack pointer will be modified in an unknown manner.
+          We cannot omit the frame pointer. */
+       env->call->flags.bits.try_omit_fp = 0;
+       new_alloc = be_new_AddSP(env->isa->sp, irg, block, curr_sp, size);
 
-       int stack_nr       = get_Proj_proj(stack);
+       if(alloc_mem != NULL) {
+               ir_node *addsp_mem;
+               ir_node *sync;
 
-       if(omit_fp) {
-               stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
-               frame = stack;
+               addsp_mem = new_r_Proj(irg, block, new_alloc, mode_M, pn_be_AddSP_M);
+
+               // We need to sync the output mem of the AddSP with the input mem
+               // edge into the alloc node
+               ins[0] = get_Alloc_mem(alloc);
+               ins[1] = addsp_mem;
+               sync = new_r_Sync(irg, block, 2, ins);
+
+               exchange(alloc_mem, sync);
        }
 
-       else {
-               if(store_old_fp) {
-                       ir_node *irn;
+       exchange(alloc, new_alloc);
 
-                       irn   = new_r_Store(irg, bl, get_irg_initial_mem(irg), stack, frame);
-                       irn   = new_r_Proj(irg, bl, irn, mode_M, pn_Store_M);
-                       stack = be_new_IncSP(sp, irg, bl, stack, irn, get_mode_size_bytes(bp->reg_class->mode), be_stack_dir_along);
-               }
+       /* fix projnum of alloca res */
+       set_Proj_proj(alloc_res, pn_be_AddSP_res);
 
-               frame = be_new_Copy(bp->reg_class, irg, bl, stack);
+       addr = env->isa->stack_dir < 0 ? alloc_res : curr_sp;
 
-               be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
-               if(env->dedicated_fp) {
-                       be_set_constr_single_reg(frame, -1, bp);
-                       be_node_set_flags(frame, -1, arch_irn_flags_ignore);
-               }
+       /* copy the address away, since it could be used after further stack pointer modifications. */
+       /* Let it point to curr_sp just for the moment; I'll reroute it in a second. */
+       *result_copy = copy = be_new_Copy(env->isa->sp->reg_class, irg, block, curr_sp);
 
-               stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_along);
-       }
+       /* Let all users of the Alloc() result now point to the copy. */
+       edges_reroute(alloc_res, copy, irg);
 
-       be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
-       env->init_sp = stack;
-       set_irg_frame(irg, frame);
-       edges_reroute(old_frame, frame, irg);
+       /* Rewire the copy appropriately. */
+       set_irn_n(copy, be_pos_Copy_op, addr);
 
-       return frame;
-}
+       curr_sp = alloc_res;
+
+       return curr_sp;
+}  /* adjust_alloc */
 
-static void clearup_frame(be_abi_irg_t *env, ir_node *bl, struct obstack *obst)
+/**
+ * Adjust a Free.
+ * The Free is transformed into a back end free node and connected to the stack nodes.
+ */
+static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
 {
-       const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
-       const arch_register_t *sp = isa->sp;
-       const arch_register_t *bp = isa->bp;
-       ir_graph *irg      = env->birg->irg;
-       ir_node *no_mem    = get_irg_no_mem(irg);
-       ir_node *frame     = get_irg_frame(irg);
-       ir_node *stack     = env->init_sp;
-       int store_old_fp   = 1;
+       ir_node *block;
+       ir_graph *irg;
+       ir_node *subsp, *mem, *res, *size, *sync;
+       ir_type *type;
+       ir_node *in[2];
+       ir_mode *sp_mode;
+
+       if (get_Free_where(free) != stack_alloc) {
+               assert(0);
+               return free;
+       }
 
-       pmap_entry *ent;
+       block = get_nodes_block(free);
+       irg = get_irn_irg(block);
+       type = get_Free_type(free);
+       sp_mode = env->isa->sp->reg_class->mode;
+
+       /* we might need to multiply the size by the element size */
+       if(type != get_unknown_type() && get_type_size_bytes(type) != 1) {
+               tarval *tv = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
+               ir_node *cnst = new_rd_Const(NULL, irg, block, mode_Iu, tv);
+               ir_node *mul = new_rd_Mul(NULL, irg, block, get_Free_size(free),
+                                         cnst, mode_Iu);
+               size = mul;
+       } else {
+               size = get_Free_size(free);
+       }
 
+       /* The stack pointer will be modified in an unknown manner.
+          We cannot omit the frame pointer. */
+       env->call->flags.bits.try_omit_fp = 0;
+       subsp = be_new_SubSP(env->isa->sp, irg, block, curr_sp, size);
 
-       if(env->omit_fp) {
-               stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE, be_stack_dir_against);
-       }
+       mem = new_r_Proj(irg, block, subsp, mode_M, pn_be_SubSP_M);
+       res = new_r_Proj(irg, block, subsp, sp_mode, pn_be_SubSP_res);
 
-       else {
-               stack = be_new_Copy(sp->reg_class, irg, bl, frame);
+       /* we need to sync the memory */
+       in[0] = get_Free_mem(free);
+       in[1] = mem;
+       sync = new_r_Sync(irg, block, 2, in);
 
-               if(store_old_fp) {
-                       ir_mode *mode = sp->reg_class->mode;
-                       ir_node *irn;
+       /* and make the SubSP dependent on the former memory */
+       add_irn_dep(subsp, get_Free_mem(free));
 
-                       stack = be_new_IncSP(sp, irg, bl, stack, no_mem, get_mode_size_bytes(mode), be_stack_dir_against);
-                       irn   = new_r_Load(irg, bl, no_mem, stack, mode);
-                       irn   = new_r_Proj(irg, bl, irn, mode, pn_Load_res);
-                       frame = be_new_Copy(bp->reg_class, irg, bl, irn);
-               }
+       /* kill the free */
+       exchange(free, sync);
+       curr_sp = res;
 
-               if(env->dedicated_fp) {
-                       be_set_constr_single_reg(frame, -1, bp);
-               }
+       return curr_sp;
+}  /* adjust_free */
 
-       }
+/* the following function has been replaced by the heights module */
+#if 0
+/**
+ * Walker for dependent_on().
+ * This function searches for the node tgt recursively, starting from a given node,
+ * but is restricted to the given block.
+ * @return 1 if tgt was reachable from curr, 0 if not.
+ */
+static int check_dependence(ir_node *curr, ir_node *tgt, ir_node *bl)
+{
+       int n, i;
 
-       pmap_foreach(env->regs, ent) {
-               const arch_register_t *reg = ent->key;
-               ir_node *irn               = ent->value;
+       if (get_nodes_block(curr) != bl)
+               return 0;
 
-               if(reg == sp)
-                       irn = stack;
-               else if(reg == bp)
-                       irn = frame;
+       if (curr == tgt)
+               return 1;
 
-               obstack_ptr_grow(obst, irn);
+       /* Phi functions stop the recursion inside a basic block */
+       if (! is_Phi(curr)) {
+               for(i = 0, n = get_irn_arity(curr); i < n; ++i) {
+                       if (check_dependence(get_irn_n(curr, i), tgt, bl))
+                               return 1;
+               }
        }
+
+       return 0;
 }
+#endif /* if 0 */
 
 /**
- * Modify the irg itself and the frame type.
+ * Check if a node is somehow data dependent on another one.
+ * Both nodes must be in the same basic block.
+ * @param n1 The first node.
+ * @param n2 The second node.
+ * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
  */
-static void modify_irg(be_abi_irg_t *env)
+static int dependent_on(ir_node *n1, ir_node *n2)
 {
-       firm_dbg_module_t *dbg    = env->dbg;
-       be_abi_call_t *call       = be_abi_call_new();
-       const arch_isa_t *isa     = env->birg->main_env->arch_env->isa;
-       const arch_register_t *sp = arch_isa_sp(isa);
-       ir_graph *irg             = env->birg->irg;
-       ir_node *bl               = get_irg_start_block(irg);
-       ir_node *end              = get_irg_end_block(irg);
-       ir_node *arg_tuple        = get_irg_args(irg);
-       ir_node *no_mem           = get_irg_no_mem(irg);
-       type *method_type         = get_entity_type(get_irg_entity(irg));
-       int n_params              = get_method_n_params(method_type);
+       assert(get_nodes_block(n1) == get_nodes_block(n2));
 
-       int max_arg               = 0;
-       int reg_params_nr         = 0;
-       int arg_offset            = 0;
+       return heights_reachable_in_block(ir_heights, n1, n2);
+}
 
-       int i, j, n;
+static int cmp_call_dependecy(const void *c1, const void *c2)
+{
+       ir_node *n1 = *(ir_node **) c1;
+       ir_node *n2 = *(ir_node **) c2;
 
-       ir_node *frame_pointer;
-       ir_node *reg_params, *reg_params_bl;
-       ir_node **args, **args_repl;
-       const ir_edge_t *edge;
+       /*
+               Classical qsort() comparison function behavior:
+               0  if both elements are equal
+               1  if second is "smaller" than first
+               -1 if first is "smaller" than second
+       */
+       if (dependent_on(n1, n2))
+               return -1;
+
+       if (dependent_on(n2, n1))
+               return 1;
+
+       return 0;
+}
+
+/**
+ * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
+ */
+static void link_calls_in_block_walker(ir_node *irn, void *data)
+{
+       ir_opcode code = get_irn_opcode(irn);
+
+       if (code == iro_Call ||
+               (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
+               (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
+               be_abi_irg_t *env = data;
+               ir_node *bl       = get_nodes_block(irn);
+               void *save        = get_irn_link(bl);
+
+               if (code == iro_Call)
+                       env->call->flags.bits.irg_is_leaf = 0;
+
+               set_irn_link(irn, save);
+               set_irn_link(bl, irn);
+       }
+}
+
+/**
+ * Block-walker:
+ * Process all Call nodes inside a basic block.
+ * Note that the link field of the block must contain a linked list of all
+ * Call nodes inside the Block. We first order this list according to data dependency
+ * and then connect the calls together.
+ */
+static void process_calls_in_block(ir_node *bl, void *data)
+{
+       be_abi_irg_t *env = data;
+       ir_node *curr_sp  = env->init_sp;
+       ir_node *irn;
+       int n;
+
+       for(irn = get_irn_link(bl), n = 0; irn; irn = get_irn_link(irn), ++n)
+               obstack_ptr_grow(&env->obst, irn);
+
+       /* If there were call nodes in the block. */
+       if(n > 0) {
+               ir_node *keep;
+               ir_node **nodes;
+               ir_node *copy = NULL;
+               int i;
+
+               nodes = obstack_finish(&env->obst);
+
+               /* order the call nodes according to data dependency */
+               qsort(nodes, n, sizeof(nodes[0]), cmp_call_dependecy);
+
+               for(i = n - 1; i >= 0; --i) {
+                       ir_node *irn = nodes[i];
+
+                       DBG((env->dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
+                       switch(get_irn_opcode(irn)) {
+                       case iro_Call:
+                               curr_sp = adjust_call(env, irn, curr_sp, copy);
+                               break;
+                       case iro_Alloc:
+                               curr_sp = adjust_alloc(env, irn, curr_sp, &copy);
+                               break;
+                       case iro_Free:
+                               curr_sp = adjust_free(env, irn, curr_sp);
+                               break;
+                       default:
+                               break;
+                       }
+               }
+
+               obstack_free(&env->obst, nodes);
+
+               /* Keep the last stack state in the block by tying it to a Keep node */
+               nodes[0] = curr_sp;
+               keep     = be_new_Keep(env->isa->sp->reg_class, get_irn_irg(bl), bl, 1, nodes);
+               pmap_insert(env->keep_map, bl, keep);
+       }
+
+       set_irn_link(bl, curr_sp);
+}  /* process_calls_in_block */
+
+/**
+ * Adjust all call nodes in the graph to the ABI conventions.
+ */
+static void process_calls(be_abi_irg_t *env)
+{
+       ir_graph *irg = env->birg->irg;
+
+       env->call->flags.bits.irg_is_leaf = 1;
+       irg_walk_graph(irg, firm_clear_link, link_calls_in_block_walker, env);
+
+       ir_heights = heights_new(env->birg->irg);
+       irg_block_walk_graph(irg, NULL, process_calls_in_block, env);
+       heights_free(ir_heights);
+}
+
+#if 0 /*
+static ir_node *setup_frame(be_abi_irg_t *env)
+{
+       const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+       const arch_register_t *sp = isa->sp;
+       const arch_register_t *bp = isa->bp;
+       be_abi_call_flags_bits_t flags = env->call->flags.bits;
+       ir_graph *irg      = env->birg->irg;
+       ir_node *bl        = get_irg_start_block(irg);
+       ir_node *no_mem    = get_irg_no_mem(irg);
+       ir_node *old_frame = get_irg_frame(irg);
+       ir_node *stack     = pmap_get(env->regs, (void *) sp);
+       ir_node *frame     = pmap_get(env->regs, (void *) bp);
+
+       int stack_nr       = get_Proj_proj(stack);
+
+       if(flags.try_omit_fp) {
+               stack = be_new_IncSP(sp, irg, bl, stack, no_mem, BE_STACK_FRAME_SIZE_EXPAND);
+               frame = stack;
+       }
+
+       else {
+               frame = be_new_Copy(bp->reg_class, irg, bl, stack);
+
+               be_node_set_flags(frame, -1, arch_irn_flags_dont_spill);
+               if(!flags.fp_free) {
+                       be_set_constr_single_reg(frame, -1, bp);
+                       be_node_set_flags(frame, -1, arch_irn_flags_ignore);
+                       arch_set_irn_register(env->birg->main_env->arch_env, frame, bp);
+               }
+
+               stack = be_new_IncSP(sp, irg, bl, stack, frame, BE_STACK_FRAME_SIZE_EXPAND);
+       }
+
+       be_node_set_flags(env->reg_params, -(stack_nr + 1), arch_irn_flags_ignore);
+       env->init_sp = stack;
+       set_irg_frame(irg, frame);
+       edges_reroute(old_frame, frame, irg);
+
+       return frame;
+}
+
+static void clearup_frame(be_abi_irg_t *env, ir_node *ret, pmap *reg_map, struct obstack *obst)
+{
+       const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+       const arch_register_t *sp = isa->sp;
+       const arch_register_t *bp = isa->bp;
+       ir_graph *irg      = env->birg->irg;
+       ir_node *ret_mem   = get_Return_mem(ret);
+       ir_node *frame     = get_irg_frame(irg);
+       ir_node *bl        = get_nodes_block(ret);
+       ir_node *stack     = get_irn_link(bl);
 
        pmap_entry *ent;
 
-       env->regs = pmap_create();
+       if(env->call->flags.bits.try_omit_fp) {
+               stack = be_new_IncSP(sp, irg, bl, stack, ret_mem, -BE_STACK_FRAME_SIZE_SHRINK);
+       }
 
-       DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
+       else {
+               stack = be_new_SetSP(sp, irg, bl, stack, frame, ret_mem);
+               be_set_constr_single_reg(stack, -1, sp);
+               be_node_set_flags(stack, -1, arch_irn_flags_ignore);
+       }
 
-       /* Find the maximum proj number of the argument tuple proj */
-       foreach_out_edge(arg_tuple, edge)  {
-               ir_node *irn = get_edge_src_irn(edge);
-               int nr       = get_Proj_proj(irn);
-               max_arg      = MAX(max_arg, nr);
+       pmap_foreach(env->regs, ent) {
+               const arch_register_t *reg = ent->key;
+               ir_node *irn               = ent->value;
+
+               if(reg == sp)
+                       obstack_ptr_grow(&env->obst, stack);
+               else if(reg == bp)
+                       obstack_ptr_grow(&env->obst, frame);
+               else if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
+                       obstack_ptr_grow(obst, irn);
+       }
+}
+*/
+#endif
+
+/**
+ * Computes the stack argument layout type.
+ * Changes a possibly allocated value param type by moving
+ * entities to the stack layout type.
+ *
+ * @param env          the ABI environment
+ * @param call         the current call ABI
+ * @param method_type  the method type
+ * @param param_map    an array mapping method arguments to the stack layout type
+ *
+ * @return the stack argument layout type
+ */
+static ir_type *compute_arg_type(be_abi_irg_t *env, be_abi_call_t *call, ir_type *method_type, ir_entity ***param_map)
+{
+       int dir  = env->call->flags.bits.left_to_right ? 1 : -1;
+       int inc  = env->birg->main_env->arch_env->isa->stack_dir * dir;
+       int n    = get_method_n_params(method_type);
+       int curr = inc > 0 ? 0 : n - 1;
+       int ofs  = 0;
+
+       char buf[128];
+       ir_type *res;
+       int i;
+       ir_type *val_param_tp = get_method_value_param_type(method_type);
+       ident *id = get_entity_ident(get_irg_entity(env->birg->irg));
+       ir_entity **map;
+
+       *param_map = map = obstack_alloc(&env->obst, n * sizeof(ir_entity *));
+       res = new_type_struct(mangle_u(id, new_id_from_chars("arg_type", 8)));
+       for (i = 0; i < n; ++i, curr += inc) {
+               ir_type *param_type    = get_method_param_type(method_type, curr);
+               be_abi_call_arg_t *arg = get_call_arg(call, 0, curr);
+
+               map[i] = NULL;
+               if (arg->on_stack) {
+                       if (val_param_tp) {
+                               /* the entity was already created, move it to the stack argument type */
+                               arg->stack_ent = get_method_value_param_ent(method_type, i);
+                               remove_struct_member(val_param_tp, arg->stack_ent);
+                               set_entity_owner(arg->stack_ent, res);
+                               add_struct_member(res, arg->stack_ent);
+                               /* must be automatic to set a fixed layout */
+                               set_entity_allocation(arg->stack_ent, allocation_automatic);
+                       }
+                       else {
+                               snprintf(buf, sizeof(buf), "param_%d", i);
+                               arg->stack_ent = new_entity(res, new_id_from_str(buf), param_type);
+                       }
+                       ofs += arg->space_before;
+                       ofs = round_up2(ofs, arg->alignment);
+                       set_entity_offset(arg->stack_ent, ofs);
+                       ofs += arg->space_after;
+                       ofs += get_type_size_bytes(param_type);
+                       map[i] = arg->stack_ent;
+               }
+       }
+       set_type_size_bytes(res, ofs);
+       set_type_state(res, layout_fixed);
+       return res;
+}
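+
+/*
+ * Illustrative sketch only (kept as a comment, never compiled): assuming a
+ * hypothetical method int f(int a, int b) whose two parameters are both passed
+ * on the stack with 4 byte alignment, no space_before/space_after padding and
+ * no pre-existing value-param entities, compute_arg_type() yields a struct
+ * type roughly equivalent to
+ *
+ *   struct f_arg_type {
+ *       int param_0;       // one 4 byte stack slot
+ *       int param_1;       // the next 4 byte stack slot
+ *   };                     // size 8, layout_fixed
+ *
+ * and param_map[0] / param_map[1] point to the two entities.
+ */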
+
+#if 0
+static void create_register_perms(const arch_isa_t *isa, ir_graph *irg, ir_node *bl, pmap *regs)
+{
+       int i, j, n;
+       struct obstack obst;
+
+       obstack_init(&obst);
+
+       /* Create a Perm after the RegParams node to delimit it. */
+       for(i = 0, n = arch_isa_get_n_reg_class(isa); i < n; ++i) {
+               const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+               ir_node *perm;
+               ir_node **in;
+               int n_regs;
+
+               for(n_regs = 0, j = 0; j < cls->n_regs; ++j) {
+                       const arch_register_t *reg = &cls->regs[j];
+                       ir_node *irn = pmap_get(regs, (void *) reg);
+
+                       if(irn && !arch_register_type_is(reg, ignore)) {
+                               n_regs++;
+                               obstack_ptr_grow(&obst, irn);
+                               set_irn_link(irn, (void *) reg);
+                       }
+               }
+
+               obstack_ptr_grow(&obst, NULL);
+               in = obstack_finish(&obst);
+               if(n_regs > 0) {
+                       perm = be_new_Perm(cls, irg, bl, n_regs, in);
+                       for(j = 0; j < n_regs; ++j) {
+                               ir_node *arg = in[j];
+                               arch_register_t *reg = get_irn_link(arg);
+                               pmap_insert(regs, reg, arg);
+                               be_set_constr_single_reg(perm, BE_OUT_POS(j), reg);
+                       }
+               }
+               obstack_free(&obst, in);
+       }
+
+       obstack_free(&obst, NULL);
+}
+#endif
+
+typedef struct {
+       const arch_register_t *reg;
+       ir_node *irn;
+} reg_node_map_t;
+
+static int cmp_regs(const void *a, const void *b)
+{
+       const reg_node_map_t *p = a;
+       const reg_node_map_t *q = b;
+
+       if(p->reg->reg_class == q->reg->reg_class)
+               return p->reg->index - q->reg->index;
+       else
+               return p->reg->reg_class - q->reg->reg_class;
+}
+
+static reg_node_map_t *reg_map_to_arr(struct obstack *obst, pmap *reg_map)
+{
+       pmap_entry *ent;
+       int n = pmap_count(reg_map);
+       int i = 0;
+       reg_node_map_t *res = obstack_alloc(obst, n * sizeof(res[0]));
+
+       pmap_foreach(reg_map, ent) {
+               res[i].reg = ent->key;
+               res[i].irn = ent->value;
+               i++;
+       }
+
+       qsort(res, n, sizeof(res[0]), cmp_regs);
+       return res;
+}
+
+/**
+ * Creates a be_Barrier for all registers in the given map and re-points the
+ * map entries (and *mem, if given) to the corresponding barrier Projs.
+ * If in_req is non-zero, the barrier's inputs are constrained to their
+ * registers as well.
+ */
+static ir_node *create_barrier(be_abi_irg_t *env, ir_node *bl, ir_node **mem, pmap *regs, int in_req)
+{
+       ir_graph *irg = env->birg->irg;
+       int n_regs    = pmap_count(regs);
+       int n;
+       ir_node *irn;
+       ir_node **in;
+       reg_node_map_t *rm;
+
+       rm = reg_map_to_arr(&env->obst, regs);
+
+       for(n = 0; n < n_regs; ++n)
+               obstack_ptr_grow(&env->obst, rm[n].irn);
+
+       if(mem) {
+               obstack_ptr_grow(&env->obst, *mem);
+               n++;
+       }
+
+       in = (ir_node **) obstack_finish(&env->obst);
+       irn = be_new_Barrier(irg, bl, n, in);
+       obstack_free(&env->obst, in);
+
+       for(n = 0; n < n_regs; ++n) {
+               const arch_register_t *reg = rm[n].reg;
+               int flags                  = 0;
+               int pos                    = BE_OUT_POS(n);
+               ir_node *proj;
+
+               proj = new_r_Proj(irg, bl, irn, get_irn_mode(rm[n].irn), n);
+               be_node_set_reg_class(irn, n, reg->reg_class);
+               if(in_req)
+                       be_set_constr_single_reg(irn, n, reg);
+               be_set_constr_single_reg(irn, pos, reg);
+               be_node_set_reg_class(irn, pos, reg->reg_class);
+               arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
+
+               /* If the Proj projects an ignore register or a node which is set to ignore, propagate this property. */
+               if(arch_register_type_is(reg, ignore) || arch_irn_is(env->birg->main_env->arch_env, in[n], ignore))
+                       flags |= arch_irn_flags_ignore;
+
+               if(arch_irn_is(env->birg->main_env->arch_env, in[n], modify_sp))
+                       flags |= arch_irn_flags_modify_sp;
+
+               be_node_set_flags(irn, pos, flags);
+
+               pmap_insert(regs, (void *) reg, proj);
+       }
+
+       if(mem) {
+               *mem = new_r_Proj(irg, bl, irn, mode_M, n);
+       }
+
+       obstack_free(&env->obst, rm);
+       return irn;
+}
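+
+/*
+ * Illustrative sketch only (never compiled): for a map containing the stack
+ * pointer and the frame pointer plus a memory value, the barrier built above
+ * looks roughly like
+ *
+ *   in  : sp_value, bp_value, mem
+ *   irn : be_Barrier(bl, in)
+ *   out : Proj 0 (constrained to sp), Proj 1 (constrained to bp),
+ *         Proj 2 (mode_M, the new memory)
+ *
+ * Afterwards every register in the map points to its barrier Proj, so code
+ * behind the barrier uses these Projs instead of the original values.
+ */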
+
+/**
+ * Creates a be_Return for a Return node.
+ *
+ * @param env     the ABI environment
+ * @param irn     the Return node or NULL if there was none
+ * @param bl      the block where the be_Return should be placed
+ * @param mem     the current memory
+ * @param n_res   number of return results
+ */
+static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl, ir_node *mem, int n_res) {
+       be_abi_call_t *call = env->call;
+       const arch_isa_t *isa = env->birg->main_env->arch_env->isa;
+
+       pmap *reg_map  = pmap_create();
+       ir_node *keep  = pmap_get(env->keep_map, bl);
+       int in_max;
+       ir_node *ret;
+       int i, n;
+       ir_node **in;
+       ir_node *stack;
+       const arch_register_t **regs;
+       pmap_entry *ent ;
+
+       /*
+               Get the valid stack node in this block.
+               If we had a call in that block, there is a Keep constructed by process_calls()
+               which points to the last stack modification in that block; we'll use
+               that one. Otherwise we use the stack from the start block and let
+               the SSA construction fix the usage.
+       */
+       stack = be_abi_reg_map_get(env->regs, isa->sp);
+       if (keep) {
+               ir_node *bad = new_r_Bad(env->birg->irg);
+               stack = get_irn_n(keep, 0);
+               set_nodes_block(keep, bad);
+               set_irn_n(keep, 0, bad);
+               // exchange(keep, new_r_Bad(env->birg->irg));
+       }
+
+       /* Insert results for Return into the register map. */
+       for(i = 0; i < n_res; ++i) {
+               ir_node *res           = get_Return_res(irn, i);
+               be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+               assert(arg->in_reg && "return value must be passed in register");
+               pmap_insert(reg_map, (void *) arg->reg, res);
+       }
+
+       /* Add uses of the callee save registers. */
+       pmap_foreach(env->regs, ent) {
+               const arch_register_t *reg = ent->key;
+               if(arch_register_type_is(reg, callee_save) || arch_register_type_is(reg, ignore))
+                       pmap_insert(reg_map, ent->key, ent->value);
+       }
+
+       be_abi_reg_map_set(reg_map, isa->sp, stack);
+
+       /* Make the Epilogue node and call the arch's epilogue maker. */
+       create_barrier(env, bl, &mem, reg_map, 1);
+       call->cb->epilogue(env->cb, bl, &mem, reg_map);
+
+       /*
+               Maximum size of the in array for Return nodes is
+               return args + callee save/ignore registers + memory + stack pointer
+       */
+       in_max = pmap_count(reg_map) + n_res + 2;
+
+       in   = obstack_alloc(&env->obst, in_max * sizeof(in[0]));
+       regs = obstack_alloc(&env->obst, in_max * sizeof(regs[0]));
+
+       in[0]   = mem;
+       in[1]   = be_abi_reg_map_get(reg_map, isa->sp);
+       regs[0] = NULL;
+       regs[1] = isa->sp;
+       n       = 2;
+
+       /* Clear the SP entry, since it has already been added above. */
+       pmap_insert(reg_map, (void *) isa->sp, NULL);
+       for(i = 0; i < n_res; ++i) {
+               be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
+
+               in[n]     = be_abi_reg_map_get(reg_map, arg->reg);
+               regs[n++] = arg->reg;
+
+               /* Clear the map entry to mark the register as processed. */
+               be_abi_reg_map_set(reg_map, arg->reg, NULL);
+       }
+
+       /* Add the remaining registers from the map. */
+       pmap_foreach(reg_map, ent) {
+               if(ent->value) {
+                       in[n]     = ent->value;
+                       regs[n++] = ent->key;
+               }
        }
-       max_arg += 1;
-       args      = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
-       args_repl = obstack_alloc(&env->obst, max_arg * sizeof(args[0]));
-       memset(args, 0, max_arg * sizeof(args[0]));
-       memset(args_repl, 0, max_arg * sizeof(args[0]));
+
+       /* The in array for the new back end return is now ready. */
+       ret = be_new_Return(irn ? get_irn_dbg_info(irn) : NULL, env->birg->irg, bl, n_res, n, in);
+
+       /* Set the register classes of the return's parameters accordingly. */
+       for(i = 0; i < n; ++i)
+               if(regs[i])
+                       be_node_set_reg_class(ret, i, regs[i]->reg_class);
+
+       /* Free the space of the Return's in array and the register <-> Proj map. */
+       obstack_free(&env->obst, in);
+       pmap_destroy(reg_map);
+
+       return ret;
+}
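+
+/*
+ * Illustrative sketch only (never compiled): on a hypothetical target where
+ * the result is returned in r0 and r4/r5 are callee save, the be_Return built
+ * above for "return x;" gets the inputs
+ *
+ *   in[0] = mem, in[1] = stack pointer, in[2] = x (in r0),
+ *   in[3] = r4 Proj, in[4] = r5 Proj
+ *
+ * and each input's register class is taken from the regs[] array.
+ */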
+
+typedef struct lower_frame_sels_env_t {
+       be_abi_irg_t *env;
+       ir_entity    *value_param_list;  /**< the list of all value param entities */
+} lower_frame_sels_env_t;
+
+/**
+ * Walker: replaces Sels of frame type and
+ * value param type entities by FrameAddr nodes.
+ */
+static void lower_frame_sels_walker(ir_node *irn, void *data)
+{
+       lower_frame_sels_env_t *ctx = data;
+
+       if (is_Sel(irn)) {
+               ir_graph *irg        = current_ir_graph;
+               ir_node  *frame      = get_irg_frame(irg);
+               ir_node  *param_base = get_irg_value_param_base(irg);
+               ir_node  *ptr        = get_Sel_ptr(irn);
+
+               if (ptr == frame || ptr == param_base) {
+                       be_abi_irg_t *env = ctx->env;
+                       ir_entity    *ent = get_Sel_entity(irn);
+                       ir_node      *bl  = get_nodes_block(irn);
+                       ir_node      *nw;
+
+                       nw = be_new_FrameAddr(env->isa->sp->reg_class, irg, bl, frame, ent);
+                       exchange(irn, nw);
+
+                       /* check if it's a param Sel and if we have not seen this entity immediately before */
+                       if (ptr == param_base && ctx->value_param_list != ent) {
+                               set_entity_link(ent, ctx->value_param_list);
+                               ctx->value_param_list = ent;
+                       }
+               }
+       }
+}
+
+/**
+ * Check if a value parameter is transmitted in a register.
+ * This might happen if the address of a parameter is taken which is
+ * transmitted in registers.
+ *
+ * Note that on some architectures this case must be handled specially
+ * because the place of the backing store is determined by their ABI.
+ *
+ * In the default case we move the entity to the frame type and create
+ * a backing store in the first block.
+ */
+static void fix_address_of_parameter_access(be_abi_irg_t *env, ir_entity *value_param_list) {
+       be_abi_call_t *call = env->call;
+       ir_graph *irg       = env->birg->irg;
+       ir_entity *ent, *next_ent, *new_list;
+       ir_type *frame_tp;
+       DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
+
+       new_list = NULL;
+       for (ent = value_param_list; ent; ent = next_ent) {
+               int i = get_struct_member_index(get_entity_owner(ent), ent);
+               be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
+
+               next_ent = get_entity_link(ent);
+               if (arg->in_reg) {
+                       DBG((dbg, LEVEL_2, "\targ #%d needs a backing store\n", i));
+                       set_entity_link(ent, new_list);
+                       new_list = ent;
+               }
+       }
+       if (new_list) {
+               /* ok, change the graph */
+               ir_node *start_bl = get_irg_start_block(irg);
+               ir_node *first_bl = NULL;
+               ir_node *frame, *imem, *nmem, *store, *mem, *args, *args_bl;
+               const ir_edge_t *edge;
+               optimization_state_t state;
+               int offset;
+
+               foreach_block_succ(start_bl, edge) {
+                       ir_node *succ = get_edge_src_irn(edge);
+                       if (start_bl != succ) {
+                               first_bl = succ;
+                               break;
+                       }
+               }
+               assert(first_bl);
+               /* we have already removed critical edges, so the following
+                  assertion should always be true. */
+               assert(get_Block_n_cfgpreds(first_bl) == 1);
+
+               /* now create backing stores */
+               frame = get_irg_frame(irg);
+               imem = get_irg_initial_mem(irg);
+
+               save_optimization_state(&state);
+               set_optimize(0);
+               nmem = new_r_Proj(irg, first_bl, get_irg_start(irg), mode_M, pn_Start_M);
+               restore_optimization_state(&state);
+
+               /* reroute all edges to the new memory source */
+               edges_reroute(imem, nmem, irg);
+
+               store   = NULL;
+               mem     = imem;
+               args    = get_irg_args(irg);
+               args_bl = get_nodes_block(args);
+               for (ent = new_list; ent; ent = get_entity_link(ent)) {
+                       int     i     = get_struct_member_index(get_entity_owner(ent), ent);
+                       ir_type *tp   = get_entity_type(ent);
+                       ir_mode *mode = get_type_mode(tp);
+                       ir_node *addr;
+
+                       /* address for the backing store */
+                       addr = be_new_FrameAddr(env->isa->sp->reg_class, irg, first_bl, frame, ent);
+
+                       if (store)
+                               mem = new_r_Proj(irg, first_bl, store, mode_M, pn_Store_M);
+
+                       /* the backing store itself */
+                       store = new_r_Store(irg, first_bl, mem, addr,
+                                           new_r_Proj(irg, args_bl, args, mode, i));
+               }
+               /* the new memory Proj now points to the last Store */
+               set_Proj_pred(nmem, store);
+               set_Proj_proj(nmem, pn_Store_M);
+
+               /* move all entities to the frame type */
+               frame_tp = get_irg_frame_type(irg);
+               offset   = get_type_size_bytes(frame_tp);
+               for (ent = new_list; ent; ent = get_entity_link(ent)) {
+                       ir_type *tp = get_entity_type(ent);
+                       int align = get_type_alignment_bytes(tp);
+
+                       offset += align - 1;
+                       offset &= -align;
+                       set_entity_owner(ent, frame_tp);
+                       add_class_member(frame_tp, ent);
+                       /* must be automatic to set a fixed layout */
+                       set_entity_allocation(ent, allocation_automatic);
+                       set_entity_offset(ent, offset);
+                       offset += get_type_size_bytes(tp);
+               }
+               set_type_size_bytes(frame_tp, offset);
+       }
+}
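+
+/*
+ * Illustrative sketch only (never compiled): for a register parameter p whose
+ * address is taken, the transformation above roughly creates, in the first
+ * block,
+ *
+ *   addr  = be_FrameAddr(frame, ent_p)        // slot on the frame
+ *   store = Store(mem, addr, Proj(Args, p))   // the backing store
+ *   nmem  = Proj(store, pn_Store_M)           // new initial memory
+ *
+ * and moves ent_p to the frame type with a fixed offset, so the former
+ * Sel-based accesses now read the backing store on the stack.
+ */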
+
+/**
+ * The start block has no jump; instead it has an initial exec Proj.
+ * The backend wants to handle all blocks the same way, so we replace
+ * the out cfg edge with a real jump.
+ */
+static void fix_start_block(ir_node *block, void *env) {
+       int      *done = env;
+       int      i;
+       ir_node  *start_block;
+       ir_graph *irg;
+
+       /* we processed the start block, return */
+       if (*done)
+               return;
+
+       irg         = get_irn_irg(block);
+       start_block = get_irg_start_block(irg);
+
+       for (i = get_Block_n_cfgpreds(block) - 1; i >= 0; --i) {
+               ir_node *pred       = get_Block_cfgpred(block, i);
+               ir_node *pred_block = get_nodes_block(pred);
+
+               /* ok, we are in a block which has the start block as a cfg predecessor */
+               if (pred_block == start_block) {
+                       ir_node *jump = new_r_Jmp(irg, pred_block);
+                       set_Block_cfgpred(block, i, jump);
+                       *done = 1;
+               }
+       }
+}
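+
+/*
+ * Illustrative sketch only (never compiled): before the walk, the successor
+ * block of the start block is reached through the initial exec Proj of the
+ * Start node; afterwards that control flow predecessor is a plain Jmp placed
+ * in the start block, which is what the rest of the backend expects.
+ */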
+
+/**
+ * Modify the irg itself and the frame type.
+ */
+static void modify_irg(be_abi_irg_t *env)
+{
+       be_abi_call_t *call       = env->call;
+       const arch_isa_t *isa     = env->birg->main_env->arch_env->isa;
+       const arch_register_t *sp = arch_isa_sp(isa);
+       ir_graph *irg             = env->birg->irg;
+       ir_node *bl               = get_irg_start_block(irg);
+       ir_node *end              = get_irg_end_block(irg);
+       ir_node *old_mem          = get_irg_initial_mem(irg);
+       ir_node *new_mem_proj;
+       ir_node *mem;
+       ir_type *method_type      = get_entity_type(get_irg_entity(irg));
+       pset *dont_save           = pset_new_ptr(8);
+
+       int n_params;
+       int i, j, n, temp;
+
+       reg_node_map_t *rm;
+       const arch_register_t *fp_reg;
+       ir_node *frame_pointer;
+       ir_node *barrier;
+       ir_node *reg_params_bl;
+       ir_node **args;
+       ir_node *arg_tuple;
+       ir_node *value_param_base;
+       const ir_edge_t *edge;
+       ir_type *arg_type, *bet_type;
+       lower_frame_sels_env_t ctx;
+       ir_entity **param_map;
+
+       bitset_t *used_proj_nr;
+       DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
+
+       DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
+
+       /* Convert the Sel nodes in the irg to frame load/store/addr nodes. */
+       ctx.env              = env;
+       ctx.value_param_list = NULL;
+       irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
+
+       /* the value_param_base anchor is not needed anymore */
+       value_param_base = get_irg_value_param_base(irg);
+       be_kill_node(value_param_base);
+       set_irg_value_param_base(irg, new_r_Bad(irg));
+
+       env->frame = obstack_alloc(&env->obst, sizeof(env->frame[0]));
+       env->regs  = pmap_create();
+
+       used_proj_nr = bitset_alloca(1024);
+       n_params     = get_method_n_params(method_type);
+       args         = obstack_alloc(&env->obst, n_params * sizeof(args[0]));
+       memset(args, 0, n_params * sizeof(args[0]));
+
+       /* Check if a value parameter is transmitted in a register.
+        * This might happen if the address of a parameter is taken which is
+        * transmitted in registers.
+        *
+        * Note that on some architectures this case must be handled specially
+        * because the place of the backing store is determined by their ABI.
+        *
+        * In the default case we move the entity to the frame type and create
+        * a backing store in the first block.
+        */
+       fix_address_of_parameter_access(env, ctx.value_param_list);
 
        /* Fill the argument vector */
+       arg_tuple = get_irg_args(irg);
        foreach_out_edge(arg_tuple, edge) {
                ir_node *irn = get_edge_src_irn(edge);
                int nr       = get_Proj_proj(irn);
@@ -551,15 +1740,20 @@ static void modify_irg(be_abi_irg_t *env)
                DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
        }
 
-       /* Get the ABI constraints from the ISA */
-       arch_isa_get_call_abi(isa, method_type, call);
+       arg_type = compute_arg_type(env, call, method_type, &param_map);
+       bet_type = call->cb->get_between_type(env->cb);
+       stack_frame_init(env->frame, arg_type, bet_type, get_irg_frame_type(irg), isa->stack_dir, param_map);
 
        /* Count the register params and add them to the number of Projs for the RegParams node */
        for(i = 0; i < n_params; ++i) {
                be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
-               if(arg->in_reg) {
+               if(arg->in_reg && args[i]) {
                        assert(arg->reg != sp && "cannot use stack pointer as parameter register");
-                       pmap_insert(env->regs, (void *) arg->reg, NULL);
+                       assert(i == get_Proj_proj(args[i]));
+
+                       /* For now, associate the register with the old Proj from Start representing that argument. */
+                       pmap_insert(env->regs, (void *) arg->reg, args[i]);
+                       bitset_set(used_proj_nr, i);
                        DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
                }
        }
@@ -569,324 +1763,488 @@ static void modify_irg(be_abi_irg_t *env)
                const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
                for(j = 0; j < cls->n_regs; ++j) {
                        const arch_register_t *reg = &cls->regs[j];
-                       if(arch_register_type_is(reg, callee_save))
+                       if(arch_register_type_is(reg, callee_save) ||
+                                       arch_register_type_is(reg, state)) {
                                pmap_insert(env->regs, (void *) reg, NULL);
+                       }
                }
        }
 
        pmap_insert(env->regs, (void *) sp, NULL);
        pmap_insert(env->regs, (void *) isa->bp, NULL);
-       reg_params_bl = get_irg_start_block(irg);
-       env->reg_params = reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
-       reg_params_nr = 0;
+       reg_params_bl   = get_irg_start_block(irg);
+       env->reg_params = be_new_RegParams(irg, reg_params_bl, pmap_count(env->regs));
+       add_irn_dep(env->reg_params, get_irg_start(irg));
 
        /*
         * make proj nodes for the callee save registers.
         * memorize them, since Return nodes get those as inputs.
+        *
+        * Note that if a register corresponds to an argument, the regs map contains
+        * the old Proj from Start for that argument.
         */
-       for(ent = pmap_first(env->regs); ent; ent = pmap_next(env->regs)) {
-               arch_register_t *reg = ent->key;
-               int pos = -(reg_params_nr + 1);
-               ent->value = new_r_Proj(irg, reg_params_bl, reg_params, reg->reg_class->mode, reg_params_nr);
-               be_set_constr_single_reg(reg_params, pos, reg);
+
+       rm = reg_map_to_arr(&env->obst, env->regs);
+       for(i = 0, n = pmap_count(env->regs); i < n; ++i) {
+               arch_register_t *reg = (void *) rm[i].reg;
+               ir_mode *mode        = reg->reg_class->mode;
+               long nr              = i;
+               int pos              = BE_OUT_POS((int) nr);
+               int flags            = 0;
+
+               ir_node *proj;
+
+               assert(nr >= 0);
+               bitset_set(used_proj_nr, nr);
+               proj = new_r_Proj(irg, reg_params_bl, env->reg_params, mode, nr);
+               pmap_insert(env->regs, (void *) reg, proj);
+               be_set_constr_single_reg(env->reg_params, pos, reg);
+               arch_set_irn_register(env->birg->main_env->arch_env, proj, reg);
 
                /*
                 * If the register is an ignore register,
                 * The Proj for that register shall also be ignored during register allocation.
                 */
                if(arch_register_type_is(reg, ignore))
-                       be_node_set_flags(reg_params, pos, arch_irn_flags_ignore);
+                       flags |= arch_irn_flags_ignore;
+
+               if(reg == sp)
+                       flags |= arch_irn_flags_modify_sp;
 
-               reg_params_nr++;
+               be_node_set_flags(env->reg_params, pos, flags);
 
-               DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", reg_params_nr - 1, reg->name));
+               DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
        }
+       obstack_free(&env->obst, rm);
 
-       /* Insert the code to set up the stack frame */
-       frame_pointer = setup_frame(env);
+       /* create a new initial memory proj */
+       assert(is_Proj(old_mem));
+       new_mem_proj = new_r_Proj(irg, get_nodes_block(old_mem),
+                                 new_r_Unknown(irg, mode_T), mode_M,
+                                 get_Proj_proj(old_mem));
+       mem = new_mem_proj;
 
-#if 0
-       proj_sp = pmap_get(regs, (void *) sp);
-       proj_bp = pmap_get(regs, (void *) bp);
-       assert(proj_sp != NULL && "There must be a Proj for the stack pointer");
-       assert(proj_sp != NULL && "There must be a Proj for the base pointer");
+       /* Generate the Prologue */
+       fp_reg  = call->cb->prologue(env->cb, &mem, env->regs);
 
-       /* Set the Proj for the stack pointer to ignore. */
-       be_node_set_flags(reg_params, -(get_Proj_proj(proj_sp) + 1), arch_irn_flags_ignore);
+       /* do the stack allocation BEFORE the barrier, or spill code
+          might be added before it */
+       env->init_sp = be_abi_reg_map_get(env->regs, sp);
+       env->init_sp = be_new_IncSP(sp, irg, bl, env->init_sp, BE_STACK_FRAME_SIZE_EXPAND);
+       be_abi_reg_map_set(env->regs, sp, env->init_sp);
 
-       /*
-        * If a frame pointer is needed and the frame pointer is in a dedicated register,
-        * also exclude that from register allocation by setting the corresponding
-        * Proj to ignore.
-        */
-       if(!env->omit_fp && env->dedicated_fp)
-               be_node_set_flags(reg_params, -(get_Proj_proj(proj_bp) + 1), arch_irn_flags_ignore);
+       env->start_barrier = barrier = create_barrier(env, bl, &mem, env->regs, 0);
 
+       env->init_sp = be_abi_reg_map_get(env->regs, sp);
+       arch_set_irn_register(env->birg->main_env->arch_env, env->init_sp, sp);
 
-       if(env->omit_fp) {
-               /* This is the stack pointer add/sub which allocates the frame. remind it for later fix up. */
-               env->init_sp  = be_new_IncSP(sp, irg, reg_params_bl, proj_sp, no_mem, 0, be_stack_dir_along);
-               frame_pointer = env->init_sp;
-       }
-
-       else {
-               env->init_sp  = proj_sp;
-               frame_pointer = be_new_Copy(sp->reg_class, irg, reg_params_bl, proj_sp);
-       }
-
-       /* Set the new frame pointer. */
-       exchange(get_irg_frame(irg), frame_pointer);
+       frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
        set_irg_frame(irg, frame_pointer);
-#endif
+       pset_insert_ptr(env->ignore_regs, fp_reg);
 
-       /* compute the start offset for the stack parameters. */
-       {
-               int arg_offset = 0;
-               int arg_size   = 0;
-               int inc_dir    = isa->stack_dir * (env->left_to_right ? 1 : -1);
+       /* rewire old mem users to new mem */
+       set_Proj_pred(new_mem_proj, get_Proj_pred(old_mem));
+       exchange(old_mem, mem);
 
-               for(i = 0; i < n_params; ++i) {
-                       be_abi_call_arg_t *arg = get_call_arg(call, 0, i);
-                       if(!arg->in_reg)
-                               arg_size += get_type_size_bytes(get_method_param_type(method_type, i));
-               }
+       set_irg_initial_mem(irg, mem);
 
-               arg_offset = -isa->stack_dir * call->arg_gap + env->left_to_right * arg_size;
+       /* Now, introduce stack param nodes for all parameters passed on the stack */
+       for(i = 0; i < n_params; ++i) {
+               ir_node *arg_proj = args[i];
+               ir_node *repl     = NULL;
 
-               /* Now, introduce stack param nodes for all parameters passed on the stack */
-               for(i = 0; i < max_arg; ++i) {
-                       ir_node *arg_proj = args[i];
-                       if(arg_proj != NULL) {
-                               be_abi_call_arg_t *arg;
-                               ir_type *param_type;
-                               int nr = get_Proj_proj(arg_proj);
+               if(arg_proj != NULL) {
+                       be_abi_call_arg_t *arg;
+                       ir_type *param_type;
+                       int nr = get_Proj_proj(arg_proj);
 
-                               nr         = MIN(nr, n_params);
-                               arg        = get_call_arg(call, 0, nr);
-                               param_type = get_method_param_type(method_type, nr);
+                       nr         = MIN(nr, n_params);
+                       arg        = get_call_arg(call, 0, nr);
+                       param_type = get_method_param_type(method_type, nr);
 
-                               if(arg->in_reg) {
-                                       args_repl[i] = new_r_Proj(irg, reg_params_bl, reg_params, get_irn_mode(arg_proj), reg_params_nr);
-                                       be_set_constr_single_reg(reg_params, -(reg_params_nr + 1), arg->reg);
-                                       reg_params_nr++;
+                       if(arg->in_reg) {
+                               repl = pmap_get(env->regs, (void *) arg->reg);
+                       }
+
+                       else if(arg->on_stack) {
+                               /* For atomic parameters which are actually used, we create a StackParam node. */
+                               if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
+                                       ir_mode *mode                    = get_type_mode(param_type);
+                                       const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
+                                       repl = be_new_StackParam(cls, isa->bp->reg_class, irg, reg_params_bl, mode, frame_pointer, arg->stack_ent);
                                }
 
-                               /* when the (stack) parameter is primitive, we insert a StackParam
-                               node representing the load of that parameter */
+                               /* The stack parameter is not primitive (it is a struct or array),
+                               so we create a node representing the parameter's address
+                               on the stack. */
                                else {
-                                       int size = get_type_size_bytes(param_type) * isa->stack_dir;
-
-                                       if(inc_dir < 0)
-                                               arg_offset -= size;
-
-                                       /* For atomic parameters which are actually used, we create a StackParam node. */
-                                       if(is_atomic_type(param_type) && get_irn_n_edges(args[i]) > 0) {
-                                               ir_mode *mode                    = get_type_mode(param_type);
-                                               const arch_register_class_t *cls = arch_isa_get_reg_class_for_mode(isa, mode);
-                                               args_repl[i] = be_new_StackParam(cls, irg, reg_params_bl, mode, frame_pointer, arg_offset);
-                                       }
-
-                                       /* The stack parameter is not primitive (it is a struct or array),
-                                       we thus will create a node representing the parameter's address
-                                       on the stack. */
-                                       else {
-                                               assert(0 && "struct parameters are not supported");
-                                       }
-
-                                       if(inc_dir > 0)
-                                               arg_offset += size;
+                                       repl = be_new_FrameAddr(sp->reg_class, irg, reg_params_bl, frame_pointer, arg->stack_ent);
                                }
                        }
-               }
-       }
 
-       /* reroute the edges from the original argument projs to the RegParam ones. */
-       for(i = 0; i < max_arg; ++i) {
-               if(args[i] != NULL) {
-                       assert(args_repl[i] != NULL);
-                       edges_reroute(args[i], args_repl[i], irg);
+                       assert(repl != NULL);
+                       exchange(args[i], repl);
                }
        }
 
-       /* All Return nodes hang on the End node, so look for them there. */
-       for(i = 0, n = get_irn_arity(end); i < n; ++i) {
-               ir_node *irn = get_irn_n(end, i);
-
-               if(get_irn_opcode(irn) == iro_Return) {
-                       ir_node *bl   = get_nodes_block(irn);
-                       int n_res     = get_Return_n_ress(irn);
-                       pmap *reg_map = pmap_create_ex(n_res);
-                       ir_node *ret;
-                       int i, n;
-                       ir_node **in;
-
-                       /* collect all arguments of the return */
-                       for(i = 0; i < n_res; ++i) {
-                               ir_node *res           = get_Return_res(irn, i);
-                               be_abi_call_arg_t *arg = get_call_arg(call, 1, i);
-
-                               assert(arg->in_reg && "return value must be passed in register");
-                               pmap_insert(reg_map, res, (void *) arg->reg);
-                               obstack_ptr_grow(&env->obst, res);
-                       }
-
-                       /* generate the clean up code and add additional parameters to the return. */
-                       clearup_frame(env, bl, &env->obst);
+       /* the arg proj is not needed anymore */
+       assert(get_irn_n_edges(arg_tuple) == 0);
+       be_kill_node(arg_tuple);
+       set_irg_args(irg, new_rd_Bad(irg));
 
-                       /* The in array for the new back end return is now ready. */
-                       n   = obstack_object_size(&env->obst) / sizeof(in[0]);
-                       in  = obstack_finish(&env->obst);
-                       ret = be_new_Return(irg, bl, n, in);
+       /* All Return nodes hang on the End node, so look for them there. */
+       for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
+               ir_node *irn = get_Block_cfgpred(end, i);
 
-                       /* Set the constraints for some arguments of the return. */
-                       for(i = 0; i < n; i++) {
-                               const arch_register_t *reg = pmap_get(reg_map, in[i]);
-                               if(reg != NULL)
-                                       be_set_constr_single_reg(ret, i, reg);
-                       }
+               if (is_Return(irn)) {
+                       ir_node *ret = create_be_return(env, irn, get_nodes_block(irn), get_Return_mem(irn), get_Return_n_ress(irn));
                        exchange(irn, ret);
-                       obstack_free(&env->obst, in);
-                       pmap_destroy(reg_map);
                }
        }
+       /* If we have endless loops here, n might be <= 0. Do NOT create a be_Return then;
+          the code is dead and will never be executed. */
 
+       del_pset(dont_save);
        obstack_free(&env->obst, args);
-       be_abi_call_free(call);
+
+       /* handle start block here (place a jump in the block) */
+       temp = 0;
+       irg_block_walk_graph(irg, fix_start_block, NULL, &temp);
 }
 
-static void collect_alloca_walker(ir_node *irn, void *data)
+/** Fix the state inputs of calls that still reference Unknown nodes. */
+static
+void fix_call_state_inputs(be_abi_irg_t *env)
 {
-       be_abi_irg_t *env = data;
-       if(get_irn_opcode(irn) == iro_Alloc && get_Alloc_where(irn) == stack_alloc)
-               obstack_ptr_grow(&env->obst, irn);
+       const arch_isa_t *isa = env->isa;
+       int i, n, n_states;
+       arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);
+
+       /* Collect all state registers */
+       n = arch_isa_get_n_reg_class(isa);
+       for(i = 0; i < n; ++i) {
+               int j;
+               const arch_register_class_t *cls = arch_isa_get_reg_class(isa, i);
+               for(j = 0; j < cls->n_regs; ++j) {
+                       const arch_register_t *reg = arch_register_for_index(cls, j);
+                       if(arch_register_type_is(reg, state)) {
+                               ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
+                       }
+               }
+       }
+
+       n = ARR_LEN(env->calls);
+       n_states = ARR_LEN(stateregs);
+       for(i = 0; i < n; ++i) {
+               int s, arity;
+               ir_node *call = env->calls[i];
+
+               arity = get_irn_arity(call);
+
+               /* the state register inputs are the last n_states inputs of each call */
+               for(s = 0; s < n_states; ++s) {
+                       int inp = arity - n_states + s;
+                       const arch_register_t *reg = stateregs[s];
+                       ir_node *regnode = be_abi_reg_map_get(env->regs, reg);
+
+                       set_irn_n(call, inp, regnode);
+               }
+       }
 }
 
 be_abi_irg_t *be_abi_introduce(be_irg_t *birg)
 {
-       be_abi_irg_t *env = malloc(sizeof(env[0]));
+       be_abi_irg_t *env  = xmalloc(sizeof(env[0]));
+       ir_node *old_frame = get_irg_frame(birg->irg);
+       ir_graph *irg      = birg->irg;
+
+       pmap_entry *ent;
+       ir_node *dummy;
+       optimization_state_t state;
+       unsigned *limited_bitset;
+
+       be_omit_fp = birg->main_env->options->omit_fp;
 
-       int i;
-       ir_node **stack_allocs;
-
-       env->method_type   = get_entity_type(get_irg_entity(birg->irg));
-       env->call          = be_abi_call_new();
-       arch_isa_get_call_abi(birg->main_env->arch_env->isa, env->method_type, env->call);
-
-       env->omit_fp       = (env->call->flags & BE_ABI_TRY_OMIT_FRAME_POINTER) != 0;
-       env->dedicated_fp  = (env->call->flags & BE_ABI_FRAME_POINTER_DEDICATED) != 0;
-       env->left_to_right = (env->call->flags & BE_ABI_LEFT_TO_RIGHT) != 0;
-       env->birg          = birg;
-       env->stack_ops     = pset_new_ptr(32);
-       env->dbg           = firm_dbg_register("firm.be.abi");
        obstack_init(&env->obst);
 
-       /* search for stack allocation nodes and record them */
-       irg_walk_graph(env->birg->irg, collect_alloca_walker, NULL, env);
-       obstack_ptr_grow(&env->obst, NULL);
-       stack_allocs = obstack_finish(&env->obst);
+       env->isa         = birg->main_env->arch_env->isa;
+       env->method_type = get_entity_type(get_irg_entity(irg));
+       env->call        = be_abi_call_new(env->isa->sp->reg_class);
+       arch_isa_get_call_abi(env->isa, env->method_type, env->call);
+
+       env->ignore_regs  = pset_new_ptr_default();
+       env->keep_map     = pmap_create();
+       env->dce_survivor = new_survive_dce();
+       env->birg         = birg;
+
+       env->sp_req.type    = arch_register_req_type_limited;
+       env->sp_req.cls     = arch_register_get_class(env->isa->sp);
+       limited_bitset      = rbitset_obstack_alloc(&env->obst, env->sp_req.cls->n_regs);
+       rbitset_set(limited_bitset, arch_register_get_index(env->isa->sp));
+       env->sp_req.limited = limited_bitset;
+
+       env->sp_cls_req.type  = arch_register_req_type_normal;
+       env->sp_cls_req.cls   = arch_register_get_class(env->isa->sp);
 
-       /* If there are stack allocations in the irg, we need a frame pointer */
-       if(stack_allocs[0] != NULL)
-               env->omit_fp = 0;
+       /* Beware: we later replace this node by the real one; ensure it is not CSE'd
+          with another Unknown, or the wrong stack pointer gets used */
+       save_optimization_state(&state);
+       set_optimize(0);
+       env->init_sp = dummy  = new_r_Unknown(irg, env->isa->sp->reg_class->mode);
+       restore_optimization_state(&state);
+       FIRM_DBG_REGISTER(env->dbg, "firm.be.abi");
 
+       env->calls = NEW_ARR_F(ir_node*, 0);
+
+       /* Lower all call nodes in the IRG. */
+       process_calls(env);
+
+       /*
+               Beware: initialize the backend ABI call object only after processing calls,
+               otherwise some information might not be available yet.
+       */
+       env->cb = env->call->cb->init(env->call, birg->main_env->arch_env, irg);
+
+       /* Process the IRG */
        modify_irg(env);
 
-       for(i = 0; stack_allocs[i] != NULL; ++i)
-               implement_stack_alloc(env, stack_allocs[i]);
+       /* fix call inputs for state registers */
+       fix_call_state_inputs(env);
+
+       /* We don't need the keep map anymore. */
+       pmap_destroy(env->keep_map);
+
+       /* calls array is not needed anymore */
+       DEL_ARR_F(env->calls);
 
-       irg_walk_graph(env->birg->irg, NULL, adjust_call_walker, env);
+       /* reroute the stack origin of the calls to the true stack origin. */
+       exchange(dummy, env->init_sp);
+       exchange(old_frame, get_irg_frame(irg));
+
+       /* Make some important node pointers survive the dead node elimination. */
+       survive_dce_register_irn(env->dce_survivor, &env->init_sp);
+       pmap_foreach(env->regs, ent) {
+               survive_dce_register_irn(env->dce_survivor, (ir_node **) &ent->value);
+       }
+
+       env->call->cb->done(env->cb);
+       env->cb = NULL;
        return env;
 }
 
-static void collect_stack_nodes(ir_node *irn, void *data)
+void be_abi_free(be_abi_irg_t *env)
+{
+       free_survive_dce(env->dce_survivor);
+       del_pset(env->ignore_regs);
+       pmap_destroy(env->regs);
+       obstack_free(&env->obst, NULL);
+       free(env);
+}
+
+void be_abi_put_ignore_regs(be_abi_irg_t *abi, const arch_register_class_t *cls, bitset_t *bs)
+{
+       arch_register_t *reg;
+
+       for(reg = pset_first(abi->ignore_regs); reg; reg = pset_next(abi->ignore_regs))
+               if(reg->reg_class == cls)
+                       bitset_set(bs, reg->index);
+}
+
+/* Returns the stack layout from an ABI environment. */
+const be_stack_layout_t *be_abi_get_stack_layout(const be_abi_irg_t *abi) {
+       return abi->frame;
+}
+
+/*
+
+  _____ _        ____  _             _
+ |  ___(_)_  __ / ___|| |_ __ _  ___| | __
+ | |_  | \ \/ / \___ \| __/ _` |/ __| |/ /
+ |  _| | |>  <   ___) | || (_| | (__|   <
+ |_|   |_/_/\_\ |____/ \__\__,_|\___|_|\_\
+
+*/
+
+typedef ir_node **node_array;
+
+typedef struct fix_stack_walker_env_t {
+       node_array sp_nodes;
+       const arch_env_t *arch_env;
+} fix_stack_walker_env_t;
+
+/**
+ * Walker. Collect all stack modifying nodes.
+ */
+static void collect_stack_nodes_walker(ir_node *node, void *data)
 {
-       pset *s = data;
+       fix_stack_walker_env_t *env = data;
 
-       switch(be_get_irn_opcode(irn)) {
-       case beo_IncSP:
-       case beo_AddSP:
-               pset_insert_ptr(s, irn);
+       if (arch_irn_is(env->arch_env, node, modify_sp)) {
+               assert(get_irn_mode(node) != mode_M && get_irn_mode(node) != mode_T);
+               ARR_APP1(ir_node*, env->sp_nodes, node);
        }
 }
 
 void be_abi_fix_stack_nodes(be_abi_irg_t *env)
 {
-       dom_front_info_t *df;
-       pset *stack_ops;
+       be_ssa_construction_env_t senv;
+       int i, len;
+       ir_node **phis;
+       be_irg_t *birg = env->birg;
+       be_lv_t *lv = be_get_birg_liveness(birg);
+       fix_stack_walker_env_t walker_env;
+       arch_isa_t *isa;
+
+       walker_env.sp_nodes = NEW_ARR_F(ir_node*, 0);
+       walker_env.arch_env = birg->main_env->arch_env;
+       isa = walker_env.arch_env->isa;
+
+       irg_walk_graph(birg->irg, collect_stack_nodes_walker, NULL, &walker_env);
+
+       /* Nothing to be done if we didn't find any node; in fact we must not
+        * continue, because for endless loops the IncSP might have had no users
+        * and is Bad now.
+        */
+       len = ARR_LEN(walker_env.sp_nodes);
+       if(len == 0) {
+               DEL_ARR_F(walker_env.sp_nodes);
+               return;
+       }
 
-       /* We need dominance frontiers for fix up */
-       df = be_compute_dominance_frontiers(env->birg->irg);
+       be_ssa_construction_init(&senv, birg);
+       be_ssa_construction_add_copies(&senv, walker_env.sp_nodes,
+                                   ARR_LEN(walker_env.sp_nodes));
+       be_ssa_construction_fix_users_array(&senv, walker_env.sp_nodes,
+                                     ARR_LEN(walker_env.sp_nodes));
 
-       stack_ops = pset_new_ptr_default();
-       pset_insert_ptr(env->stack_ops, env->init_sp);
-       irg_walk_graph(env->birg->irg, collect_stack_nodes, NULL, stack_ops);
-       be_ssa_constr_set(df, stack_ops);
-       del_pset(stack_ops);
+       if(lv != NULL) {
+               len = ARR_LEN(walker_env.sp_nodes);
+               for(i = 0; i < len; ++i) {
+                       be_liveness_update(lv, walker_env.sp_nodes[i]);
+               }
+               be_ssa_construction_update_liveness_phis(&senv, lv);
+       }
 
-       /* free these dominance frontiers */
-       be_free_dominance_frontiers(df);
-}
+       phis = be_ssa_construction_get_new_phis(&senv);
 
-static int get_dir(ir_node *irn)
-{
-       return 1 - 2 * (be_get_IncSP_direction(irn) == be_stack_dir_against);
+       /* set register requirements for stack phis */
+       len = ARR_LEN(phis);
+       for(i = 0; i < len; ++i) {
+               ir_node *phi = phis[i];
+               be_set_phi_reg_req(walker_env.arch_env, phi, &env->sp_req);
+               be_set_phi_flags(walker_env.arch_env, phi, arch_irn_flags_ignore | arch_irn_flags_modify_sp);
+               arch_set_irn_register(walker_env.arch_env, phi, env->isa->sp);
+       }
+       be_ssa_construction_destroy(&senv);
+
+       DEL_ARR_F(walker_env.sp_nodes);
 }
 
 static int process_stack_bias(be_abi_irg_t *env, ir_node *bl, int bias)
 {
-       const arch_env_t *aenv = env->birg->main_env->arch_env;
+       const arch_env_t *arch_env = env->birg->main_env->arch_env;
+       int omit_fp            = env->call->flags.bits.try_omit_fp;
        ir_node *irn;
-       int start_bias = bias;
 
        sched_foreach(bl, irn) {
-               if(be_is_IncSP(irn)) {
-                       int ofs = be_get_IncSP_offset(irn);
-                       int dir = get_dir(irn);
 
-                       if(ofs == BE_STACK_FRAME_SIZE) {
-                               ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
-                               be_set_IncSP_offset(irn, ofs);
+               /*
+                  Check if the node relates to an entity on the stack frame.
+                  If so, set the true offset (including the bias) for that
+                  node.
+                */
+               ir_entity *ent = arch_get_frame_entity(arch_env, irn);
+               if(ent) {
+                       int offset = get_stack_entity_offset(env->frame, ent, bias);
+                       arch_set_frame_offset(arch_env, irn, offset);
+                       DBG((env->dbg, LEVEL_2, "%F has offset %d (including bias %d)\n", ent, offset, bias));
+               }
+
+               /*
+                  If the node modifies the stack pointer by a constant offset,
+                  record that in the bias.
+                */
+               if(arch_irn_is(arch_env, irn, modify_sp)) {
+                       int ofs = arch_get_sp_bias(arch_env, irn);
+
+                       if(be_is_IncSP(irn)) {
+                               if(ofs == BE_STACK_FRAME_SIZE_EXPAND) {
+                                       ofs = get_type_size_bytes(get_irg_frame_type(env->birg->irg));
+                                       be_set_IncSP_offset(irn, ofs);
+                               } else if(ofs == BE_STACK_FRAME_SIZE_SHRINK) {
+                                       ofs = - get_type_size_bytes(get_irg_frame_type(env->birg->irg));
+                                       be_set_IncSP_offset(irn, ofs);
+                               }
                        }
 
-                       bias += dir * ofs;
+                       if(omit_fp)
+                               bias += ofs;
                }
-
-               else
-                       arch_set_stack_bias(aenv, irn, bias);
        }
 
        return bias;
 }
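+
+/*
+ * Illustrative sketch only (never compiled): assume try_omit_fp is set and the
+ * frame type is 24 bytes large. The IncSP in the start block is rewritten from
+ * the symbolic BE_STACK_FRAME_SIZE_EXPAND to 24 and the running bias grows by
+ * 24, so every frame entity referenced after that point gets the bias folded
+ * into the offset handed to arch_set_frame_offset().
+ */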
 
+/**
+ * A helper struct for the bias walker.
+ */
+struct bias_walk {
+       be_abi_irg_t *env;     /**< The ABI irg environment. */
+       int start_block_bias;  /**< The bias at the end of the start block. */
+       ir_node *start_block;  /**< The start block of the current graph. */
+};
+
+/**
+ * Block-Walker: fix all stack offsets
+ */
 static void stack_bias_walker(ir_node *bl, void *data)
 {
-       if(bl != get_irg_start_block(get_irn_irg(bl))) {
-               be_abi_irg_t *env = data;
-               process_stack_bias(env, bl, env->start_block_bias);
+       struct bias_walk *bw = data;
+       if (bl != bw->start_block) {
+               process_stack_bias(bw->env, bl, bw->start_block_bias);
        }
 }
 
 void be_abi_fix_stack_bias(be_abi_irg_t *env)
 {
        ir_graph *irg  = env->birg->irg;
+       struct bias_walk bw;
 
-       /* Determine the stack bias at the and of the start block. */
-       env->start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
+       stack_frame_compute_initial_offset(env->frame);
+       // stack_layout_dump(stdout, env->frame);
+
+       /* Determine the stack bias at the end of the start block. */
+       bw.start_block_bias = process_stack_bias(env, get_irg_start_block(irg), 0);
 
        /* fix the bias in all other blocks */
-       irg_block_walk_graph(irg, stack_bias_walker, NULL, env);
+       bw.env = env;
+       bw.start_block = get_irg_start_block(irg);
+       irg_block_walk_graph(irg, stack_bias_walker, NULL, &bw);
 }
 
-void be_abi_free(be_abi_irg_t *env)
+ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
 {
-       del_pset(env->stack_ops);
-       obstack_free(&env->obst, NULL);
-       free(env);
+       assert(arch_register_type_is(reg, callee_save));
+       assert(pmap_contains(abi->regs, (void *) reg));
+       return pmap_get(abi->regs, (void *) reg);
 }
 
-ir_node *be_abi_get_callee_save_irn(be_abi_irg_t *abi, const arch_register_t *reg)
+ir_node *be_abi_get_ignore_irn(be_abi_irg_t *abi, const arch_register_t *reg)
 {
-       assert(arch_register_type_is(reg, callee_save));
+       assert(arch_register_type_is(reg, ignore));
        assert(pmap_contains(abi->regs, (void *) reg));
        return pmap_get(abi->regs, (void *) reg);
 }
+
+ir_node *be_abi_get_start_barrier(be_abi_irg_t *abi)
+{
+       return abi->start_barrier;
+}
+
+/**
+ * Returns non-zero if the ABI has omitted the frame pointer in
+ * the current graph.
+ */
+int be_abi_omit_fp(const be_abi_irg_t *abi) {
+       return abi->call->flags.bits.try_omit_fp;
+}