Fix: Spills have ProjMs now
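
The spill-to-store transformation in transform_to_Store() now creates a mode_M Proj for the store it builds and exchanges the old Spill node with that Proj instead of with the store itself, so memory users of the spill end up hanging off a ProjM. The patch also drops the beabi prologue/epilogue callbacks; the ia32 backend now builds its prologue and epilogue itself in introduce_prolog_epilog() after register allocation.

A minimal sketch of the ProjM pattern, reduced from transform_to_Store() to the plain integer Store case (the helper name spill_to_store_sketch and the omission of scheduling and of the float/SSE variants are illustrative, not part of the patch):

    /* Turn a be_Spill into an ia32 Store and hand its users the store's ProjM. */
    static void spill_to_store_sketch(ir_node *node)
    {
            ir_graph *irg   = get_irn_irg(node);
            dbg_info *dbg   = get_irn_dbg_info(node);
            ir_node  *block = get_nodes_block(node);
            ir_node  *noreg = ia32_new_NoReg_gp(irg);
            ir_node  *nomem = get_irg_no_mem(irg);
            ir_node  *ptr   = get_irg_frame(irg);
            ir_node  *val   = get_irn_n(node, n_be_Spill_val);

            /* build the frame store and a mode_M Proj for its memory result */
            ir_node *store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
            ir_node *res   = new_r_Proj(store, mode_M, pn_ia32_Store_M);

            set_ia32_op_type(store, ia32_AddrModeD);
            set_ia32_frame_ent(store, be_get_frame_entity(node));
            set_ia32_use_frame(store);

            /* users of the Spill now reference the ProjM, not the Store node */
            exchange(node, res);
    }

The same pattern is applied to the xStore, vfst, xxStore and Store8Bit variants in the hunk below; previously exchange() rewired the Spill's users to the store node itself rather than to its memory result.
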
diff --git a/ir/be/ia32/bearch_ia32.c b/ir/be/ia32/bearch_ia32.c
index 502c659..0b36118 100644
--- a/ir/be/ia32/bearch_ia32.c
+++ b/ir/be/ia32/bearch_ia32.c
@@ -51,6 +51,7 @@
 #include "iroptimize.h"
 #include "instrument.h"
 #include "iropt_t.h"
+#include "lower_dw.h"
 
 #include "../beabi.h"
 #include "../beirg.h"
@@ -84,7 +85,6 @@
 #include "ia32_x87.h"
 #include "ia32_dbg_stat.h"
 #include "ia32_finish.h"
-#include "ia32_util.h"
 #include "ia32_fpu.h"
 #include "ia32_architecture.h"
 
@@ -99,7 +99,6 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
 ir_mode         *ia32_mode_fpcw       = NULL;
 
 /** The current omit-fp state */
-static unsigned ia32_curr_fp_ommitted  = 0;
 static ir_type *omit_fp_between_type   = NULL;
 static ir_type *between_type           = NULL;
 static ir_entity *old_bp_ent           = NULL;
@@ -258,159 +257,13 @@ static int ia32_get_sp_bias(const ir_node *node)
        if (is_ia32_Pop(node) || is_ia32_PopMem(node))
                return -4;
 
-       if (is_ia32_Leave(node) || (be_is_Copy(node)
-           && arch_get_irn_register(node) == &ia32_registers[REG_ESP])) {
+       if (is_ia32_Leave(node) || is_ia32_CopyEbpEsp(node)) {
                return SP_BIAS_RESET;
        }
 
        return 0;
 }
 
-/**
- * Generate the routine prologue.
- *
- * @param self       The callback object.
- * @param mem        A pointer to the mem node. Update this if you define new memory.
- * @param reg_map    A map mapping all callee_save/ignore/parameter registers to their defining nodes.
- * @param stack_bias Points to the current stack bias, can be modified if needed.
- *
- * @return           The register which shall be used as a stack frame base.
- *
- * All nodes which define registers in @p reg_map must keep @p reg_map current.
- */
-static const arch_register_t *ia32_abi_prologue(void *self, ir_node **mem, pmap *reg_map, int *stack_bias)
-{
-       ia32_abi_env_t   *env      = (ia32_abi_env_t*)self;
-       ir_graph         *irg      = env->irg;
-       const arch_env_t *arch_env = be_get_irg_arch_env(irg);
-
-       ia32_curr_fp_ommitted = env->flags.try_omit_fp;
-       if (! env->flags.try_omit_fp) {
-               ir_node  *bl      = get_irg_start_block(env->irg);
-               ir_node  *curr_sp = be_abi_reg_map_get(reg_map, arch_env->sp);
-               ir_node  *curr_bp = be_abi_reg_map_get(reg_map, arch_env->bp);
-               ir_node  *noreg   = ia32_new_NoReg_gp(irg);
-               ir_node  *push;
-
-               /* mark bp register as ignore */
-               be_set_constr_single_reg_out(get_Proj_pred(curr_bp),
-                               get_Proj_proj(curr_bp), arch_env->bp, arch_register_req_type_ignore);
-
-               /* push ebp */
-               push    = new_bd_ia32_Push(NULL, bl, noreg, noreg, *mem, curr_bp, curr_sp);
-               arch_irn_add_flags(push, arch_irn_flags_prolog);
-               curr_sp = new_r_Proj(push, get_irn_mode(curr_sp), pn_ia32_Push_stack);
-               *mem    = new_r_Proj(push, mode_M, pn_ia32_Push_M);
-
-               /* the push must have SP out register */
-               arch_set_irn_register(curr_sp, arch_env->sp);
-
-               /* this modifies the stack bias, because we pushed 32bit */
-               *stack_bias -= 4;
-
-               /* move esp to ebp */
-               curr_bp = be_new_Copy(arch_env->bp->reg_class, bl, curr_sp);
-               arch_irn_add_flags(curr_bp, arch_irn_flags_prolog);
-               be_set_constr_single_reg_out(curr_bp, 0, arch_env->bp,
-                                            arch_register_req_type_ignore);
-
-               /* beware: the copy must be done before any other sp use */
-               curr_sp = be_new_CopyKeep_single(arch_env->sp->reg_class, bl, curr_sp, curr_bp, get_irn_mode(curr_sp));
-               arch_irn_add_flags(curr_sp, arch_irn_flags_prolog);
-               be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
-                                                    arch_register_req_type_produces_sp);
-
-               be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
-               be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
-
-               return arch_env->bp;
-       }
-
-       return arch_env->sp;
-}
-
-/**
- * Generate the routine epilogue.
- * @param self    The callback object.
- * @param bl      The block for the epilog
- * @param mem     A pointer to the mem node. Update this if you define new memory.
- * @param reg_map A map mapping all callee_save/ignore/parameter registers to their defining nodes.
- * @return        The register which shall be used as a stack frame base.
- *
- * All nodes which define registers in @p reg_map must keep @p reg_map current.
- */
-static void ia32_abi_epilogue(void *self, ir_node *bl, ir_node **mem, pmap *reg_map)
-{
-       ia32_abi_env_t   *env      = (ia32_abi_env_t*)self;
-       const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
-       ir_node          *curr_sp  = be_abi_reg_map_get(reg_map, arch_env->sp);
-       ir_node          *curr_bp  = be_abi_reg_map_get(reg_map, arch_env->bp);
-
-       if (env->flags.try_omit_fp) {
-               /* simply remove the stack frame here */
-               curr_sp = be_new_IncSP(arch_env->sp, bl, curr_sp, BE_STACK_FRAME_SIZE_SHRINK, 0);
-               arch_irn_add_flags(curr_sp, arch_irn_flags_epilog);
-       } else {
-               ir_mode *mode_bp = arch_env->bp->reg_class->mode;
-
-               if (ia32_cg_config.use_leave) {
-                       ir_node *leave;
-
-                       /* leave */
-                       leave   = new_bd_ia32_Leave(NULL, bl, curr_bp);
-                       curr_bp = new_r_Proj(leave, mode_bp, pn_ia32_Leave_frame);
-                       curr_sp = new_r_Proj(leave, get_irn_mode(curr_sp), pn_ia32_Leave_stack);
-                       arch_irn_add_flags(leave, arch_irn_flags_epilog);
-               } else {
-                       ir_node *pop;
-
-                       /* copy ebp to esp */
-                       curr_sp = be_new_Copy(&ia32_reg_classes[CLASS_ia32_gp], bl, curr_bp);
-                       arch_set_irn_register(curr_sp, arch_env->sp);
-                       be_set_constr_single_reg_out(curr_sp, 0, arch_env->sp,
-                                                        arch_register_req_type_ignore);
-                       arch_irn_add_flags(curr_sp, arch_irn_flags_epilog);
-
-                       /* pop ebp */
-                       pop     = new_bd_ia32_PopEbp(NULL, bl, *mem, curr_sp);
-                       curr_bp = new_r_Proj(pop, mode_bp, pn_ia32_Pop_res);
-                       curr_sp = new_r_Proj(pop, get_irn_mode(curr_sp), pn_ia32_Pop_stack);
-                       arch_irn_add_flags(pop, arch_irn_flags_epilog);
-
-                       *mem = new_r_Proj(pop, mode_M, pn_ia32_Pop_M);
-               }
-               arch_set_irn_register(curr_sp, arch_env->sp);
-               arch_set_irn_register(curr_bp, arch_env->bp);
-       }
-
-       be_abi_reg_map_set(reg_map, arch_env->sp, curr_sp);
-       be_abi_reg_map_set(reg_map, arch_env->bp, curr_bp);
-}
-
-/**
- * Initialize the callback object.
- * @param call The call object.
- * @param irg  The graph with the method.
- * @return     Some pointer. This pointer is passed to all other callback functions as self object.
- */
-static void *ia32_abi_init(const be_abi_call_t *call, ir_graph *irg)
-{
-       ia32_abi_env_t      *env = XMALLOC(ia32_abi_env_t);
-       be_abi_call_flags_t  fl  = be_abi_call_get_flags(call);
-       env->flags = fl.bits;
-       env->irg   = irg;
-       return env;
-}
-
-/**
- * Destroy the callback object.
- * @param self The callback object.
- */
-static void ia32_abi_done(void *self)
-{
-       free(self);
-}
-
 /**
  * Build the between type and entities if not already build.
  */
@@ -445,30 +298,31 @@ static void ia32_build_between_type(void)
  * it will contain the return address and space to store the old base pointer.
  * @return The Firm type modeling the ABI between type.
  */
-static ir_type *ia32_abi_get_between_type(void *self)
+static ir_type *ia32_abi_get_between_type(ir_graph *irg)
 {
-       ia32_abi_env_t *env = (ia32_abi_env_t*)self;
-
+       const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
        ia32_build_between_type();
-       return env->flags.try_omit_fp ? omit_fp_between_type : between_type;
+       return layout->sp_relative ? omit_fp_between_type : between_type;
 }
 
 /**
  * Return the stack entity that contains the return address.
  */
-ir_entity *ia32_get_return_address_entity(void)
+ir_entity *ia32_get_return_address_entity(ir_graph *irg)
 {
+       const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
        ia32_build_between_type();
-       return ia32_curr_fp_ommitted ? omit_fp_ret_addr_ent : ret_addr_ent;
+       return layout->sp_relative ? omit_fp_ret_addr_ent : ret_addr_ent;
 }
 
 /**
  * Return the stack entity that contains the frame address.
  */
-ir_entity *ia32_get_frame_address_entity(void)
+ir_entity *ia32_get_frame_address_entity(ir_graph *irg)
 {
+       const be_stack_layout_t *layout = be_get_irg_stack_layout(irg);
        ia32_build_between_type();
-       return ia32_curr_fp_ommitted ? NULL : old_bp_ent;
+       return layout->sp_relative ? NULL : old_bp_ent;
 }
 
 /**
@@ -565,7 +419,7 @@ static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_
        mode     = get_irn_mode(irn);
        irn_mode = get_irn_mode(irn);
        noreg    = get_irn_n(irn, 0);
-       nomem    = new_r_NoMem(irg);
+       nomem    = get_irg_no_mem(irg);
        dbg      = get_irn_dbg_info(irn);
 
        /* initialize structure */
@@ -575,7 +429,6 @@ static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_
 
        switch (get_ia32_irn_opcode(irn)) {
                case iro_ia32_Add:
-#if 0
                        if (get_ia32_immop_type(irn) == ia32_ImmConst) {
                                /* we have an add with a const here */
                                /* invers == add with negated const */
@@ -597,10 +450,8 @@ static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_
                                inverse->nodes[0] = new_bd_ia32_Sub(dbg, block, noreg, noreg, nomem, (ir_node*) irn, get_irn_n(irn, i ^ 1));
                                inverse->costs   += 2;
                        }
-#endif
                        break;
                case iro_ia32_Sub:
-#if 0
                        if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                                /* we have a sub with a const/symconst here */
                                /* invers == add with this const */
@@ -618,10 +469,8 @@ static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_
                                }
                                inverse->costs += 1;
                        }
-#endif
                        break;
                case iro_ia32_Xor:
-#if 0
                        if (get_ia32_immop_type(irn) != ia32_ImmNone) {
                                /* xor with const: inverse = xor */
                                inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, get_irn_n(irn, i), noreg);
@@ -633,7 +482,6 @@ static arch_inverse_t *ia32_get_inverse(const ir_node *irn, int i, arch_inverse_
                                inverse->nodes[0] = new_bd_ia32_Xor(dbg, block, noreg, noreg, nomem, (ir_node *) irn, get_irn_n(irn, i));
                                inverse->costs   += 1;
                        }
-#endif
                        break;
                case iro_ia32_Not: {
                        inverse->nodes[0] = new_bd_ia32_Not(dbg, block, (ir_node*) irn);
@@ -778,11 +626,7 @@ static void ia32_perform_memory_operand(ir_node *irn, ir_node *spill,
 }
 
 static const be_abi_callbacks_t ia32_abi_callbacks = {
-       ia32_abi_init,
-       ia32_abi_done,
        ia32_abi_get_between_type,
-       ia32_abi_prologue,
-       ia32_abi_epilogue
 };
 
 /* register allocator interface */
@@ -875,7 +719,7 @@ ir_node *ia32_turn_back_am(ir_node *node)
        ia32_copy_am_attrs(load, node);
        if (is_ia32_is_reload(node))
                set_ia32_is_reload(load);
-       set_irn_n(node, n_ia32_mem, new_r_NoMem(irg));
+       set_irn_n(node, n_ia32_mem, get_irg_no_mem(irg));
 
        switch (get_ia32_am_support(node)) {
                case ia32_am_unary:
@@ -985,7 +829,7 @@ static void transform_to_Load(ir_node *node)
        ir_node *noreg       = ia32_new_NoReg_gp(irg);
        ir_node *sched_point = NULL;
        ir_node *ptr         = get_irg_frame(irg);
-       ir_node *mem         = get_irn_n(node, be_pos_Reload_mem);
+       ir_node *mem         = get_irn_n(node, n_be_Reload_mem);
        ir_node *new_op, *proj;
        const arch_register_t *reg;
 
@@ -1039,12 +883,13 @@ static void transform_to_Store(ir_node *node)
        dbg_info *dbg  = get_irn_dbg_info(node);
        ir_node *block = get_nodes_block(node);
        ir_entity *ent = be_get_frame_entity(node);
-       const ir_node *spillval = get_irn_n(node, be_pos_Spill_val);
+       const ir_node *spillval = get_irn_n(node, n_be_Spill_val);
        ir_mode *mode  = get_spill_mode(spillval);
        ir_node *noreg = ia32_new_NoReg_gp(irg);
-       ir_node *nomem = new_r_NoMem(irg);
+       ir_node *nomem = get_irg_no_mem(irg);
        ir_node *ptr   = get_irg_frame(irg);
-       ir_node *val   = get_irn_n(node, be_pos_Spill_val);
+       ir_node *val   = get_irn_n(node, n_be_Spill_val);
+       ir_node *res;
        ir_node *store;
        ir_node *sched_point = NULL;
 
@@ -1053,17 +898,23 @@ static void transform_to_Store(ir_node *node)
        }
 
        if (mode_is_float(mode)) {
-               if (ia32_cg_config.use_sse2)
+               if (ia32_cg_config.use_sse2) {
                        store = new_bd_ia32_xStore(dbg, block, ptr, noreg, nomem, val);
-               else
+                       res   = new_r_Proj(store, mode_M, pn_ia32_xStore_M);
+               } else {
                        store = new_bd_ia32_vfst(dbg, block, ptr, noreg, nomem, val, mode);
+                       res   = new_r_Proj(store, mode_M, pn_ia32_vfst_M);
+               }
        } else if (get_mode_size_bits(mode) == 128) {
                /* Spill 128 bit SSE registers */
                store = new_bd_ia32_xxStore(dbg, block, ptr, noreg, nomem, val);
+               res   = new_r_Proj(store, mode_M, pn_ia32_xxStore_M);
        } else if (get_mode_size_bits(mode) == 8) {
                store = new_bd_ia32_Store8Bit(dbg, block, ptr, noreg, nomem, val);
+               res   = new_r_Proj(store, mode_M, pn_ia32_Store8Bit_M);
        } else {
                store = new_bd_ia32_Store(dbg, block, ptr, noreg, nomem, val);
+               res   = new_r_Proj(store, mode_M, pn_ia32_Store_M);
        }
 
        set_ia32_op_type(store, ia32_AddrModeD);
@@ -1079,7 +930,7 @@ static void transform_to_Store(ir_node *node)
                sched_remove(node);
        }
 
-       exchange(node, store);
+       exchange(node, res);
 }
 
 static ir_node *create_push(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_node *mem, ir_entity *ent)
@@ -1110,7 +961,8 @@ static ir_node *create_pop(ir_node *node, ir_node *schedpoint, ir_node *sp, ir_e
        ir_node  *noreg = ia32_new_NoReg_gp(irg);
        ir_node  *frame = get_irg_frame(irg);
 
-       ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg, new_r_NoMem(irg), sp);
+       ir_node *pop = new_bd_ia32_PopMem(dbg, block, frame, noreg,
+                                         get_irg_no_mem(irg), sp);
 
        set_ia32_frame_ent(pop, ent);
        set_ia32_use_frame(pop);
@@ -1145,7 +997,7 @@ static void transform_MemPerm(ir_node *node)
 {
        ir_node         *block = get_nodes_block(node);
        ir_graph        *irg   = get_irn_irg(node);
-       ir_node         *sp    = be_abi_get_ignore_irn(be_get_irg_abi(irg), &ia32_registers[REG_ESP]);
+       ir_node         *sp    = be_get_initial_reg_value(irg, &ia32_registers[REG_ESP]);
        int              arity = be_get_MemPerm_entity_arity(node);
        ir_node        **pops  = ALLOCAN(ir_node*, arity);
        ir_node         *in[1];
@@ -1178,7 +1030,7 @@ static void transform_MemPerm(ir_node *node)
                        sp = create_spproj(node, push, pn_ia32_Push_stack);
                }
 
-               set_irn_n(node, i, new_r_Bad(irg));
+               set_irn_n(node, i, new_r_Bad(irg, mode_X));
        }
 
        /* create pops */
@@ -1224,11 +1076,8 @@ static void transform_MemPerm(ir_node *node)
        }
 
        /* remove memperm */
-       arity = get_irn_arity(node);
-       for (i = 0; i < arity; ++i) {
-               set_irn_n(node, i, new_r_Bad(irg));
-       }
        sched_remove(node);
+       kill_node(node);
 }
 
 /**
@@ -1327,6 +1176,143 @@ need_stackent:
        be_node_needs_frame_entity(env, node, mode, align);
 }
 
+static int determine_ebp_input(ir_node *ret)
+{
+       const arch_register_t *bp = &ia32_registers[REG_EBP];
+       int   arity               = get_irn_arity(ret);
+       int   i;
+
+       for (i = 0; i < arity; ++i) {
+               ir_node *input = get_irn_n(ret, i);
+               if (arch_get_irn_register(input) == bp)
+                       return i;
+       }
+       panic("no ebp input found at %+F", ret);
+}
+
+static void introduce_epilog(ir_node *ret)
+{
+       const arch_register_t *sp         = &ia32_registers[REG_ESP];
+       const arch_register_t *bp         = &ia32_registers[REG_EBP];
+       ir_graph              *irg        = get_irn_irg(ret);
+       ir_type               *frame_type = get_irg_frame_type(irg);
+       unsigned               frame_size = get_type_size_bytes(frame_type);
+       be_stack_layout_t     *layout     = be_get_irg_stack_layout(irg);
+       ir_node               *block      = get_nodes_block(ret);
+       ir_node               *first_sp   = get_irn_n(ret, n_be_Return_sp);
+       ir_node               *curr_sp    = first_sp;
+       ir_mode               *mode_gp    = mode_Iu;
+
+       if (!layout->sp_relative) {
+               int      n_ebp   = determine_ebp_input(ret);
+               ir_node *curr_bp = get_irn_n(ret, n_ebp);
+               if (ia32_cg_config.use_leave) {
+                       ir_node *leave = new_bd_ia32_Leave(NULL, block, curr_bp);
+                       curr_bp        = new_r_Proj(leave, mode_gp, pn_ia32_Leave_frame);
+                       curr_sp        = new_r_Proj(leave, mode_gp, pn_ia32_Leave_stack);
+                       arch_set_irn_register(curr_bp, bp);
+                       arch_set_irn_register(curr_sp, sp);
+                       sched_add_before(ret, leave);
+               } else {
+                       ir_node *pop;
+                       ir_node *curr_mem = get_irn_n(ret, n_be_Return_mem);
+                       /* copy ebp to esp */
+                       curr_sp = new_bd_ia32_CopyEbpEsp(NULL, block, curr_bp);
+                       arch_set_irn_register(curr_sp, sp);
+                       sched_add_before(ret, curr_sp);
+
+                       /* pop ebp */
+                       pop      = new_bd_ia32_PopEbp(NULL, block, curr_mem, curr_sp);
+                       curr_bp  = new_r_Proj(pop, mode_gp, pn_ia32_PopEbp_res);
+                       curr_sp  = new_r_Proj(pop, mode_gp, pn_ia32_PopEbp_stack);
+                       curr_mem = new_r_Proj(pop, mode_M, pn_ia32_Pop_M);
+                       arch_set_irn_register(curr_bp, bp);
+                       arch_set_irn_register(curr_sp, sp);
+                       sched_add_before(ret, pop);
+
+                       set_irn_n(ret, n_be_Return_mem, curr_mem);
+               }
+               set_irn_n(ret, n_ebp, curr_bp);
+       } else {
+               ir_node *incsp = be_new_IncSP(sp, block, curr_sp, -(int)frame_size, 0);
+               sched_add_before(ret, incsp);
+               curr_sp = incsp;
+       }
+       set_irn_n(ret, n_be_Return_sp, curr_sp);
+
+       /* keep verifier happy... */
+       if (get_irn_n_edges(first_sp) == 0 && is_Proj(first_sp)) {
+               kill_node(first_sp);
+       }
+}
+
+/**
+ * put the Prolog code at the beginning, epilog code before each return
+ */
+static void introduce_prolog_epilog(ir_graph *irg)
+{
+       const arch_register_t *sp         = &ia32_registers[REG_ESP];
+       const arch_register_t *bp         = &ia32_registers[REG_EBP];
+       ir_node               *start      = get_irg_start(irg);
+       ir_node               *block      = get_nodes_block(start);
+       ir_type               *frame_type = get_irg_frame_type(irg);
+       unsigned               frame_size = get_type_size_bytes(frame_type);
+       be_stack_layout_t     *layout     = be_get_irg_stack_layout(irg);
+       ir_node               *initial_sp = be_get_initial_reg_value(irg, sp);
+       ir_node               *curr_sp    = initial_sp;
+       ir_mode               *mode_gp    = mode_Iu;
+
+       if (!layout->sp_relative) {
+               /* push ebp */
+               ir_node *mem        = get_irg_initial_mem(irg);
+               ir_node *noreg      = ia32_new_NoReg_gp(irg);
+               ir_node *initial_bp = be_get_initial_reg_value(irg, bp);
+               ir_node *curr_bp    = initial_bp;
+               ir_node *push       = new_bd_ia32_Push(NULL, block, noreg, noreg, mem, curr_bp, curr_sp);
+               ir_node *incsp;
+
+               curr_sp = new_r_Proj(push, mode_gp, pn_ia32_Push_stack);
+               mem     = new_r_Proj(push, mode_M, pn_ia32_Push_M);
+               arch_set_irn_register(curr_sp, sp);
+               sched_add_after(start, push);
+
+               /* move esp to ebp */
+               curr_bp = be_new_Copy(bp->reg_class, block, curr_sp);
+               sched_add_after(push, curr_bp);
+               be_set_constr_single_reg_out(curr_bp, 0, bp, arch_register_req_type_ignore);
+               curr_sp = be_new_CopyKeep_single(sp->reg_class, block, curr_sp, curr_bp, mode_gp);
+               sched_add_after(curr_bp, curr_sp);
+               be_set_constr_single_reg_out(curr_sp, 0, sp, arch_register_req_type_produces_sp);
+               edges_reroute(initial_bp, curr_bp);
+               set_irn_n(push, n_ia32_Push_val, initial_bp);
+
+               incsp = be_new_IncSP(sp, block, curr_sp, frame_size, 0);
+               edges_reroute(initial_sp, incsp);
+               set_irn_n(push, n_ia32_Push_stack, initial_sp);
+               sched_add_after(curr_sp, incsp);
+
+               layout->initial_bias = -4;
+       } else {
+               ir_node *incsp = be_new_IncSP(sp, block, curr_sp, frame_size, 0);
+               edges_reroute(initial_sp, incsp);
+               be_set_IncSP_pred(incsp, curr_sp);
+               sched_add_after(start, incsp);
+       }
+
+       /* introduce epilog for every return node */
+       {
+               ir_node *end_block = get_irg_end_block(irg);
+               int      arity     = get_irn_arity(end_block);
+               int      i;
+
+               for (i = 0; i < arity; ++i) {
+                       ir_node *ret = get_irn_n(end_block, i);
+                       assert(be_is_Return(ret));
+                       introduce_epilog(ret);
+               }
+       }
+}
+
 /**
  * We transform Spill and Reload here. This needs to be done before
  * stack biasing otherwise we would miss the corrected offset for these nodes.
@@ -1343,6 +1329,8 @@ static void ia32_after_ra(ir_graph *irg)
        be_free_frame_entity_coalescer(fec_env);
 
        irg_block_walk_graph(irg, NULL, ia32_after_ra_walker, NULL);
+
+       introduce_prolog_epilog(irg);
 }
 
 /**
@@ -1460,7 +1448,6 @@ static ia32_isa_t ia32_isa_template = {
                &ia32_registers[REG_ESP],  /* stack pointer register */
                &ia32_registers[REG_EBP],  /* base pointer register */
                &ia32_reg_classes[CLASS_ia32_gp],  /* static link pointer register class */
-               -1,                      /* stack direction */
                2,                       /* power of two stack alignment, 2^2 == 4 */
                NULL,                    /* main environment */
                7,                       /* costs for a spill instruction */
@@ -1528,7 +1515,7 @@ static arch_env_t *ia32_init(FILE *file_handle)
 
        set_tarval_output_modes();
 
-       memcpy(isa, &ia32_isa_template, sizeof(*isa));
+       *isa = ia32_isa_template;
 
        if (ia32_mode_fpcw == NULL) {
                ia32_mode_fpcw = new_ir_mode("Fpcw", irms_int_number, 16, 0, irma_none, 0);
@@ -1972,7 +1959,7 @@ static int ia32_is_mux_allowed(ir_node *sel, ir_node *mux_false,
        if (get_mode_size_bits(mode) > 32)
                return false;
        /* we can handle Abs for all modes and compares (except 64bit) */
-       if (be_mux_is_abs(sel, mux_true, mux_false) != 0)
+       if (ir_mux_is_abs(sel, mux_true, mux_false) != 0)
                return true;
        /* we can't handle MuxF yet */
        if (mode_is_float(mode))
@@ -2051,7 +2038,8 @@ static void ia32_lower_for_target(void)
        /* lower compound param handling */
        lower_calls_with_compounds(&params);
 
-       lower_dw_ops(&lower_dw_params);
+       ir_prepare_dw_lowering(&lower_dw_params);
+       ir_lower_dw_ops();
 
        for (i = 0; i < n_irgs; ++i) {
                ir_graph *irg = get_irp_irg(i);
@@ -2189,7 +2177,7 @@ const arch_isa_if_t ia32_isa_if = {
        ia32_emit,           /* emit && done */
 };
 
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_arch_ia32)
 void be_init_arch_ia32(void)
 {
        lc_opt_entry_t *be_grp   = lc_opt_get_grp(firm_opt_get_root(), "be");