Remove the unused parameter const arch_env_t *env from arch_get_irn_register().
diff --git a/ir/be/ia32/ia32_optimize.c b/ir/be/ia32/ia32_optimize.c
index bfaa352..5b8a35c 100644
--- a/ir/be/ia32/ia32_optimize.c
+++ b/ir/be/ia32/ia32_optimize.c
@@ -1,19 +1,36 @@
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License.
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
 /**
- * Project:     libFIRM
- * File name:   ir/be/ia32/ia32_optimize.c
- * Purpose:     Implements several optimizations for IA32
- * Author:      Christian Wuerdig
- * CVS-ID:      $Id$
- * Copyright:   (c) 2006 Universitaet Karlsruhe
- * Licence:     This file protected by GPL -  GNU GENERAL PUBLIC LICENSE.
+ * @file
+ * @brief       Implements several optimizations for IA32.
+ * @author      Matthias Braun, Christian Wuerdig
+ * @version     $Id$
  */
 #ifdef HAVE_CONFIG_H
-#include <config.h>
+#include "config.h"
 #endif
 
 #include "irnode.h"
 #include "irprog_t.h"
 #include "ircons.h"
+#include "irtools.h"
 #include "firm_types.h"
 #include "iredges.h"
 #include "tv.h"
 #include "irgwalk.h"
 #include "height.h"
 #include "irbitset.h"
+#include "irprintf.h"
+#include "error.h"
 
 #include "../be_t.h"
 #include "../beabi.h"
 #include "../benode_t.h"
 #include "../besched_t.h"
+#include "../bepeephole.h"
 
 #include "ia32_new_nodes.h"
+#include "ia32_optimize.h"
 #include "bearch_ia32_t.h"
-#include "gen_ia32_regalloc_if.h"     /* the generated interface (register type and class defenitions) */
+#include "gen_ia32_regalloc_if.h"
+#include "ia32_common_transform.h"
 #include "ia32_transform.h"
 #include "ia32_dbg_stat.h"
 #include "ia32_util.h"
+#include "ia32_architecture.h"
 
-#define AGGRESSIVE_AM
-
-typedef enum {
-       IA32_AM_CAND_NONE  = 0,  /**< no addressmode possible with irn inputs */
-       IA32_AM_CAND_LEFT  = 1,  /**< addressmode possible with left input */
-       IA32_AM_CAND_RIGHT = 2,  /**< addressmode possible with right input */
-       IA32_AM_CAND_BOTH  = 3   /**< addressmode possible with both inputs */
-} ia32_am_cand_t;
+DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
 
-typedef int is_op_func_t(const ir_node *n);
-typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem);
+static const arch_env_t *arch_env;
+static ia32_code_gen_t  *cg;
 
-/**
- * checks if a node represents the NOREG value
- */
-static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
-       return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
-}
-
-void ia32_pre_transform_phase(ia32_code_gen_t *cg) {
-       /*
-               We need to transform the consts twice:
-               - the psi condition tree transformer needs existing constants to be ia32 constants
-               - the psi condition tree transformer inserts new firm constants which need to be transformed
-       */
-       //ia32_transform_all_firm_consts(cg);
-       irg_walk_graph(cg->irg, NULL, ia32_transform_psi_cond_tree, cg);
-       //ia32_transform_all_firm_consts(cg);
+static void copy_mark(const ir_node *old, ir_node *new)
+{
+       if (is_ia32_is_reload(old))
+               set_ia32_is_reload(new);
+       if (is_ia32_is_spill(old))
+               set_ia32_is_spill(new);
+       if (is_ia32_is_remat(old))
+               set_ia32_is_remat(new);
 }
 
-/********************************************************************************************************
- *  _____                _           _         ____        _   _           _          _   _
- * |  __ \              | |         | |       / __ \      | | (_)         (_)        | | (_)
- * | |__) |__  ___ _ __ | |__   ___ | | ___  | |  | |_ __ | |_ _ _ __ ___  _ ______ _| |_ _  ___  _ __
- * |  ___/ _ \/ _ \ '_ \| '_ \ / _ \| |/ _ \ | |  | | '_ \| __| | '_ ` _ \| |_  / _` | __| |/ _ \| '_ \
- * | |  |  __/  __/ |_) | | | | (_) | |  __/ | |__| | |_) | |_| | | | | | | |/ / (_| | |_| | (_) | | | |
- * |_|   \___|\___| .__/|_| |_|\___/|_|\___|  \____/| .__/ \__|_|_| |_| |_|_/___\__,_|\__|_|\___/|_| |_|
- *                | |                               | |
- *                |_|                               |_|
- ********************************************************************************************************/
+typedef enum produces_flag_t {
+       produces_no_flag,
+       produces_flag_zero,
+       produces_flag_carry
+} produces_flag_t;
 
 /**
- * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
+ * Return which usable flag the given node produces
+ *
+ * @param node  the node to check
+ * @param pn    the projection number of the used result
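+ *
+ * Example (illustrative): for "res = Add(x, y)" the zero flag is set iff
+ * res == 0, so a later Test(res, res) that is only evaluated for Eq/Lg is
+ * redundant.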
  */
+static produces_flag_t produces_test_flag(ir_node *node, int pn)
+{
+       ir_node                     *count;
+       const ia32_immediate_attr_t *imm_attr;
+
+       if (!is_ia32_irn(node))
+               return produces_no_flag;
+
+       switch (get_ia32_irn_opcode(node)) {
+               case iro_ia32_Add:
+               case iro_ia32_Adc:
+               case iro_ia32_And:
+               case iro_ia32_Or:
+               case iro_ia32_Xor:
+               case iro_ia32_Sub:
+               case iro_ia32_Sbb:
+               case iro_ia32_Neg:
+               case iro_ia32_Inc:
+               case iro_ia32_Dec:
+                       break;
 
-static int ia32_const_equal(const ir_node *n1, const ir_node *n2) {
-       if(get_ia32_immop_type(n1) != get_ia32_immop_type(n2))
-               return 0;
+               case iro_ia32_ShlD:
+               case iro_ia32_ShrD:
+                       assert(n_ia32_ShlD_count == n_ia32_ShrD_count);
+                       count = get_irn_n(node, n_ia32_ShlD_count);
+                       goto check_shift_amount;
+
+               case iro_ia32_Shl:
+               case iro_ia32_Shr:
+               case iro_ia32_Sar:
+                       assert(n_ia32_Shl_count == n_ia32_Shr_count
+                                       && n_ia32_Shl_count == n_ia32_Sar_count);
+                       count = get_irn_n(node, n_ia32_Shl_count);
+check_shift_amount:
+                       /* when shift count is zero the flags are not affected, so we can only
+                        * do this for constants != 0 */
+                       if (!is_ia32_Immediate(count))
+                               return produces_no_flag;
+
+                       imm_attr = get_ia32_immediate_attr_const(count);
+                       if (imm_attr->symconst != NULL)
+                               return produces_no_flag;
+                       if ((imm_attr->offset & 0x1f) == 0)
+                               return produces_no_flag;
+                       break;
 
-       if(get_ia32_immop_type(n1) == ia32_ImmConst) {
-               return get_ia32_Immop_tarval(n1) == get_ia32_Immop_tarval(n2);
-       } else if(get_ia32_immop_type(n1) == ia32_ImmSymConst) {
-               return get_ia32_Immop_symconst(n1) == get_ia32_Immop_symconst(n2);
+               case iro_ia32_Mul:
+                       return pn == pn_ia32_Mul_res_high ?
+                               produces_flag_carry : produces_no_flag;
+
+               default:
+                       return produces_no_flag;
        }
 
-       assert(get_ia32_immop_type(n1) == ia32_ImmNone);
-       return 1;
+       return pn == pn_ia32_res ?
+               produces_flag_zero : produces_no_flag;
 }
 
 /**
- * Checks for potential CJmp/CJmpAM optimization candidates.
+ * If the given node does not have mode_T, creates a mode_T version (with a result Proj).
+ *
+ * @param node  the node to change
+ *
+ * @return the new mode_T node (if the mode was changed) or node itself
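+ *
+ * Sketch: "v = Add(x, y)" becomes "t = Add(x, y); v = Proj(t, pn_ia32_res)",
+ * so that further results (here: the flags) can be projected out of t.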
  */
-static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
-       ir_node *cand = NULL;
-       ir_node *prev = sched_prev(irn);
-
-       if (is_Block(prev)) {
-               if (get_Block_n_cfgpreds(prev) == 1)
-                       prev = get_Block_cfgpred(prev, 0);
-               else
-                       prev = NULL;
-       }
+static ir_node *turn_into_mode_t(ir_node *node)
+{
+       ir_node               *block;
+       ir_node               *res_proj;
+       ir_node               *new_node;
+       const arch_register_t *reg;
 
-       /* The predecessor must be a ProjX. */
-       if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
-               prev = get_Proj_pred(prev);
+       if(get_irn_mode(node) == mode_T)
+               return node;
 
-               if (is_op_func(prev))
-                       cand = prev;
-       }
+       assert(get_irn_mode(node) == mode_Iu);
 
-       return cand;
-}
+       new_node = exact_copy(node);
+       set_irn_mode(new_node, mode_T);
+
+       block    = get_nodes_block(new_node);
+       res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu,
+                             pn_ia32_res);
+
+       reg = arch_get_irn_register(node);
+       arch_set_irn_register(arch_env, res_proj, reg);
 
-static int is_TestJmp_cand(const ir_node *irn) {
-       return is_ia32_TestJmp(irn) || is_ia32_And(irn);
+       sched_add_before(node, new_node);
+       be_peephole_exchange(node, res_proj);
+       return new_node;
 }
 
 /**
- * Checks if two consecutive arguments of cand matches
- * the two arguments of irn (TestJmp).
+ * Replace Cmp(x, 0) by a Test(x, x)
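+ *
+ * Illustrative (AT&T syntax): "cmpl $0, %eax" becomes "testl %eax, %eax",
+ * which sets the same flags but has a shorter encoding.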
  */
-static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
-       ir_node *in1       = get_irn_n(irn, 0);
-       ir_node *in2       = get_irn_n(irn, 1);
-       int      i, n      = get_irn_arity(cand);
-       int      same_args = 0;
-
-       for (i = 0; i < n - 1; i++) {
-               if (get_irn_n(cand, i)     == in1 &&
-                       get_irn_n(cand, i + 1) == in2)
-               {
-                       same_args = 1;
-                       break;
-               }
+static void peephole_ia32_Cmp(ir_node *const node)
+{
+       ir_node                     *right;
+       ia32_immediate_attr_t const *imm;
+       dbg_info                    *dbgi;
+       ir_graph                    *irg;
+       ir_node                     *block;
+       ir_node                     *noreg;
+       ir_node                     *nomem;
+       ir_node                     *op;
+       ia32_attr_t           const *attr;
+       int                          ins_permuted;
+       int                          cmp_unsigned;
+       ir_node                     *test;
+       arch_register_t       const *reg;
+       ir_edge_t             const *edge;
+       ir_edge_t             const *tmp;
+
+       if (get_ia32_op_type(node) != ia32_Normal)
+               return;
+
+       right = get_irn_n(node, n_ia32_Cmp_right);
+       if (!is_ia32_Immediate(right))
+               return;
+
+       imm = get_ia32_immediate_attr_const(right);
+       if (imm->symconst != NULL || imm->offset != 0)
+               return;
+
+       dbgi         = get_irn_dbg_info(node);
+       irg          = current_ir_graph;
+       block        = get_nodes_block(node);
+       noreg        = ia32_new_NoReg_gp(cg);
+       nomem        = get_irg_no_mem(irg);
+       op           = get_irn_n(node, n_ia32_Cmp_left);
+       attr         = get_irn_generic_attr(node);
+       ins_permuted = attr->data.ins_permuted;
+       cmp_unsigned = attr->data.cmp_unsigned;
+
+       if (is_ia32_Cmp(node)) {
+               test = new_rd_ia32_Test(dbgi, irg, block, noreg, noreg, nomem,
+                                       op, op, ins_permuted, cmp_unsigned);
+       } else {
+               test = new_rd_ia32_Test8Bit(dbgi, irg, block, noreg, noreg, nomem,
+                                           op, op, ins_permuted, cmp_unsigned);
        }
+       set_ia32_ls_mode(test, get_ia32_ls_mode(node));
+
+       reg = arch_get_irn_register(node);
+       arch_set_irn_register(arch_env, test, reg);
 
-       if (!same_args)
-               return 0;
+       foreach_out_edge_safe(node, edge, tmp) {
+               ir_node *const user = get_edge_src_irn(edge);
 
-       return ia32_const_equal(cand, irn);
+               if (is_Proj(user))
+                       exchange(user, test);
+       }
+
+       sched_add_before(node, test);
+       copy_mark(node, test);
+       be_peephole_exchange(node, test);
 }
 
 /**
- * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And)
+ * Peephole optimization for Test instructions.
+ * We can remove the Test if the zero flag was already produced and is still
+ * live.
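+ *
+ * Illustrative (AT&T syntax):
+ *     andl %ebx, %eax      # already sets the zero flag for the result
+ *     testl %eax, %eax     # redundant, can be removed
+ *     jz    label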
  */
-static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
-       ir_node *cand    = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
-       int      replace = 0;
+static void peephole_ia32_Test(ir_node *node)
+{
+       ir_node         *left  = get_irn_n(node, n_ia32_Test_left);
+       ir_node         *right = get_irn_n(node, n_ia32_Test_right);
+       ir_node         *flags_proj;
+       ir_node         *block;
+       ir_mode         *flags_mode;
+       int              pn    = pn_ia32_res;
+       ir_node         *schedpoint;
+       const ir_edge_t *edge;
 
-       /* we found a possible candidate */
-       replace = cand ? is_TestJmp_replacement(cand, irn) : 0;
+       assert(n_ia32_Test_left == n_ia32_Test8Bit_left
+                       && n_ia32_Test_right == n_ia32_Test8Bit_right);
 
-       if (replace) {
-               DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
+       /* we need a test for 0 */
+       if(left != right)
+               return;
 
-               if (is_ia32_And(cand))
-                       set_irn_op(irn, op_ia32_CJmpAM);
-               else
-                       set_irn_op(irn, op_ia32_CJmp);
+       block = get_nodes_block(node);
+       if(get_nodes_block(left) != block)
+               return;
 
-               DB((cg->mod, LEVEL_1, "%+F\n", irn));
+       if(is_Proj(left)) {
+               pn   = get_Proj_proj(left);
+               left = get_Proj_pred(left);
        }
-}
 
-static int is_CondJmp_cand(const ir_node *irn) {
-       return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
-}
+       /* happens rarely, but if it does, the code will panic */
+       if (is_ia32_Unknown_GP(left))
+               return;
 
-/**
- * Checks if the arguments of cand are the same of irn.
- */
-static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
-       int i, arity;
+       /* walk the schedule upwards and abort when we find left or some other
+          node that destroys the flags */
+       schedpoint = node;
+       for (;;) {
+               schedpoint = sched_prev(schedpoint);
+               if (schedpoint == left)
+                       break;
+               if (arch_irn_is(arch_env, schedpoint, modify_flags))
+                       return;
+               if (schedpoint == block)
+                       panic("couldn't find left");
+       }
+
+       /* make sure only Lg/Eq tests are used */
+       foreach_out_edge(node, edge) {
+               ir_node *user = get_edge_src_irn(edge);
+               int      pnc  = get_ia32_condcode(user);
 
-       arity = get_irn_arity(cand);
-       for (i = 0; i < arity; i++) {
-               if (get_irn_n(cand, i) != get_irn_n(irn, i)) {
-                       return 0;
+               if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
+                       return;
                }
        }
 
-       return ia32_const_equal(cand, irn);
+       switch (produces_test_flag(left, pn)) {
+               case produces_flag_zero:
+                       break;
+
+               case produces_flag_carry:
+                       foreach_out_edge(node, edge) {
+                               ir_node *user = get_edge_src_irn(edge);
+                               int      pnc  = get_ia32_condcode(user);
+
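+                               /* only the carry flag is usable: carry clear
+                                * corresponds to unsigned >= (the old Eq),
+                                * carry set to unsigned < (the old Lg) */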
+                               switch (pnc) {
+                                       case pn_Cmp_Eq: pnc = pn_Cmp_Ge | ia32_pn_Cmp_unsigned; break;
+                                       case pn_Cmp_Lg: pnc = pn_Cmp_Lt | ia32_pn_Cmp_unsigned; break;
+                                       default: panic("unexpected pn");
+                               }
+                               set_ia32_condcode(user, pnc);
+                       }
+                       break;
+
+               default:
+                       return;
+       }
+
+       left = turn_into_mode_t(left);
+
+       flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
+       flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode,
+                               pn_ia32_flags);
+       arch_set_irn_register(arch_env, flags_proj, &ia32_flags_regs[REG_EFLAGS]);
+
+       assert(get_irn_mode(node) != mode_T);
+
+       be_peephole_exchange(node, flags_proj);
 }
 
 /**
- * Tries to replace a CondJmp by a CJmpAM
+ * The AMD Athlon works faster when a Return is neither the destination of a
+ * conditional jump nor directly preceded by another jump instruction.
+ * This can be avoided by placing a Rep prefix before the Return.
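+ *
+ * Illustrative: the return is then emitted with a Rep prefix ("rep ret"),
+ * which the Athlon's branch predictor handles better.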
  */
-static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
-       ir_node *cand    = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
-       int      replace = 0;
-
-       /* we found a possible candidate */
-       replace = cand ? is_CondJmp_replacement(cand, irn) : 0;
+static void peephole_ia32_Return(ir_node *node) {
+       ir_node *block, *irn;
 
-       if (replace) {
-               DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
-               DBG_OPT_CJMP(irn);
+       if (!ia32_cg_config.use_pad_return)
+               return;
 
-               set_irn_op(irn, op_ia32_CJmpAM);
+       block = get_nodes_block(node);
 
-               DB((cg->mod, LEVEL_1, "%+F\n", irn));
+       /* check if this return is the first in the block */
+       sched_foreach_reverse_from(node, irn) {
+               switch (get_irn_opcode(irn)) {
+               case beo_Return:
+                       /* the return node itself, ignore */
+                       continue;
+               case iro_Start:
+               case beo_RegParams:
+               case beo_Barrier:
+                       /* ignore, no code is generated for these */
+                       continue;
+               case beo_IncSP:
+                       /* arg, IncSP 0 nodes might occur, ignore these */
+                       if (be_get_IncSP_offset(irn) == 0)
+                               continue;
+                       return;
+               case iro_Phi:
+                       continue;
+               default:
+                       return;
+               }
        }
+
+       /* ensure that the 3 byte return is generated */
+       be_Return_set_emit_pop(node, 1);
 }
 
-// only optimize up to 48 stores behind IncSPs
+/* only optimize up to 48 stores behind IncSPs */
 #define MAXPUSH_OPTIMIZE       48
 
 /**
- * Tries to create pushs from IncSP,Store combinations
+ * Tries to create Pushes from IncSP, Store combinations.
+ * The Stores are replaced by Pushes, the IncSP is modified
+ * (possibly into IncSP 0, but not removed).
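+ *
+ * Illustrative sketch (AT&T syntax):
+ *     subl $8, %esp                pushl %eax
+ *     movl %eax, 4(%esp)    =>     pushl %ebx
+ *     movl %ebx, (%esp)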
  */
-static void ia32_create_Pushs(ir_node *irn, ia32_code_gen_t *cg) {
-       int i;
-       int offset;
-       ir_node *node;
-       ir_node *stores[MAXPUSH_OPTIMIZE];
-       ir_node *block = get_nodes_block(irn);
-       ir_graph *irg = cg->irg;
-       ir_node *curr_sp;
-       ir_mode *spmode = get_irn_mode(irn);
+static void peephole_IncSP_Store_to_push(ir_node *irn)
+{
+       int              i;
+       int              maxslot;
+       int              inc_ofs;
+       ir_node         *node;
+       ir_node         *stores[MAXPUSH_OPTIMIZE];
+       ir_node         *block;
+       ir_graph        *irg;
+       ir_node         *curr_sp;
+       ir_mode         *spmode;
+       ir_node         *first_push = NULL;
+       ir_edge_t const *edge;
+       ir_edge_t const *next;
 
        memset(stores, 0, sizeof(stores));
 
        assert(be_is_IncSP(irn));
 
-       offset = be_get_IncSP_offset(irn);
-       if(offset < 4)
+       inc_ofs = be_get_IncSP_offset(irn);
+       if (inc_ofs < 4)
                return;
 
        /*
         * We first walk the schedule after the IncSP node as long as we find
-        * suitable stores that could be transformed to a push.
+        * suitable Stores that could be transformed to a Push.
         * We save them into the stores array which is sorted by the frame offset/4
         * attached to the node
         */
-       for(node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
+       maxslot = -1;
+       for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
                ir_node *mem;
                int offset;
                int storeslot;
 
-               // it has to be a store
-               if(!is_ia32_Store(node))
+               /* it has to be a Store */
+               if (!is_ia32_Store(node))
                        break;
 
-               // it has to use our sp value
-               if(get_irn_n(node, 0) != irn)
+               /* it has to use our sp value */
+               if (get_irn_n(node, n_ia32_base) != irn)
                        continue;
-               // store has to be attached to NoMem
-               mem = get_irn_n(node, 3);
-               if(!is_NoMem(mem)) {
+               /* Store has to be attached to NoMem */
+               mem = get_irn_n(node, n_ia32_mem);
+               if (!is_NoMem(mem))
                        continue;
-               }
 
-               if( (get_ia32_am_flavour(node) & ia32_am_IS) != 0)
+               /* unfortunately we can't support the full AMs possible for push at the
+                * moment. TODO: fix this */
+               if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
                        break;
 
                offset = get_ia32_am_offs_int(node);
+               /* we should NEVER access uninitialized stack BELOW the current SP */
+               assert(offset >= 0);
 
-               storeslot = offset / 4;
-               if(storeslot >= MAXPUSH_OPTIMIZE)
-                       continue;
-
-               // storing into the same slot twice is bad (and shouldn't happen...)
-               if(stores[storeslot] != NULL)
+               /* storing at half-slots is bad */
+               if ((offset & 3) != 0)
                        break;
 
-               // storing at half-slots is bad
-               if(offset % 4 != 0)
+               if (inc_ofs - 4 < offset || offset >= MAXPUSH_OPTIMIZE * 4)
+                       continue;
+               storeslot = offset >> 2;
+
+               /* storing into the same slot twice is bad (and shouldn't happen...) */
+               if (stores[storeslot] != NULL)
                        break;
 
                stores[storeslot] = node;
+               if (storeslot > maxslot)
+                       maxslot = storeslot;
        }
 
-       curr_sp = get_irn_n(irn, 0);
+       curr_sp = irn;
 
-       // walk the stores in inverse order and create pushs for them
-       i = (offset / 4) - 1;
-       if(i >= MAXPUSH_OPTIMIZE) {
-               i = MAXPUSH_OPTIMIZE - 1;
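+       /* find the leading contiguous run stores[0..i]; only these can become
+        * Pushes, since a Push always writes directly below the current sp */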
+       for (i = -1; i < maxslot; ++i) {
+               if (stores[i + 1] == NULL)
+                       break;
        }
 
-       for( ; i >= 0; --i) {
+       /* walk through the Stores and create Pushes for them */
+       block  = get_nodes_block(irn);
+       spmode = get_irn_mode(irn);
+       irg    = cg->irg;
+       for (; i >= 0; --i) {
                const arch_register_t *spreg;
                ir_node *push;
                ir_node *val, *mem, *mem_proj;
                ir_node *store = stores[i];
                ir_node *noreg = ia32_new_NoReg_gp(cg);
 
-               if(store == NULL || is_Bad(store))
-                       break;
+               val = get_irn_n(store, n_ia32_unary_op);
+               mem = get_irn_n(store, n_ia32_mem);
+               spreg = arch_get_irn_register(curr_sp);
 
-               val = get_irn_n(store, 2);
-               mem = get_irn_n(store, 3);
-               spreg = arch_get_irn_register(cg->arch_env, curr_sp);
+               push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);
+               copy_mark(store, push);
 
-               // create a push
-               push = new_rd_ia32_Push(NULL, irg, block, noreg, noreg, val, curr_sp, mem);
+               if (first_push == NULL)
+                       first_push = push;
 
-               set_ia32_am_support(push, ia32_am_Source);
-               copy_ia32_Immop_attr(push, store);
+               sched_add_after(curr_sp, push);
 
-               sched_add_before(irn, push);
-
-               // create stackpointer proj
+               /* create stackpointer Proj */
                curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
                arch_set_irn_register(cg->arch_env, curr_sp, spreg);
-               sched_add_before(irn, curr_sp);
 
-               // create memory proj
+               /* create memory Proj */
                mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);
-               sched_add_before(irn, mem_proj);
-
-               // use the memproj now
-               exchange(store, mem_proj);
 
-               // we can remove the store now
-               sched_remove(store);
+               /* use the memproj now */
+               be_peephole_exchange(store, mem_proj);
 
-               offset -= 4;
+               inc_ofs -= 4;
        }
 
-       be_set_IncSP_offset(irn, offset);
+       foreach_out_edge_safe(irn, edge, next) {
+               ir_node *const src = get_edge_src_irn(edge);
+               int      const pos = get_edge_src_pos(edge);
 
-       // can we remove the IncSP now?
-       if(offset == 0) {
-               const ir_edge_t *edge, *next;
-
-               foreach_out_edge_safe(irn, edge, next) {
-                       ir_node *arg = get_edge_src_irn(edge);
-                       int pos = get_edge_src_pos(edge);
-
-                       set_irn_n(arg, pos, curr_sp);
-               }
+               if (src == first_push)
+                       continue;
 
-               set_irn_n(irn, 0, new_Bad());
-               sched_remove(irn);
-       } else {
-               set_irn_n(irn, 0, curr_sp);
+               set_irn_n(src, pos, curr_sp);
        }
+
+       be_set_IncSP_offset(irn, inc_ofs);
 }
 
 #if 0
-/**
- * Tries to optimize two following IncSP.
- */
-static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
-       ir_node *prev = be_get_IncSP_pred(irn);
-       int real_uses = get_irn_n_edges(prev);
+static void peephole_store_incsp(ir_node *store)
+{
+       dbg_info *dbgi;
+       ir_node  *node;
+       ir_node  *block;
+       ir_node  *noreg;
+       ir_node  *mem;
+       ir_node  *push;
+       ir_node  *val;
+       ir_node  *am_base = get_irn_n(store, n_ia32_Store_base);
+       if (!be_is_IncSP(am_base)
+                       || get_nodes_block(am_base) != get_nodes_block(store))
+               return;
+       mem = get_irn_n(store, n_ia32_Store_mem);
+       if (!is_ia32_NoReg_GP(get_irn_n(store, n_ia32_Store_index))
+                       || !is_NoMem(mem))
+               return;
 
-       if (be_is_IncSP(prev) && real_uses == 1) {
-               /* first IncSP has only one IncSP user, kill the first one */
-               int prev_offs = be_get_IncSP_offset(prev);
-               int curr_offs = be_get_IncSP_offset(irn);
+       int incsp_offset = be_get_IncSP_offset(am_base);
+       if (incsp_offset <= 0)
+               return;
 
-               be_set_IncSP_offset(prev, prev_offs + curr_offs);
+       /* we have to be at offset 0 */
+       int my_offset = get_ia32_am_offs_int(store);
+       if (my_offset != 0) {
+               /* TODO here: find out whether there is a store with offset 0 before
+                * us and whether we can move it down to our place */
+               return;
+       }
+       ir_mode *ls_mode = get_ia32_ls_mode(store);
+       int my_store_size = get_mode_size_bytes(ls_mode);
+
+       if (my_offset + my_store_size > incsp_offset)
+               return;
 
-               /* Omit the optimized IncSP */
-               be_set_IncSP_pred(irn, be_get_IncSP_pred(prev));
+       /* correctness checking:
+               - no one else may write to that stack slot
+                   (because after the transformation the IncSP won't allocate it anymore)
+       */
+       sched_foreach_reverse_from(store, node) {
+               int i, arity;
+
+               if (node == am_base)
+                       break;
 
-               set_irn_n(prev, 0, new_Bad());
-               sched_remove(prev);
+               /* make sure no one else can use the space on the stack */
+               arity = get_irn_arity(node);
+               for (i = 0; i < arity; ++i) {
+                       ir_node *pred = get_irn_n(node, i);
+                       if (pred != am_base)
+                               continue;
+
+                       if (i == n_ia32_base &&
+                                       (get_ia32_op_type(node) == ia32_AddrModeS
+                                        || get_ia32_op_type(node) == ia32_AddrModeD)) {
+                               int      node_offset  = get_ia32_am_offs_int(node);
+                               ir_mode *node_ls_mode = get_ia32_ls_mode(node);
+                               int      node_size    = get_mode_size_bytes(node_ls_mode);
+                               /* overlapping with our position? abort */
+                               if (node_offset < my_offset + my_store_size
+                                               && node_offset + node_size >= my_offset)
+                                       return;
+                               /* otherwise it's fine */
+                               continue;
+                       }
+
+                       /* strange use of esp: abort */
+                       return;
+               }
        }
+
+       /* all ok, change to push */
+       dbgi  = get_irn_dbg_info(store);
+       block = get_nodes_block(store);
+       noreg = ia32_new_NoReg_gp(cg);
+       val   = get_irn_n(store, n_ia32_Store_val);
+
+       /* sketch of the unfinished rest: build the Push by hand (create_push
+        * would push an Unknown value instead of val) and replace the Store */
+       push  = new_rd_ia32_Push(dbgi, current_ir_graph, block, noreg, noreg,
+                                mem, val, am_base);
+       copy_mark(store, push);
+       sched_add_before(store, push);
+       be_peephole_exchange(store, new_r_Proj(current_ir_graph, block, push,
+                                              mode_M, pn_ia32_Push_M));
 }
 #endif
 
 /**
- * Performs Peephole Optimizations.
+ * Return true if a mode can be stored in the GP register set
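+ * (e.g. mode_Iu or mode_P fit; a 64-bit mode or mode_fpcw does not)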
  */
-static void ia32_peephole_optimize_node(ir_node *irn, void *env) {
-       ia32_code_gen_t *cg = env;
-
-       /* AMD CPUs want explicit compare before conditional jump  */
-       if (! ARCH_AMD(cg->opt_arch)) {
-               if (is_ia32_TestJmp(irn))
-                       ia32_optimize_TestJmp(irn, cg);
-               else if (is_ia32_CondJmp(irn))
-                       ia32_optimize_CondJmp(irn, cg);
-       }
+static INLINE int mode_needs_gp_reg(ir_mode *mode) {
+        if (mode == mode_fpcw)
+                return 0;
+        if (get_mode_size_bits(mode) > 32)
+                return 0;
+        return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
+}
 
-       if (be_is_IncSP(irn)) {
-               // optimize_IncSP doesn't respect dependency edges yet...
-               //ia32_optimize_IncSP(irn, cg);
+/**
+ * Tries to create Pops from Load, IncSP combinations.
+ * The Loads are replaced by Pops, the IncSP is modified
+ * (possibly into IncSP 0, but not removed).
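+ *
+ * Illustrative sketch (AT&T syntax):
+ *     movl (%esp), %eax            popl %eax
+ *     movl 4(%esp), %ebx    =>     popl %ebx
+ *     addl $8, %esp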
+ */
+static void peephole_Load_IncSP_to_pop(ir_node *irn)
+{
+       const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+       int      i, maxslot, inc_ofs, ofs;
+       ir_node  *node, *pred_sp, *block;
+       ir_node  *loads[MAXPUSH_OPTIMIZE];
+       ir_graph *irg;
+       unsigned regmask = 0;
+       unsigned copymask = ~0;
+
+       memset(loads, 0, sizeof(loads));
+       assert(be_is_IncSP(irn));
 
-               if (cg->opt & IA32_OPT_PUSHARGS)
-                       ia32_create_Pushs(irn, cg);
-       }
-}
+       inc_ofs = -be_get_IncSP_offset(irn);
+       if (inc_ofs < 4)
+               return;
 
-void ia32_peephole_optimization(ir_graph *irg, ia32_code_gen_t *cg) {
-       irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, cg);
-}
+       /*
+        * We first walk the schedule before the IncSP node as long as we find
+        * suitable Loads that could be transformed to a Pop.
+        * We save them into the loads array, which is sorted by the frame offset/4
+        * attached to the node
+        */
+       maxslot = -1;
+       pred_sp = be_get_IncSP_pred(irn);
+       for (node = sched_prev(irn); !sched_is_end(node); node = sched_prev(node)) {
+               int offset;
+               int loadslot;
+               const arch_register_t *sreg, *dreg;
+
+               /* it has to be a Load */
+               if (!is_ia32_Load(node)) {
+                       if (be_is_Copy(node)) {
+                               if (!mode_needs_gp_reg(get_irn_mode(node))) {
+                                       /* not a GP copy, ignore */
+                                       continue;
+                               }
+                               dreg = arch_get_irn_register(node);
+                               sreg = arch_get_irn_register(be_get_Copy_op(node));
+                               if (regmask & copymask & (1 << sreg->index)) {
+                                       break;
+                               }
+                               if (regmask & copymask & (1 << dreg->index)) {
+                                       break;
+                               }
+                               /* we CAN skip Copies if neither the destination nor the source
+                                * is in our regmask, i.e. none of our future Pops will overwrite it */
+                               regmask |= (1 << dreg->index) | (1 << sreg->index);
+                               copymask &= ~((1 << dreg->index) | (1 << sreg->index));
+                               continue;
+                       }
+                       break;
+               }
 
-/******************************************************************
- *              _     _                   __  __           _
- *     /\      | |   | |                 |  \/  |         | |
- *    /  \   __| | __| |_ __ ___  ___ ___| \  / | ___   __| | ___
- *   / /\ \ / _` |/ _` | '__/ _ \/ __/ __| |\/| |/ _ \ / _` |/ _ \
- *  / ____ \ (_| | (_| | | |  __/\__ \__ \ |  | | (_) | (_| |  __/
- * /_/    \_\__,_|\__,_|_|  \___||___/___/_|  |_|\___/ \__,_|\___|
- *
- ******************************************************************/
+               /* we can handle only GP loads */
+               if (!mode_needs_gp_reg(get_ia32_ls_mode(node)))
+                       continue;
 
-typedef struct {
-       ia32_code_gen_t *cg;
-       heights_t       *h;
-} ia32_am_opt_env_t;
+               /* it has to use our predecessor sp value */
+               if (get_irn_n(node, n_ia32_base) != pred_sp) {
+                       /* it would be ok if this load does not use a Pop result,
+                        * but we do not check this */
+                       break;
+               }
 
-static int node_is_ia32_comm(const ir_node *irn) {
-       return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
-}
+               /* should have NO index */
+               if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
+                       break;
 
-static int ia32_get_irn_n_edges(const ir_node *irn) {
-       const ir_edge_t *edge;
-       int cnt = 0;
+               offset = get_ia32_am_offs_int(node);
+               /* we should NEVER access uninitialized stack BELOW the current SP */
+               assert(offset >= 0);
 
-       foreach_out_edge(irn, edge) {
-               cnt++;
-       }
+               /* loading from half-slots is bad */
+               if ((offset & 3) != 0)
+                       break;
 
-       return cnt;
-}
+               if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
+                       continue;
+               /* ignore those outside the possible window */
+               if (offset > inc_ofs - 4)
+                       continue;
+               loadslot = offset >> 2;
 
-/**
- * Determines if pred is a Proj and if is_op_func returns true for it's predecessor.
- *
- * @param pred       The node to be checked
- * @param is_op_func The check-function
- * @return 1 if conditions are fulfilled, 0 otherwise
- */
-static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
-       return is_op_func(pred);
-}
+               /* loading from the same slot twice is bad (and shouldn't happen...) */
+               if (loads[loadslot] != NULL)
+                       break;
 
-/**
- * Determines if pred is a Proj and if is_op_func returns true for it's predecessor
- * and if the predecessor is in block bl.
- *
- * @param bl         The block
- * @param pred       The node to be checked
- * @param is_op_func The check-function
- * @return 1 if conditions are fulfilled, 0 otherwise
- */
-static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
-       int (*is_op_func)(const ir_node *n))
-{
-       if (is_Proj(pred)) {
-               pred = get_Proj_pred(pred);
-               if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
-                       return 1;
+               dreg = arch_get_irn_register(node);
+               if (regmask & (1 << dreg->index)) {
+                       /* this register is already used */
+                       break;
                }
+               regmask |= 1 << dreg->index;
+
+               loads[loadslot] = node;
+               if (loadslot > maxslot)
+                       maxslot = loadslot;
        }
 
-       return 0;
-}
+       if (maxslot < 0)
+               return;
 
-/**
- * Checks if irn is a candidate for address calculation. We avoid transforming
- * adds to leas if they have a load as pred, because then we can use AM mode
- * for the add later.
- *
- * - none of the operand must be a Load  within the same block OR
- * - all Loads must have more than one user                    OR
- *
- * @param block   The block the Loads must/mustnot be in
- * @param irn     The irn to check
- * return 1 if irn is a candidate, 0 otherwise
- */
-static int is_addr_candidate(const ir_node *irn) {
-#ifndef AGGRESSIVE_AM
-       const ir_node *block = get_nodes_block(irn);
-       ir_node *left, *right;
-       int      n;
-
-       left  = get_irn_n(irn, 2);
-       right = get_irn_n(irn, 3);
-
-       if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) {
-               n         = ia32_get_irn_n_edges(left);
-               /* load with only one user: don't create LEA */
-               if(n == 1)
-                       return 0;
-       }
+       /* find the first gap below maxslot; only the Loads above it become Pops */
+       for (i = maxslot; i >= 0; --i) {
+               ir_node *load = loads[i];
 
-       if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
-               n         = ia32_get_irn_n_edges(right);
-               if(n == 1)
-                       return 0;
+               if (load == NULL)
+                       break;
        }
-#endif
 
-       return 1;
-}
+       ofs = inc_ofs - (maxslot + 1) * 4;
+       inc_ofs = (i+1) * 4;
 
-/**
- * Checks if irn is a candidate for address mode.
- *
- * address mode (AM):
- * - at least one operand has to be a Load within the same block AND
- * - the load must not have other users than the irn             AND
- * - the irn must not have a frame entity set
- *
- * @param cg          The ia32 code generator
- * @param h           The height information of the irg
- * @param block       The block the Loads must/mustnot be in
- * @param irn         The irn to check
- * return 0 if irn is no candidate, 1 if left load can be used, 2 if right one, 3 for both
- */
-static ia32_am_cand_t is_am_candidate(ia32_code_gen_t *cg, heights_t *h, const ir_node *block, ir_node *irn) {
-       ir_node *in, *load, *other, *left, *right;
-       int      is_cand = 0, cand;
-       int arity;
-
-       if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn) || is_ia32_vfild(irn) || is_ia32_vfist(irn) ||
-               is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn))
-               return 0;
-
-       left  = get_irn_n(irn, 2);
-       arity = get_irn_arity(irn);
-       assert(arity == 5 || arity == 4);
-       if(arity == 5) {
-               /* binary op */
-               right = get_irn_n(irn, 3);
-       } else {
-               /* unary op */
-               right = left;
+       /* create a new IncSP if needed */
+       block = get_nodes_block(irn);
+       irg   = cg->irg;
+       if (inc_ofs > 0) {
+               pred_sp = be_new_IncSP(esp, irg, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
+               sched_add_before(irn, pred_sp);
        }
 
-       in = left;
+       /* walk through the Loads and create Pops for them */
+       for (++i; i <= maxslot; ++i) {
+               ir_node *load = loads[i];
+               ir_node *mem, *pop;
+               const ir_edge_t *edge, *tmp;
+               const arch_register_t *reg;
 
-       if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
-#ifndef AGGRESSIVE_AM
-               int n;
-               n         = ia32_get_irn_n_edges(in);
-               is_cand   = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
-#else
-               is_cand   = 1;
-#endif
+               mem = get_irn_n(load, n_ia32_mem);
+               reg = arch_get_irn_register(load);
 
-               load  = get_Proj_pred(in);
-               other = right;
+               pop = new_rd_ia32_Pop(get_irn_dbg_info(load), irg, block, mem, pred_sp);
+               arch_set_irn_register(arch_env, pop, reg);
 
-               /* 8bit Loads are not supported (for binary ops),
-                * they cannot be used with every register */
-               if (get_irn_arity(irn) != 4 && get_mode_size_bits(get_ia32_ls_mode(load)) < 16) {
-                       assert(get_irn_arity(irn) == 5);
-                       is_cand = 0;
-               }
+               copy_mark(load, pop);
 
-               /* If there is a data dependency of other irn from load: cannot use AM */
-               if (is_cand && get_nodes_block(other) == block) {
-                       other   = skip_Proj(other);
-                       is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
-                       /* this could happen in loops */
-                       is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
-               }
-       }
-
-       cand    = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE;
-       in      = right;
-       is_cand = 0;
-
-       if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
-#ifndef AGGRESSIVE_AM
-               int n;
-               n         = ia32_get_irn_n_edges(in);
-               is_cand   = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
-#else
-               is_cand = 1;
-#endif
+               /* create stackpointer Proj */
+               pred_sp = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
+               arch_set_irn_register(arch_env, pred_sp, esp);
 
-               load  = get_Proj_pred(in);
-               other = left;
+               sched_add_before(irn, pop);
 
-               /* 8bit Loads are not supported, they cannot be used with every register */
-               if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
-                       is_cand = 0;
+               /* rewire now */
+               foreach_out_edge_safe(load, edge, tmp) {
+                       ir_node *proj = get_edge_src_irn(edge);
 
-               /* If there is a data dependency of other irn from load: cannot use load */
-               if (is_cand && get_nodes_block(other) == block) {
-                       other   = skip_Proj(other);
-                       is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
-                       /* this could happen in loops */
-                       is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
+                       set_Proj_pred(proj, pop);
                }
-       }
 
-       cand = is_cand ? (cand | IA32_AM_CAND_RIGHT) : cand;
+               /* we can remove the Load now */
+               sched_remove(load);
+               kill_node(load);
+       }
 
-       /* if the irn has a frame entity: we do not use address mode */
-       return get_ia32_frame_ent(irn) ? IA32_AM_CAND_NONE : cand;
+       be_set_IncSP_offset(irn, -ofs);
+       be_set_IncSP_pred(irn, pred_sp);
 }
 
+
 /**
- * Compares the base and index addr and the load/store entities
- * and returns 1 if they are equal.
+ * Find a free GP register if possible, else return NULL.
  */
-static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
-                                                                       const ir_node *addr_b, const ir_node *addr_i)
+static const arch_register_t *get_free_gp_reg(void)
 {
-       if(get_irn_n(load, 0) != addr_b)
-               return 0;
-       if(get_irn_n(load, 1) != addr_i)
-               return 0;
-
-       if(get_ia32_frame_ent(load) != get_ia32_frame_ent(store))
-               return 0;
-
-       if(get_ia32_am_sc(load) != get_ia32_am_sc(store))
-               return 0;
-       if(is_ia32_am_sc_sign(load) != is_ia32_am_sc_sign(store))
-               return 0;
-       if(get_ia32_am_offs_int(load) != get_ia32_am_offs_int(store))
-               return 0;
-       if(get_ia32_ls_mode(load) != get_ia32_ls_mode(store))
-               return 0;
-
-       return 1;
-}
+       int i;
 
-typedef enum _ia32_take_lea_attr {
-       IA32_LEA_ATTR_NONE  = 0,
-       IA32_LEA_ATTR_BASE  = (1 << 0),
-       IA32_LEA_ATTR_INDEX = (1 << 1),
-       IA32_LEA_ATTR_OFFS  = (1 << 2),
-       IA32_LEA_ATTR_SCALE = (1 << 3),
-       IA32_LEA_ATTR_AMSC  = (1 << 4),
-       IA32_LEA_ATTR_FENT  = (1 << 5)
-} ia32_take_lea_attr;
+       for(i = 0; i < N_ia32_gp_REGS; ++i) {
+               const arch_register_t *reg = &ia32_gp_regs[i];
+               if(arch_register_type_is(reg, ignore))
+                       continue;
 
-/**
- * Decides if we have to keep the LEA operand or if we can assimilate it.
- */
-static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea,
-               int have_am_sc, ia32_code_gen_t *cg)
-{
-       ir_entity *irn_ent  = get_ia32_frame_ent(irn);
-       ir_entity *lea_ent  = get_ia32_frame_ent(lea);
-       int        ret_val  = 0;
-       int        is_noreg_base  = be_is_NoReg(cg, base);
-       int        is_noreg_index = be_is_NoReg(cg, index);
-       ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea);
-
-       /* If the Add and the LEA both have a different frame entity set: keep */
-       if (irn_ent && lea_ent && (irn_ent != lea_ent))
-               return IA32_LEA_ATTR_NONE;
-       else if (! irn_ent && lea_ent)
-               ret_val |= IA32_LEA_ATTR_FENT;
-
-       /* If the Add and the LEA both have already an address mode symconst: keep */
-       if (have_am_sc && get_ia32_am_sc(lea))
-               return IA32_LEA_ATTR_NONE;
-       else if (get_ia32_am_sc(lea))
-               ret_val |= IA32_LEA_ATTR_AMSC;
-
-       /* Check the different base-index combinations */
-
-       if (! is_noreg_base && ! is_noreg_index) {
-               /* Assimilate if base is the lea and the LEA is just a Base + Offset calculation */
-               if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) {
-                       if (am_flav & ia32_O)
-                               ret_val |= IA32_LEA_ATTR_OFFS;
-
-                       ret_val |= IA32_LEA_ATTR_BASE;
-               }
-               else
-                       return IA32_LEA_ATTR_NONE;
-       }
-       else if (! is_noreg_base && is_noreg_index) {
-               /* Base is set but index not */
-               if (base == lea) {
-                       /* Base points to LEA: assimilate everything */
-                       if (am_flav & ia32_O)
-                               ret_val |= IA32_LEA_ATTR_OFFS;
-                       if (am_flav & ia32_S)
-                               ret_val |= IA32_LEA_ATTR_SCALE;
-                       if (am_flav & ia32_I)
-                               ret_val |= IA32_LEA_ATTR_INDEX;
-
-                       ret_val |= IA32_LEA_ATTR_BASE;
-               }
-               else if (am_flav & ia32_B ? 0 : 1) {
-                       /* Base is not the LEA but the LEA is an index only calculation: assimilate */
-                       if (am_flav & ia32_O)
-                               ret_val |= IA32_LEA_ATTR_OFFS;
-                       if (am_flav & ia32_S)
-                               ret_val |= IA32_LEA_ATTR_SCALE;
-
-                       ret_val |= IA32_LEA_ATTR_INDEX;
-               }
-               else
-                       return IA32_LEA_ATTR_NONE;
-       }
-       else if (is_noreg_base && ! is_noreg_index) {
-               /* Index is set but not base */
-               if (index == lea) {
-                       /* Index points to LEA: assimilate everything */
-                       if (am_flav & ia32_O)
-                               ret_val |= IA32_LEA_ATTR_OFFS;
-                       if (am_flav & ia32_S)
-                               ret_val |= IA32_LEA_ATTR_SCALE;
-                       if (am_flav & ia32_B)
-                               ret_val |= IA32_LEA_ATTR_BASE;
-
-                       ret_val |= IA32_LEA_ATTR_INDEX;
-               }
-               else if (am_flav & ia32_I ? 0 : 1) {
-                       /* Index is not the LEA but the LEA is a base only calculation: assimilate */
-                       if (am_flav & ia32_O)
-                               ret_val |= IA32_LEA_ATTR_OFFS;
-                       if (am_flav & ia32_S)
-                               ret_val |= IA32_LEA_ATTR_SCALE;
-
-                       ret_val |= IA32_LEA_ATTR_BASE;
-               }
-               else
-                       return IA32_LEA_ATTR_NONE;
-       }
-       else {
-               assert(0 && "There must have been set base or index");
+               if(be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
+                       return &ia32_gp_regs[i];
        }
 
-       return ret_val;
+       return NULL;
 }
 
 /**
- * Adds res before irn into schedule if irn was scheduled.
- * @param irn  The schedule point
- * @param res  The node to be scheduled
+ * Creates a Pop instruction before the given schedule point.
+ *
+ * @param dbgi        debug info
+ * @param irg         the graph
+ * @param block       the block
+ * @param stack       the previous stack value
+ * @param schedpoint  the new node is added before this node
+ * @param reg         the register to pop
+ *
+ * @return the new stack value
  */
-static INLINE void try_add_to_sched(ir_node *irn, ir_node *res) {
-       if (sched_is_scheduled(irn))
-               sched_add_before(irn, res);
-}
+static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block,
+                           ir_node *stack, ir_node *schedpoint,
+                           const arch_register_t *reg)
+{
+       const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+       ir_node *pop;
+       ir_node *keep;
+       ir_node *val;
+       ir_node *in[1];
 
-/**
- * Removes node from schedule if it is not used anymore. If irn is a mode_T node
- * all it's Projs are removed as well.
- * @param irn  The irn to be removed from schedule
- */
-static INLINE void try_remove_from_sched(ir_node *node) {
-       int i, arity;
+       pop   = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);
 
-       if(get_irn_mode(node) == mode_T) {
-               const ir_edge_t *edge;
-               foreach_out_edge(node, edge) {
-                       ir_node *proj = get_edge_src_irn(edge);
-                       try_remove_from_sched(proj);
-               }
-       }
+       stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
+       arch_set_irn_register(arch_env, stack, esp);
+       val   = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
+       arch_set_irn_register(arch_env, val, reg);
 
-       if(get_irn_n_edges(node) != 0)
-               return;
+       sched_add_before(schedpoint, pop);
 
-       if (sched_is_scheduled(node)) {
-               sched_remove(node);
-       }
+       in[0] = val;
+       keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
+       sched_add_before(schedpoint, keep);
 
-       arity = get_irn_arity(node);
-       for(i = 0; i < arity; ++i) {
-               set_irn_n(node, i, new_Bad());
-       }
+       return stack;
 }
 
 /**
- * Folds Add or Sub to LEA if possible
+ * Creates a Push instruction before the given schedule point.
+ *
+ * @param dbgi        debug info
+ * @param irg         the graph
+ * @param block       the block
+ * @param stack       the previous stack value
+ * @param schedpoint  the new node is added before this node
+ * (the pushed value is an ia32 Unknown, i.e. the Push merely reserves a stack slot)
+ *
+ * @return the new stack value
  */
-static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn) {
-       ir_graph   *irg        = get_irn_irg(irn);
-       dbg_info   *dbg        = get_irn_dbg_info(irn);
-       ir_node    *block      = get_nodes_block(irn);
-       ir_node    *res        = irn;
-       ir_node    *shift      = NULL;
-       ir_node    *lea_o      = NULL;
-       ir_node    *lea        = NULL;
-       long        offs       = 0;
-       long        offs_cnst  = 0;
-       long        offs_lea   = 0;
-       int         scale      = 0;
-       int         isadd      = 0;
-       int         dolea      = 0;
-       int         have_am_sc = 0;
-       int         am_sc_sign = 0;
-       ident      *am_sc      = NULL;
-       ir_entity  *lea_ent    = NULL;
-       ir_node    *noreg      = ia32_new_NoReg_gp(cg);
-       ir_node    *left, *right, *temp;
-       ir_node    *base, *index;
-       int consumed_left_shift;
-       ia32_am_flavour_t am_flav;
-       DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)
-
-       if (is_ia32_Add(irn))
-               isadd = 1;
-
-       left  = get_irn_n(irn, 2);
-       right = get_irn_n(irn, 3);
-
-       /* "normalize" arguments in case of add with two operands */
-       if  (isadd && ! be_is_NoReg(cg, right)) {
-               /* put LEA == ia32_am_O as right operand */
-               if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
-                       set_irn_n(irn, 2, right);
-                       set_irn_n(irn, 3, left);
-                       temp  = left;
-                       left  = right;
-                       right = temp;
-               }
-
-               /* put LEA != ia32_am_O as left operand */
-               if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
-                       set_irn_n(irn, 2, right);
-                       set_irn_n(irn, 3, left);
-                       temp  = left;
-                       left  = right;
-                       right = temp;
-               }
-
-               /* put SHL as left operand iff left is NOT a LEA */
-               if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
-                       set_irn_n(irn, 2, right);
-                       set_irn_n(irn, 3, left);
-                       temp  = left;
-                       left  = right;
-                       right = temp;
-               }
-       }
+static ir_node *create_push(dbg_info *dbgi, ir_graph *irg, ir_node *block,
+                            ir_node *stack, ir_node *schedpoint)
+{
+       const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
 
-       base    = left;
-       index   = noreg;
-       offs    = 0;
-       scale   = 0;
-       am_flav = 0;
+       ir_node *val   = ia32_new_Unknown_gp(cg);
+       ir_node *noreg = ia32_new_NoReg_gp(cg);
+       ir_node *nomem = get_irg_no_mem(irg);
+       ir_node *push  = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack);
+       sched_add_before(schedpoint, push);
 
-       /* check for operation with immediate */
-       if (is_ia32_ImmConst(irn)) {
-               tarval *tv = get_ia32_Immop_tarval(irn);
+       stack = new_r_Proj(irg, block, push, mode_Iu, pn_ia32_Push_stack);
+       arch_set_irn_register(arch_env, stack, esp);
 
-               DBG((mod, LEVEL_1, "\tfound op with imm const"));
+       return stack;
+}
 
-               offs_cnst = get_tarval_long(tv);
-               dolea     = 1;
-       }
-       else if (isadd && is_ia32_ImmSymConst(irn)) {
-               DBG((mod, LEVEL_1, "\tfound op with imm symconst"));
+/**
+ * Optimize an IncSP by replacing it with Push/Pop.
+ */
+static void peephole_be_IncSP(ir_node *node)
+{
+       const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+       const arch_register_t *reg;
+       ir_graph              *irg = current_ir_graph;
+       dbg_info              *dbgi;
+       ir_node               *block;
+       ir_node               *stack;
+       int                    offset;
 
-               have_am_sc = 1;
-               dolea      = 1;
-               am_sc      = get_ia32_Immop_symconst(irn);
-               am_sc_sign = is_ia32_am_sc_sign(irn);
-       }
+       /* first optimize IncSP->IncSP combinations */
+       node = be_peephole_IncSP_IncSP(node);
 
-       /* determine the operand which needs to be checked */
-       temp = be_is_NoReg(cg, right) ? left : right;
-
-       /* check if right operand is AMConst (LEA with ia32_am_O)  */
-       /* but we can only eat it up if there is no other symconst */
-       /* because the linker won't accept two symconsts           */
-       if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
-               DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));
-
-               offs_lea   = get_ia32_am_offs_int(temp);
-               am_sc      = get_ia32_am_sc(temp);
-               am_sc_sign = is_ia32_am_sc_sign(temp);
-               have_am_sc = 1;
-               dolea      = 1;
-               lea_o      = temp;
-
-               if (temp == base)
-                       base = noreg;
-               else if (temp == right)
-                       right = noreg;
-       }
+       /* transform IncSP->Store combinations to Push where possible */
+       peephole_IncSP_Store_to_push(node);
 
-       if (isadd) {
-               /* default for add -> make right operand to index */
-               index               = right;
-               dolea               = 1;
-               consumed_left_shift = -1;
+       /* transform Load->IncSP combinations to Pop where possible */
+       peephole_Load_IncSP_to_pop(node);
 
-               DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));
+       if (arch_get_irn_register(node) != esp)
+               return;
 
-               /* determine the operand which needs to be checked */
-               temp = left;
-               if (is_ia32_Lea(left)) {
-                       temp = right;
-                       consumed_left_shift = 0;
-               }
+       /* replace IncSP -4/-8 by Pop(s) into a free register and IncSP +4/+8
+        * by Push(es) where profitable */
+       offset = be_get_IncSP_offset(node);
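+       /* if the config says a plain add/sub esp is just as good for this
+        * size, leave the IncSP alone */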
+       if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
+           (offset != -4 || ia32_cg_config.use_add_esp_4) &&
+           (offset != +4 || ia32_cg_config.use_sub_esp_4) &&
+           (offset != +8 || ia32_cg_config.use_sub_esp_8))
+               return;
 
-               /* check for SHL 1,2,3 */
-               if (pred_is_specific_node(temp, is_ia32_Shl)) {
+       if (offset < 0) {
+               /* we need a free register for pop */
+               reg = get_free_gp_reg();
+               if (reg == NULL)
+                       return;
 
-                       if (is_ia32_ImmConst(temp)) {
-                               long shiftval = get_tarval_long(get_ia32_Immop_tarval(temp));
+               dbgi  = get_irn_dbg_info(node);
+               block = get_nodes_block(node);
+               stack = be_get_IncSP_pred(node);
 
-                               if (shiftval <= 3) {
-                                       index               = get_irn_n(temp, 2);
-                                       consumed_left_shift = consumed_left_shift < 0 ? 1 : 0;
-                                       shift = temp;
-                                       scale = shiftval;
+               stack = create_pop(dbgi, irg, block, stack, node, reg);
 
-                                       DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
-                               }
-                       }
+               if (offset == -8) {
+                       stack = create_pop(dbgi, irg, block, stack, node, reg);
                }
+       } else {
+               dbgi  = get_irn_dbg_info(node);
+               block = get_nodes_block(node);
+               stack = be_get_IncSP_pred(node);
+               stack = create_push(dbgi, irg, block, stack, node);
 
-               /* fix base */
-               if (! be_is_NoReg(cg, index)) {
-                       /* if we have index, but left == right -> no base */
-                       if (left == right) {
-                               base = noreg;
-                       }
-                       else if (consumed_left_shift == 1) {
-                               /* -> base is right operand  */
-                               base = (right == lea_o) ? noreg : right;
-                       }
+               if (offset == +8) {
+                       stack = create_push(dbgi, irg, block, stack, node);
                }
        }
 
-       /* Try to assimilate a LEA as left operand */
-       if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
-               /* check if we can assimilate the LEA */
-               int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg);
+       be_peephole_exchange(node, stack);
+}
 
-               if (take_attr == IA32_LEA_ATTR_NONE) {
-                       DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
-               }
-               else {
-                       DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
-                       lea = left; /* for statistics */
+/**
+ * Peephole optimisation for ia32_Const nodes.
+ */
+static void peephole_ia32_Const(ir_node *node)
+{
+       const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
+       const arch_register_t       *reg;
+       ir_graph                    *irg = current_ir_graph;
+       ir_node                     *block;
+       dbg_info                    *dbgi;
+       ir_node                     *produceval;
+       ir_node                     *xor;
+       ir_node                     *noreg;
+
+       /* try to transform a mov 0, reg to xor reg reg */
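+       /* e.g. "movl $0, %eax" (5 bytes) becomes "xorl %eax, %eax" (2 bytes) */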
+       if (attr->offset != 0 || attr->symconst != NULL)
+               return;
+       if (ia32_cg_config.use_mov_0)
+               return;
+       /* xor destroys the flags, so no-one must be using them */
+       if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
+               return;
 
-                       if (take_attr & IA32_LEA_ATTR_OFFS)
-                               offs = get_ia32_am_offs_int(left);
+       reg = arch_get_irn_register(node);
+       assert(be_peephole_get_reg_value(reg) == NULL);
 
-                       if (take_attr & IA32_LEA_ATTR_AMSC) {
-                               am_sc      = get_ia32_am_sc(left);
-                               have_am_sc = 1;
-                               am_sc_sign = is_ia32_am_sc_sign(left);
-                       }
+       /* create xor(produceval, produceval) */
+       block      = get_nodes_block(node);
+       dbgi       = get_irn_dbg_info(node);
+       produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
+       arch_set_irn_register(arch_env, produceval, reg);
 
-                       if (take_attr & IA32_LEA_ATTR_SCALE)
-                               scale = get_ia32_am_scale(left);
+       noreg = ia32_new_NoReg_gp(cg);
+       xor   = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
+                               produceval, produceval);
+       arch_set_irn_register(arch_env, xor, reg);
 
-                       if (take_attr & IA32_LEA_ATTR_BASE)
-                               base = get_irn_n(left, 0);
+       sched_add_before(node, produceval);
+       sched_add_before(node, xor);
 
-                       if (take_attr & IA32_LEA_ATTR_INDEX)
-                               index = get_irn_n(left, 1);
+       copy_mark(node, xor);
+       be_peephole_exchange(node, xor);
+}
 
-                       if (take_attr & IA32_LEA_ATTR_FENT)
-                               lea_ent = get_ia32_frame_ent(left);
-               }
-       }
+static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
+{
+       return node == cg->noreg_gp;
+}
 
-       /* ok, we can create a new LEA */
-       if (dolea) {
-               res = new_rd_ia32_Lea(dbg, irg, block, base, index);
+static ir_node *create_immediate_from_int(ia32_code_gen_t *cg, int val)
+{
+       ir_graph *irg         = current_ir_graph;
+       ir_node  *start_block = get_irg_start_block(irg);
+       ir_node  *immediate   = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
+                                                     0, val);
+       arch_set_irn_register(cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);
 
-               /* add the old offset of a previous LEA */
-               add_ia32_am_offs_int(res, offs);
+       return immediate;
+}
 
-               /* add the new offset */
-               if (isadd) {
-                       add_ia32_am_offs_int(res, offs_cnst);
-                       add_ia32_am_offs_int(res, offs_lea);
-               } else {
-                       /* either lea_O-cnst, -cnst or -lea_O  */
-                       if (offs_cnst != 0) {
-                               add_ia32_am_offs_int(res, offs_lea);
-                               add_ia32_am_offs_int(res, -offs_cnst);
-                       } else {
-                               add_ia32_am_offs_int(res, offs_lea);
-                       }
-               }
+static ir_node *create_immediate_from_am(ia32_code_gen_t *cg,
+                                         const ir_node *node)
+{
+       ir_graph  *irg     = get_irn_irg(node);
+       ir_node   *block   = get_nodes_block(node);
+       int        offset  = get_ia32_am_offs_int(node);
+       int        sc_sign = is_ia32_am_sc_sign(node);
+       ir_entity *entity  = get_ia32_am_sc(node);
+       ir_node   *res;
+
+       res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
+       arch_set_irn_register(cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]);
+       return res;
+}
 
-               /* set the address mode symconst */
-               if (have_am_sc) {
-                       set_ia32_am_sc(res, am_sc);
-                       if (am_sc_sign)
-                               set_ia32_am_sc_sign(res);
-               }
+static int is_am_one(const ir_node *node)
+{
+       int        offset  = get_ia32_am_offs_int(node);
+       ir_entity *entity  = get_ia32_am_sc(node);
 
-               /* copy the frame entity (could be set in case of Add */
-               /* which was a FrameAddr) */
-               if (lea_ent != NULL) {
-                       set_ia32_frame_ent(res, lea_ent);
-                       set_ia32_use_frame(res);
-               } else {
-                       set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
-                       if(is_ia32_use_frame(irn))
-                               set_ia32_use_frame(res);
-               }
+       return offset == 1 && entity == NULL;
+}
 
-               /* set scale */
-               set_ia32_am_scale(res, scale);
+static int is_am_minus_one(const ir_node *node)
+{
+       int        offset  = get_ia32_am_offs_int(node);
+       ir_entity *entity  = get_ia32_am_sc(node);
 
-               am_flav = ia32_am_N;
-               /* determine new am flavour */
-               if (offs || offs_cnst || offs_lea || have_am_sc) {
-                       am_flav |= ia32_O;
-               }
-               if (! be_is_NoReg(cg, base)) {
-                       am_flav |= ia32_B;
-               }
-               if (! be_is_NoReg(cg, index)) {
-                       am_flav |= ia32_I;
-               }
-               if (scale > 0) {
-                       am_flav |= ia32_S;
-               }
-               set_ia32_am_flavour(res, am_flav);
+       return offset == -1 && entity == NULL;
+}
 
-               set_ia32_op_type(res, ia32_AddrModeS);
+/**
+ * Transforms a LEA into an Add or SHL if possible.
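+ * This works when the output register equals the base or index register,
+ * e.g. "leal 4(%eax), %eax" -> "addl $4, %eax" and
+ * "leal (,%eax,4), %eax" -> "shll $2, %eax".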
+ */
+static void peephole_ia32_Lea(ir_node *node)
+{
+       const arch_env_t      *arch_env = cg->arch_env;
+       ir_graph              *irg      = current_ir_graph;
+       ir_node               *base;
+       ir_node               *index;
+       const arch_register_t *base_reg;
+       const arch_register_t *index_reg;
+       const arch_register_t *out_reg;
+       int                    scale;
+       int                    has_immediates;
+       ir_node               *op1;
+       ir_node               *op2;
+       dbg_info              *dbgi;
+       ir_node               *block;
+       ir_node               *res;
+       ir_node               *noreg;
+       ir_node               *nomem;
+
+       assert(is_ia32_Lea(node));
+
+       /* we can only do this if we are allowed to clobber the flags */
+       if(be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
+               return;
 
-               SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
+       base  = get_irn_n(node, n_ia32_Lea_base);
+       index = get_irn_n(node, n_ia32_Lea_index);
 
-               DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %d]\n", base, index, scale, get_ia32_am_offs_int(res)));
+       if(is_noreg(cg, base)) {
+               base     = NULL;
+               base_reg = NULL;
+       } else {
+               base_reg = arch_get_irn_register(base);
+       }
+       if(is_noreg(cg, index)) {
+               index     = NULL;
+               index_reg = NULL;
+       } else {
+               index_reg = arch_get_irn_register(index);
+       }
 
-               /* we will exchange it, report here before the Proj is created */
-               if (shift && lea && lea_o) {
-                       try_remove_from_sched(shift);
-                       try_remove_from_sched(lea);
-                       try_remove_from_sched(lea_o);
-                       DBG_OPT_LEA4(irn, lea_o, lea, shift, res);
-               }
-               else if (shift && lea) {
-                       try_remove_from_sched(shift);
-                       try_remove_from_sched(lea);
-                       DBG_OPT_LEA3(irn, lea, shift, res);
-               }
-               else if (shift && lea_o) {
-                       try_remove_from_sched(shift);
-                       try_remove_from_sched(lea_o);
-                       DBG_OPT_LEA3(irn, lea_o, shift, res);
+       if(base == NULL && index == NULL) {
+               /* we shouldn't construct these in the first place... */
+#ifdef DEBUG_libfirm
+               ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
+#endif
+               return;
+       }
+
+       out_reg = arch_get_irn_register(node);
+       scale   = get_ia32_am_scale(node);
+       assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
+       /* check if we have immediate values (frame entities should already be
+        * expressed in the offsets) */
+       if(get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
+               has_immediates = 1;
+       } else {
+               has_immediates = 0;
+       }
+
+       /* we can transform leas where the out register is the same as either the
+        * base or index register back to an Add or Shl */
+       if(out_reg == base_reg) {
+               if(index == NULL) {
+#ifdef DEBUG_libfirm
+                       if(!has_immediates) {
+                               ir_fprintf(stderr, "Optimisation warning: found lea which is "
+                                          "just a copy\n");
+                       }
+#endif
+                       op1 = base;
+                       goto make_add_immediate;
                }
-               else if (lea && lea_o) {
-                       try_remove_from_sched(lea);
-                       try_remove_from_sched(lea_o);
-                       DBG_OPT_LEA3(irn, lea_o, lea, res);
+               if(scale == 0 && !has_immediates) {
+                       op1 = base;
+                       op2 = index;
+                       goto make_add;
                }
-               else if (shift) {
-                       try_remove_from_sched(shift);
-                       DBG_OPT_LEA2(irn, shift, res);
+               /* can't create an add */
+               return;
+       } else if(out_reg == index_reg) {
+               if(base == NULL) {
+                       if(has_immediates && scale == 0) {
+                               op1 = index;
+                               goto make_add_immediate;
+                       } else if(!has_immediates && scale > 0) {
+                               op1 = index;
+                               op2 = create_immediate_from_int(cg, scale);
+                               goto make_shl;
+                       } else if(!has_immediates) {
+#ifdef DEBUG_libfirm
+                               ir_fprintf(stderr, "Optimisation warning: found lea which is "
+                                          "just a copy\n");
+#endif
+                       }
+               } else if(scale == 0 && !has_immediates) {
+                       op1 = index;
+                       op2 = base;
+                       goto make_add;
                }
-               else if (lea) {
-                       try_remove_from_sched(lea);
-                       DBG_OPT_LEA2(irn, lea, res);
+               /* can't create an add */
+               return;
+       } else {
+               /* can't create an add */
+               return;
+       }
+
+make_add_immediate:
+       if(ia32_cg_config.use_incdec) {
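+               /* an offset of +1/-1 maps to the short one-byte inc/dec encoding */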
+               if(is_am_one(node)) {
+                       dbgi  = get_irn_dbg_info(node);
+                       block = get_nodes_block(node);
+                       res   = new_rd_ia32_Inc(dbgi, irg, block, op1);
+                       arch_set_irn_register(arch_env, res, out_reg);
+                       goto exchange;
                }
-               else if (lea_o) {
-                       try_remove_from_sched(lea_o);
-                       DBG_OPT_LEA2(irn, lea_o, res);
+               if(is_am_minus_one(node)) {
+                       dbgi  = get_irn_dbg_info(node);
+                       block = get_nodes_block(node);
+                       res   = new_rd_ia32_Dec(dbgi, irg, block, op1);
+                       arch_set_irn_register(arch_env, res, out_reg);
+                       goto exchange;
                }
-               else
-                       DBG_OPT_LEA1(irn, res);
-
-               /* get the result Proj of the Add/Sub */
-               try_add_to_sched(irn, res);
-               try_remove_from_sched(irn);
-
-               assert(irn && "Couldn't find result proj");
-
-               /* exchange the old op with the new LEA */
-               exchange(irn, res);
        }
-
-       return res;
+       op2 = create_immediate_from_am(cg, node);
+
+make_add:
+       dbgi  = get_irn_dbg_info(node);
+       block = get_nodes_block(node);
+       noreg = ia32_new_NoReg_gp(cg);
+       nomem = new_NoMem();
+       res   = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
+       arch_set_irn_register(arch_env, res, out_reg);
+       set_ia32_commutative(res);
+       goto exchange;
+
+make_shl:
+       dbgi  = get_irn_dbg_info(node);
+       block = get_nodes_block(node);
+       noreg = ia32_new_NoReg_gp(cg);
+       nomem = new_NoMem();
+       res   = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
+       arch_set_irn_register(arch_env, res, out_reg);
+       goto exchange;
+
+exchange:
+       SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));
+
+       /* add new ADD/SHL to schedule */
+       DBG_OPT_LEA2ADD(node, res);
+
+       /* exchange the Add and the LEA */
+       sched_add_before(node, res);
+       copy_mark(node, res);
+       be_peephole_exchange(node, res);
 }
 
-
 /**
- * Merges a Load/Store node with a LEA.
- * @param irn The Load/Store node
- * @param lea The LEA
+ * Split an IMul mem, imm into a Load mem and IMul reg, imm if possible.
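+ * E.g. "imull $3, 8(%ebp), %eax" becomes "movl 8(%ebp), %ecx" followed by
+ * "imull $3, %ecx, %eax" (with %ecx standing for whichever register is free).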
  */
-static void merge_loadstore_lea(ir_node *irn, ir_node *lea) {
-       ir_entity *irn_ent = get_ia32_frame_ent(irn);
-       ir_entity *lea_ent = get_ia32_frame_ent(lea);
+static void peephole_ia32_Imul_split(ir_node *imul)
+{
+       const ir_node         *right = get_irn_n(imul, n_ia32_IMul_right);
+       const arch_register_t *reg;
+       ir_node               *res;
 
-       /* If the irn and the LEA both have a different frame entity set: do not merge */
-       if (irn_ent != NULL && lea_ent != NULL && (irn_ent != lea_ent))
+       if (!is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
+               /* not a mem, imm form: nothing to split */
                return;
-       else if (irn_ent == NULL && lea_ent != NULL) {
-               set_ia32_frame_ent(irn, lea_ent);
-               set_ia32_use_frame(irn);
        }
+       /* we need a free register */
+       reg = get_free_gp_reg();
+       if (reg == NULL)
+               return;
 
-       /* get the AM attributes from the LEA */
-       add_ia32_am_offs_int(irn, get_ia32_am_offs_int(lea));
-       set_ia32_am_scale(irn, get_ia32_am_scale(lea));
-       set_ia32_am_flavour(irn, get_ia32_am_flavour(lea));
-
-       set_ia32_am_sc(irn, get_ia32_am_sc(lea));
-       if (is_ia32_am_sc_sign(lea))
-               set_ia32_am_sc_sign(irn);
-
-       set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);
-
-       /* set base and index */
-       set_irn_n(irn, 0, get_irn_n(lea, 0));
-       set_irn_n(irn, 1, get_irn_n(lea, 1));
-
-       try_remove_from_sched(lea);
-
-       /* clear remat flag */
-       set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
-
-       if (is_ia32_Ld(irn))
-               DBG_OPT_LOAD_LEA(lea, irn);
-       else
-               DBG_OPT_STORE_LEA(lea, irn);
-
+       /* fine, we can split it: turn the address mode back into a Load */
+       res = turn_back_am(imul);
+       arch_set_irn_register(arch_env, res, reg);
 }
 
 /**
- * Sets new_right index of irn to right and new_left index to left.
- * Also exchange left and right
+ * Replace xorps r,r and xorpd r,r by pxor r,r
  */
-static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right, int new_left, int new_right) {
-       ir_node *temp;
-
-       set_irn_n(irn, new_right, *right);
-       set_irn_n(irn, new_left, *left);
-
-       temp   = *left;
-       *left  = *right;
-       *right = temp;
-
-       /* this is only needed for Compares, but currently ALL nodes
-        * have this attribute :-) */
-       set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
+static void peephole_ia32_xZero(ir_node *xor) {
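+       /* same effect (zero the register), but pxor is the form the target
+        * prefers when ia32_cg_config.use_pxor is set */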
+       set_irn_op(xor, op_ia32_xPzero);
 }
 
 /**
- * Performs address calculation optimization (create LEAs if possible)
+ * Register a peephole optimisation function.
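+ * The function is stored in the opcode's generic function pointer, from
+ * where be_peephole_opt() picks it up for every matching node.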
  */
-static void optimize_lea(ir_node *irn, void *env) {
-       ia32_code_gen_t *cg  = env;
-
-       if (! is_ia32_irn(irn))
-               return;
-
-       /* Following cases can occur:                                  */
-       /* - Sub (l, imm) -> LEA [base - offset]                       */
-       /* - Sub (l, r == LEA with ia32_am_O)   -> LEA [base - offset] */
-       /* - Add (l, imm) -> LEA [base + offset]                       */
-       /* - Add (l, r == LEA with ia32_am_O)  -> LEA [base + offset]  */
-       /* - Add (l == LEA with ia32_am_O, r)  -> LEA [base + offset]  */
-       /* - Add (l, r) -> LEA [base + index * scale]                  */
-       /*              with scale > 1 iff l/r == shl (1,2,3)          */
-       if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
-               ir_node *res;
-
-               if(!is_addr_candidate(irn))
-                       return;
+static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
+       assert(op->ops.generic == NULL);
+       op->ops.generic = (op_func)func;
+}
 
-               DBG((cg->mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
-               res = fold_addr(cg, irn);
-
-               if (res != irn)
-                       DB((cg->mod, LEVEL_1, "transformed into %+F\n", res));
-               else
-                       DB((cg->mod, LEVEL_1, "not transformed\n"));
-       } else if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
-               /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
-               /* - Store -> LEA into Store }       it might be better to keep the LEA             */
-               ir_node *left = get_irn_n(irn, 0);
-
-               if (is_ia32_Lea(left)) {
-                       const ir_edge_t *edge, *ne;
-                       ir_node *src;
-
-                       /* merge all Loads/Stores connected to this LEA with the LEA */
-                       foreach_out_edge_safe(left, edge, ne) {
-                               src = get_edge_src_irn(edge);
-
-                               if (src && (get_edge_src_pos(edge) == 0) && (is_ia32_Ld(src) || is_ia32_St(src) || is_ia32_Store8Bit(src))) {
-                                       DBG((cg->mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
-                                       if (! is_ia32_got_lea(src))
-                                               merge_loadstore_lea(src, left);
-                                       set_ia32_got_lea(src);
-                               }
-                       }
-               }
-       }
+/* Perform peephole optimizations. */
+void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
+{
+       cg       = new_cg;
+       arch_env = cg->arch_env;
+
+       /* register peephole optimisations */
+       clear_irp_opcodes_generic_func();
+       register_peephole_optimisation(op_ia32_Const,    peephole_ia32_Const);
+       register_peephole_optimisation(op_be_IncSP,      peephole_be_IncSP);
+       register_peephole_optimisation(op_ia32_Lea,      peephole_ia32_Lea);
+       register_peephole_optimisation(op_ia32_Cmp,      peephole_ia32_Cmp);
+       register_peephole_optimisation(op_ia32_Cmp8Bit,  peephole_ia32_Cmp);
+       register_peephole_optimisation(op_ia32_Test,     peephole_ia32_Test);
+       register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
+       register_peephole_optimisation(op_be_Return,     peephole_ia32_Return);
+       if (! ia32_cg_config.use_imul_mem_imm32)
+               register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
+       if (ia32_cg_config.use_pxor)
+               register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);
+
+       be_peephole_opt(cg->birg);
 }
 
 /**
- * Checks for address mode patterns and performs the
- * necessary transformations.
- * This function is called by a walker.
+ * Removes the node from the schedule if it is not used anymore. If irn is a
+ * mode_T node, all of its Projs are removed as well.
+ * @param irn  The irn to be removed from schedule
  */
-static void optimize_am(ir_node *irn, void *env) {
-       ia32_am_opt_env_t *am_opt_env = env;
-       ia32_code_gen_t   *cg         = am_opt_env->cg;
-       ir_graph          *irg        = get_irn_irg(irn);
-       heights_t         *h          = am_opt_env->h;
-       ir_node           *block, *left, *right;
-       ir_node           *store, *load, *mem_proj;
-       ir_node           *addr_b, *addr_i;
-       int               need_exchange_on_fail = 0;
-       ia32_am_type_t    am_support;
-       ia32_am_cand_t cand;
-       ia32_am_cand_t orig_cand;
-       int               dest_possible;
-       int               source_possible;
-       DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)
-
-       if (!is_ia32_irn(irn) || is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn))
-               return;
-       if (is_ia32_Lea(irn))
+static INLINE void try_kill(ir_node *node)
+{
+       if(get_irn_mode(node) == mode_T) {
+               const ir_edge_t *edge, *next;
+               foreach_out_edge_safe(node, edge, next) {
+                       ir_node *proj = get_edge_src_irn(edge);
+                       try_kill(proj);
+               }
+       }
+
+       if(get_irn_n_edges(node) != 0)
                return;
 
-       am_support = get_ia32_am_support(irn);
-       block = get_nodes_block(irn);
+       if (sched_is_scheduled(node)) {
+               sched_remove(node);
+       }
 
-       DBG((mod, LEVEL_1, "checking for AM\n"));
-
-       /* fold following patterns:                                                         */
-       /* - op -> Load into AMop with am_Source                                            */
-       /*   conditions:                                                                    */
-       /*     - op is am_Source capable AND                                                */
-       /*     - the Load is only used by this op AND                                       */
-       /*     - the Load is in the same block                                              */
-       /* - Store -> op -> Load  into AMop with am_Dest                                    */
-       /*   conditions:                                                                    */
-       /*     - op is am_Dest capable AND                                                  */
-       /*     - the Store uses the same address as the Load AND                            */
-       /*     - the Load is only used by this op AND                                       */
-       /*     - the Load and Store are in the same block AND                               */
-       /*     - nobody else uses the result of the op                                      */
-       if (get_ia32_am_support(irn) == ia32_am_None)
-               return;
+       kill_node(node);
+}
 
-       cand = is_am_candidate(cg, h, block, irn);
-       if (cand == IA32_AM_CAND_NONE)
-               return;
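+/**
+ * Removes a Conv in front of a Store if the Conv does not shrink the value
+ * below the width the Store writes anyway.
+ */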
+static void optimize_conv_store(ir_node *node)
+{
+       ir_node *pred;
+       ir_node *pred_proj;
+       ir_mode *conv_mode;
+       ir_mode *store_mode;
 
-       orig_cand = cand;
-       DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));
+       if(!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
+               return;
 
-       left  = get_irn_n(irn, 2);
-       if (get_irn_arity(irn) == 4) {
-               /* it's an "unary" operation */
-               right = left;
-               assert(cand == IA32_AM_CAND_BOTH);
+       assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
+       pred_proj = get_irn_n(node, n_ia32_Store_val);
+       if(is_Proj(pred_proj)) {
+               pred = get_Proj_pred(pred_proj);
        } else {
-               right = get_irn_n(irn, 3);
+               pred = pred_proj;
        }
+       if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
+               return;
+       if(get_ia32_op_type(pred) != ia32_Normal)
+               return;
 
-       dest_possible = am_support & ia32_am_Dest ? 1 : 0;
-       source_possible = am_support & ia32_am_Source ? 1 : 0;
+       /* the store only stores the lower bits, so we only need the conv
+        * if it shrinks the mode */
+       conv_mode  = get_ia32_ls_mode(pred);
+       store_mode = get_ia32_ls_mode(node);
+       if(get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
+               return;
 
-       if (dest_possible) {
-               addr_b = NULL;
-               addr_i = NULL;
-               store  = NULL;
+       set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
+       if(get_irn_n_edges(pred_proj) == 0) {
+               kill_node(pred_proj);
+               if(pred != pred_proj)
+                       kill_node(pred);
+       }
+}
 
-               /* we should only have 1 user which is a store */
-               if (ia32_get_irn_n_edges(irn) == 1) {
-                       ir_node *succ = get_edge_src_irn(get_irn_out_edge_first(irn));
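+/**
+ * Removes a Conv behind a Load if the Load already extends the value to at
+ * least the Conv's width (adjusting the Load's mode if only the signedness
+ * differs and the Load has a single user).
+ */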
+static void optimize_load_conv(ir_node *node)
+{
+       ir_node *pred, *predpred;
+       ir_mode *load_mode;
+       ir_mode *conv_mode;
 
-                       if (is_ia32_xStore(succ) || is_ia32_Store(succ)) {
-                               store  = succ;
-                               addr_b = get_irn_n(store, 0);
-                               addr_i = get_irn_n(store, 1);
-                       }
-               }
+       if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
+               return;
 
-               if (store == NULL) {
-                       dest_possible = 0;
-               }
-       }
+       assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
+       pred = get_irn_n(node, n_ia32_Conv_I2I_val);
+       if(!is_Proj(pred))
+               return;
 
-       if (dest_possible) {
-               /* normalize nodes, we need the interesting load on the left side */
-               if (cand & IA32_AM_CAND_RIGHT) {
-                       load = get_Proj_pred(right);
-                       if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
-                               exchange_left_right(irn, &left, &right, 3, 2);
-                               need_exchange_on_fail ^= 1;
-                               if (cand == IA32_AM_CAND_RIGHT)
-                                       cand = IA32_AM_CAND_LEFT;
-                       }
-               }
-       }
+       predpred = get_Proj_pred(pred);
+       if(!is_ia32_Load(predpred))
+               return;
 
-       if (dest_possible) {
-               if(cand & IA32_AM_CAND_LEFT && is_Proj(left)) {
-                       load = get_Proj_pred(left);
+       /* the load already extends the value into the upper bits, so we only
+        * need the conv if it shrinks the mode */
+       load_mode = get_ia32_ls_mode(predpred);
+       conv_mode = get_ia32_ls_mode(node);
+       if(get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
+               return;
 
-#ifndef AGGRESSIVE_AM
-                       /* we have to be the only user of the load */
-                       if (get_irn_n_edges(left) > 1) {
-                               dest_possible = 0;
+       if(get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
+               /* change the load if it has only 1 user */
+               if(get_irn_n_edges(pred) == 1) {
+                       ir_mode *newmode;
+                       if(get_mode_sign(conv_mode)) {
+                               newmode = find_signed_mode(load_mode);
+                       } else {
+                               newmode = find_unsigned_mode(load_mode);
                        }
-#endif
+                       assert(newmode != NULL);
+                       set_ia32_ls_mode(predpred, newmode);
                } else {
-                       dest_possible = 0;
-               }
-       }
-
-       if (dest_possible) {
-               /* the store has to use the loads memory or the same memory
-                * as the load */
-               ir_node *loadmem = get_irn_n(load, 2);
-               ir_node *storemem = get_irn_n(store, 3);
-               assert(get_irn_mode(loadmem) == mode_M);
-               assert(get_irn_mode(storemem) == mode_M);
-               if(storemem != loadmem || !is_Proj(storemem)
-                               || get_Proj_pred(storemem) != load) {
-                       dest_possible = 0;
+                       /* otherwise we have to keep the conv */
+                       return;
                }
        }
 
-       if (dest_possible) {
-               /* Compare Load and Store address */
-               if (!load_store_addr_is_equal(load, store, addr_b, addr_i))
-                       dest_possible = 0;
-       }
-
-       if (dest_possible) {
-               /* all conditions fullfilled, do the transformation */
-               assert(cand & IA32_AM_CAND_LEFT);
-
-               /* set new base, index and attributes */
-               set_irn_n(irn, 0, addr_b);
-               set_irn_n(irn, 1, addr_i);
-               add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
-               set_ia32_am_scale(irn, get_ia32_am_scale(load));
-               set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
-               set_ia32_op_type(irn, ia32_AddrModeD);
-               set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
-               set_ia32_ls_mode(irn, get_ia32_ls_mode(load));
-
-               set_ia32_am_sc(irn, get_ia32_am_sc(load));
-               if (is_ia32_am_sc_sign(load))
-                       set_ia32_am_sc_sign(irn);
-
-               /* connect to Load memory and disconnect Load */
-               if (get_irn_arity(irn) == 5) {
-                       /* binary AMop */
-                       set_irn_n(irn, 4, get_irn_n(load, 2));
-                       set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
-               } else {
-                       /* unary AMop */
-                       set_irn_n(irn, 3, get_irn_n(load, 2));
-                       set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
-               }
-
-               set_irn_mode(irn, mode_M);
+       /* kill the conv */
+       exchange(node, pred);
+}
 
-               /* connect the memory Proj of the Store to the op */
-               mem_proj = ia32_get_proj_for_mode(store, mode_M);
-               edges_reroute(mem_proj, irn, irg);
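+/**
+ * Merges two consecutive Convs into a single Conv where possible.
+ */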
+static void optimize_conv_conv(ir_node *node)
+{
+       ir_node *pred_proj, *pred, *result_conv;
+       ir_mode *pred_mode, *conv_mode;
+       int      conv_mode_bits;
+       int      pred_mode_bits;
 
-               /* clear remat flag */
-               set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
+       if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
+               return;
 
-               try_remove_from_sched(load);
-               try_remove_from_sched(store);
-               DBG_OPT_AM_D(load, store, irn);
+       assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
+       pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
+       if(is_Proj(pred_proj))
+               pred = get_Proj_pred(pred_proj);
+       else
+               pred = pred_proj;
 
-               DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
-               need_exchange_on_fail = 0;
-               source_possible = 0;
-       }
+       if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
+               return;
 
-       if (source_possible) {
-               /* normalize ops, we need the load on the right */
-               if(cand == IA32_AM_CAND_LEFT) {
-                       if(node_is_ia32_comm(irn)) {
-                               exchange_left_right(irn, &left, &right, 3, 2);
-                               need_exchange_on_fail ^= 1;
-                               cand = IA32_AM_CAND_RIGHT;
-                       } else {
-                               source_possible = 0;
+       /* we know that after a conv, the upper bits are sign extended, so we
+        * only need the 2nd conv if it shrinks the mode */
+       conv_mode      = get_ia32_ls_mode(node);
+       conv_mode_bits = get_mode_size_bits(conv_mode);
+       pred_mode      = get_ia32_ls_mode(pred);
+       pred_mode_bits = get_mode_size_bits(pred_mode);
+
+       if(conv_mode_bits == pred_mode_bits
+                       && get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
+               result_conv = pred_proj;
+       } else if(conv_mode_bits <= pred_mode_bits) {
+               /* if the 2nd conv is smaller than the first conv, then we can always take the
+                * 2nd conv */
+               if(get_irn_n_edges(pred_proj) == 1) {
+                       result_conv = pred_proj;
+                       set_ia32_ls_mode(pred, conv_mode);
+
+                       /* Argh: we must change the opcode to 8bit AND copy the register constraints */
+                       if (get_mode_size_bits(conv_mode) == 8) {
+                               set_irn_op(pred, op_ia32_Conv_I2I8Bit);
+                               set_ia32_in_req_all(pred, get_ia32_in_req_all(node));
+                       }
+               } else {
+                       /* we don't want to end up with 2 loads, so we'd better do nothing */
+                       if(get_irn_mode(pred) == mode_T) {
+                               return;
                        }
-               }
-       }
-
-       if (source_possible) {
-               /* all conditions fullfilled, do transform */
-               assert(cand & IA32_AM_CAND_RIGHT);
-               load = get_Proj_pred(right);
 
-               if(get_irn_n_edges(load) > 1) {
-                       source_possible = 0;
-               }
-       }
+                       result_conv = exact_copy(pred);
+                       set_ia32_ls_mode(result_conv, conv_mode);
 
-       if (source_possible) {
-               addr_b = get_irn_n(load, 0);
-               addr_i = get_irn_n(load, 1);
-
-               /* set new base, index and attributes */
-               set_irn_n(irn, 0, addr_b);
-               set_irn_n(irn, 1, addr_i);
-               add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
-               set_ia32_am_scale(irn, get_ia32_am_scale(load));
-               set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
-               set_ia32_op_type(irn, ia32_AddrModeS);
-               set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
-               set_ia32_ls_mode(irn, get_ia32_ls_mode(load));
-
-               set_ia32_am_sc(irn, get_ia32_am_sc(load));
-               if (is_ia32_am_sc_sign(load))
-                       set_ia32_am_sc_sign(irn);
-
-               /* clear remat flag */
-               set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
-
-               if (is_ia32_use_frame(load)) {
-                       if(get_ia32_frame_ent(load) == NULL) {
-                               set_ia32_need_stackent(irn);
+                       /* Argh: we must change the opcode to 8bit AND copy the register constraints */
+                       if (get_mode_size_bits(conv_mode) == 8) {
+                               set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
+                               set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
                        }
-                       set_ia32_use_frame(irn);
                }
-
-               /* connect to Load memory and disconnect Load */
-               if (get_irn_arity(irn) == 5) {
-                       /* binary AMop */
-                       set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
-                       set_irn_n(irn, 4, get_irn_n(load, 2));
+       } else {
+               /* if both convs have the same sign, then we can take the smaller one */
+               if(get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
+                       result_conv = pred_proj;
                } else {
-                       assert(get_irn_arity(irn) == 4);
-                       /* unary AMop */
-                       set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
-                       set_irn_n(irn, 3, get_irn_n(load, 2));
-               }
-
-               DBG_OPT_AM_S(load, irn);
-
-               /* If Load has a memory Proj, connect it to the op */
-               mem_proj = ia32_get_proj_for_mode(load, mode_M);
-               if (mem_proj != NULL) {
-                       ir_node *res_proj;
-                       ir_mode *mode = get_irn_mode(irn);
-
-                       res_proj = new_rd_Proj(get_irn_dbg_info(irn), irg,
-                                              get_nodes_block(irn), new_Unknown(mode_T),
-                                                                  mode, 0);
-                       set_irn_mode(irn, mode_T);
-                       edges_reroute(irn, res_proj, irg);
-                       set_Proj_pred(res_proj, irn);
-
-                       set_Proj_pred(mem_proj, irn);
-                       set_Proj_proj(mem_proj, 1);
-
-                       if(sched_is_scheduled(irn)) {
-                               sched_add_after(irn, res_proj);
-                               sched_add_after(irn, mem_proj);
+                       /* no optimisation possible if smaller conv is sign-extend */
+                       if(mode_is_signed(pred_mode)) {
+                               return;
                        }
+                       /* we can take the smaller conv if it is unsigned */
+                       result_conv = pred_proj;
                }
+       }
 
-               if(get_irn_n_edges(load) == 0) {
-                       try_remove_from_sched(load);
-               }
-               need_exchange_on_fail = 0;
+       /* kill the conv */
+       exchange(node, result_conv);
 
-               DB((mod, LEVEL_1, "merged with %+F into source AM\n", load));
+       if(get_irn_n_edges(pred_proj) == 0) {
+               kill_node(pred_proj);
+               if(pred != pred_proj)
+                       kill_node(pred);
        }
+       optimize_conv_conv(result_conv);
+}
 
-       /* was exchanged but optimize failed: exchange back */
-       if (need_exchange_on_fail) {
-               exchange_left_right(irn, &left, &right, 3, 2);
-       }
+static void optimize_node(ir_node *node, void *env)
+{
+       (void) env;
+
+       optimize_load_conv(node);
+       optimize_conv_store(node);
+       optimize_conv_conv(node);
 }
 
 /**
- * Performs address mode optimization.
+ * Performs conv and address mode optimization.
  */
-void ia32_optimize_addressmode(ia32_code_gen_t *cg) {
-       /* if we are supposed to do AM or LEA optimization: recalculate edges */
-       if (cg->opt & (IA32_OPT_DOAM | IA32_OPT_LEA)) {
-#if 0
-               edges_deactivate(cg->irg);
-               edges_activate(cg->irg);
-#endif
-       }
-       else {
-               /* no optimizations at all */
-               return;
-       }
-
-       /* beware: we cannot optimize LEA and AM in one run because */
-       /*         LEA optimization adds new nodes to the irg which */
-       /*         invalidates the phase data                       */
-
-       if (cg->opt & IA32_OPT_LEA) {
-               irg_walk_blkwise_graph(cg->irg, NULL, optimize_lea, cg);
-       }
+void ia32_optimize_graph(ia32_code_gen_t *cg)
+{
+       irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);
 
        if (cg->dump)
-               be_dump(cg->irg, "-lea", dump_ir_block_graph_sched);
-
-       if (cg->opt & IA32_OPT_DOAM) {
-               /* we need height information for am optimization */
-               heights_t *h = heights_new(cg->irg);
-               ia32_am_opt_env_t env;
-
-               env.cg = cg;
-               env.h  = h;
-
-               irg_walk_blkwise_graph(cg->irg, NULL, optimize_am, &env);
+               be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);
+}
 
-               heights_free(h);
-       }
+void ia32_init_optimize(void)
+{
+       FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
 }