ia32: fixed some bugs in the address mode / LEA optimization (LEA folding, AM candidate checks, frame entities)
[libfirm] / ir / be / ia32 / ia32_optimize.c
index 5f53494..a0a9c5c 100644
 #include "tv.h"
 #include "irgmod.h"
 
-#include "../benode_t.h"
+#include "../be_t.h"
+#include "../beabi.h"
 
 #include "ia32_new_nodes.h"
 #include "bearch_ia32_t.h"
+#include "gen_ia32_regalloc_if.h"     /* the generated interface (register type and class defenitions) */
 
 #undef is_NoMem
 #define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)
 
+static int be_is_NoReg(be_abi_irg_t *babi, const ir_node *irn) {
+       if (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_XXX]) == irn ||
+               be_abi_get_callee_save_irn(babi, &ia32_fp_regs[REG_XXXX]) == irn)
+       {
+               return 1;
+       }
+
+       return 0;
+}
+
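The helper is queried wherever an operand slot may hold the backend's virtual NoReg placeholder instead of a real value. A minimal usage sketch, mirroring the fold_addr code further down (babi, isadd and right are the names used there):

        /* an Add only has two "real" operands if the right input is not the NoReg node */
        if (isadd && ! be_is_NoReg(babi, right)) {
                /* ... normalize operands and try to fold into a LEA ... */
        }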
 /**
  * creates a unique ident by adding a number to a tag
  *
@@ -161,7 +173,6 @@ void ia32_place_consts(ir_node *irn, void *env) {
 
        mode = get_irn_mode(irn);
 
-       tenv.arch_env = cg->arch_env;
        tenv.block    = get_nodes_block(irn);
        tenv.cg       = cg;
        tenv.irg      = cg->irg;
@@ -182,6 +193,11 @@ void ia32_place_consts(ir_node *irn, void *env) {
                        tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i);
                }
 
+               /* put the const into the block where the original const was */
+               if (! cg->opt.placecnst) {
+                       tenv.block = get_nodes_block(pred);
+               }
+
                switch (opc) {
                        case iro_Const:
                                cnst = gen_Const(&tenv);
@@ -212,26 +228,18 @@ void ia32_place_consts(ir_node *irn, void *env) {
  ******************************************************************/
 
 static int node_is_comm(const ir_node *irn) {
-       if (is_ia32_Add(irn)  ||
-               is_ia32_fAdd(irn) ||
-               is_ia32_Mul(irn)  ||
-               is_ia32_Mulh(irn) ||
-               is_ia32_fMul(irn) ||
-               is_ia32_And(irn)  ||
-               is_ia32_fAnd(irn) ||
-               is_ia32_Or(irn)   ||
-               is_ia32_fOr(irn)  ||
-               is_ia32_Eor(irn)  ||
-               is_ia32_fEor(irn) ||
-               is_ia32_Min(irn)  ||
-               is_ia32_fMin(irn) ||
-               is_ia32_Max(irn)  ||
-               is_ia32_fMax(irn))
-       {
-               return 1;
+       return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
+}
+
+static int ia32_get_irn_n_edges(const ir_node *irn) {
+       const ir_edge_t *edge;
+       int cnt = 0;
+
+       foreach_out_edge(irn, edge) {
+               cnt++;
        }
 
-       return 0;
+       return cnt;
 }
 
 /**
@@ -256,30 +264,134 @@ static ir_node *get_mem_proj(const ir_node *irn) {
 }
 
 /**
- * Determines if irn is a Proj and if is_op_func returns true for it's predecessor.
+ * Returns the Proj with number 0 connected to irn.
  */
-static int pred_is_specific_node(const ir_node *irn, int (*is_op_func)(const ir_node *n)) {
-       if (is_Proj(irn) && is_op_func(get_Proj_pred(irn))) {
+static ir_node *get_res_proj(const ir_node *irn) {
+       const ir_edge_t *edge;
+       ir_node         *src;
+
+       assert(get_irn_mode(irn) == mode_T && "expected mode_T node");
+
+       foreach_out_edge(irn, edge) {
+               src = get_edge_src_irn(edge);
+
+               assert(is_Proj(src) && "Proj expected");
+
+               if (get_Proj_proj(src) == 0)
+                       return src;
+       }
+
+       return NULL;
+}
+
+/**
+ * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
+ *
+ * @param pred       The node to be checked
+ * @param is_op_func The check-function
+ * @return 1 if conditions are fulfilled, 0 otherwise
+ */
+static int pred_is_specific_node(const ir_node *pred, int (*is_op_func)(const ir_node *n)) {
+       if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
                return 1;
        }
 
        return 0;
 }
 
+/**
+ * Determines if pred is a Proj and if is_op_func returns true for its predecessor
+ * and if the predecessor is in block bl.
+ *
+ * @param bl         The block
+ * @param pred       The node to be checked
+ * @param is_op_func The check-function
+ * @return 1 if conditions are fulfilled, 0 otherwise
+ */
+static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
+       int (*is_op_func)(const ir_node *n))
+{
+       if (is_Proj(pred)) {
+               pred = get_Proj_pred(pred);
+               if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * Checks if irn is a candidate for address calculation or address mode.
+ *
+ * address calculation (AC):
+ * - none of the operands is a Load within the same block        OR
+ * - all such Loads have more than one user                      OR
+ * - the irn has a frame entity (it's a former FrameAddr)
+ *
+ * address mode (AM):
+ * - at least one operand has to be a Load within the same block AND
+ * - the Load must not have users other than the irn             AND
+ * - the irn must not have a frame entity set
+ *
+ * @param block       The block the Loads must (or must not) be in
+ * @param irn         The irn to check
+ * @param check_addr  1 to check for address calculation (AC), 0 to check for address mode (AM)
+ * @return 1 if irn is a candidate for AC or AM, 0 otherwise
+ */
+static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr) {
+       ir_node *load_proj;
+       int      n, is_cand = check_addr;
+
+       if (pred_is_specific_nodeblock(block, get_irn_n(irn, 2), is_ia32_Load)) {
+               load_proj = get_irn_n(irn, 2);
+               n         = ia32_get_irn_n_edges(load_proj);
+               is_cand   = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
+       }
+
+       if (pred_is_specific_nodeblock(block, get_irn_n(irn, 3), is_ia32_Load)) {
+               load_proj = get_irn_n(irn, 3);
+               n         = ia32_get_irn_n_edges(load_proj);
+               is_cand   = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
+       }
+
+       /* a frame entity forces address calculation and rules out address mode */
+       is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : is_cand;
+
+       return is_cand;
+}
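A short worked example of the intended classification (hypothetical IR, all nodes in the same block and the Load result used only by the Add):

        /*     t = Load(p)                        // ia32 Load, result Proj t has one user
         *     r = Add(t, x)
         * then
         *     is_candidate(block, Add, 1) == 0   // not an address calculation -> no LEA folding
         *     is_candidate(block, Add, 0) == 1   // the Load may be folded as address mode
         */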
+
+/**
+ * Compares the base and index addr and the load/store entities
+ * and returns 1 if they are equal.
+ */
+static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
+                                                                       const ir_node *addr_b, const ir_node *addr_i)
+{
+       int     is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1));
+       entity *lent     = get_ia32_frame_ent(load);
+       entity *sent     = get_ia32_frame_ent(store);
+
+       /* are both entities set and equal? */
+       is_equal = (lent && sent && (lent == sent)) ? 1 : is_equal;
+
+       return is_equal;
+}
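For orientation, the two foldings described further down correspond to the usual x86 address mode forms; a rough, hypothetical C-level illustration (not taken from the patch):

        /* hypothetical input illustrating Source and Dest address mode */
        void am_examples(int *p, int x, int *r) {
                *r = *p + x;   /* Source AM: the Load is folded into the Add  -> add reg, [p] */
                *p += x;       /* Dest AM: Store -> Add -> Load collapses     -> add [p], x   */
        }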
+
 /**
  * Folds Add or Sub to LEA if possible
  */
-static ir_node *fold_addr(ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg) {
-       ir_graph *irg      = get_irn_irg(irn);
-       ir_mode  *mode     = get_irn_mode(irn);
-       dbg_info *dbg      = get_irn_dbg_info(irn);
-       ir_node  *block    = get_nodes_block(irn);
-       ir_node  *res      = irn;
-       char     *offs     = NULL;
-       char     *new_offs = NULL;
-       int       scale    = 0;
-       int       isadd    = 0;
-       int       dolea    = 0;
+static ir_node *fold_addr(be_abi_irg_t *babi, ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg) {
+       ir_graph *irg       = get_irn_irg(irn);
+       ir_mode  *mode      = get_irn_mode(irn);
+       dbg_info *dbg       = get_irn_dbg_info(irn);
+       ir_node  *block     = get_nodes_block(irn);
+       ir_node  *res       = irn;
+       char     *offs      = NULL;
+       char     *offs_cnst = NULL;
+       char     *offs_lea  = NULL;
+       int       scale     = 0;
+       int       isadd     = 0;
+       int       dolea     = 0;
        ir_node  *left, *right, *temp;
        ir_node  *base, *index;
        ia32_am_flavour_t am_flav;
@@ -290,10 +402,18 @@ static ir_node *fold_addr(ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg)
        left  = get_irn_n(irn, 2);
        right = get_irn_n(irn, 3);
 
-       /* "normalize" arguments in case of add */
-       if  (isadd) {
+       base    = left;
+       index   = noreg;
+       offs    = NULL;
+       scale   = 0;
+       am_flav = 0;
+
+       /* "normalize" arguments in case of add with two operands */
+       if  (isadd && ! be_is_NoReg(babi, right)) {
                /* put LEA == ia32_am_O as right operand */
                if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
+                       set_irn_n(irn, 2, right);
+                       set_irn_n(irn, 3, left);
                        temp  = left;
                        left  = right;
                        right = temp;
@@ -301,59 +421,63 @@ static ir_node *fold_addr(ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg)
 
                /* put LEA != ia32_am_O as left operand */
                if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
+                       set_irn_n(irn, 2, right);
+                       set_irn_n(irn, 3, left);
                        temp  = left;
                        left  = right;
                        right = temp;
                }
 
-               /* put SHL as right operand */
-               if (pred_is_specific_node(left, is_ia32_Shl)) {
+               /* put SHL as left operand iff left is NOT a LEA */
+               if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
+                       set_irn_n(irn, 2, right);
+                       set_irn_n(irn, 3, left);
                        temp  = left;
                        left  = right;
                        right = temp;
                }
        }
 
-       /* Left operand could already be a LEA */
-       if (is_ia32_Lea(left)) {
-               DBG((mod, LEVEL_1, "\tgot LEA as left operand\n"));
+       /* check if the node carries an immediate (folded Const) */
+       if (get_ia32_cnst(irn)) {
+               DBG((mod, LEVEL_1, "\tfound op with imm"));
+
+               offs_cnst = get_ia32_cnst(irn);
+               dolea     = 1;
+       }
 
-               base  = get_irn_n(left, 0);
-               index = get_irn_n(left, 1);
-               offs  = get_ia32_am_offs(left);
-               scale = get_ia32_am_scale(left);
+       /* determine the operand which needs to be checked */
+       if (be_is_NoReg(babi, right)) {
+               temp = left;
        }
        else {
-               base  = left;
-               index = noreg;
-               offs  = NULL;
-               scale = 0;
-
+               temp = right;
        }
 
-       /* check if operand is either const or right operand is AMConst (LEA with ia32_am_O) */
-       if (get_ia32_cnst(irn)) {
-               DBG((mod, LEVEL_1, "\tfound op with imm"));
-
-               new_offs = get_ia32_cnst(irn);
-               dolea    = 1;
-       }
-       else if (is_ia32_Lea(right) && get_ia32_am_flavour(right) == ia32_am_O) {
+       /* check if the determined operand is an AMConst (LEA with ia32_am_O) */
+       if (is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
                DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));
 
-               new_offs = get_ia32_am_offs(right);
+               offs_lea = get_ia32_am_offs(temp);
                dolea    = 1;
        }
-       /* we can only get an additional index if there isn't already one */
-       else if (isadd && be_is_NoReg(index)) {
+
+       if (isadd) {
                /* default for add -> make right operand to index */
                index = right;
                dolea = 1;
 
                DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));
+
+               /* determine the operand which needs to be checked */
+               temp = left;
+               if (is_ia32_Lea(left)) {
+                       temp = right;
+               }
+
                /* check for SHL 1,2,3 */
-               if (pred_is_specific_node(right, is_ia32_Shl)) {
-                       temp = get_Proj_pred(right);
+               if (pred_is_specific_node(temp, is_ia32_Shl)) {
+                       temp = get_Proj_pred(temp);
 
                        if (get_ia32_Immop_tarval(temp)) {
                                scale = get_tarval_long(get_ia32_Immop_tarval(temp));
@@ -365,11 +489,46 @@ static ir_node *fold_addr(ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg)
                                }
                        }
                }
+
+               /* fix base */
+               if (! be_is_NoReg(babi, index)) {
+                       /* if we have index, but left == right -> no base */
+                       if (left == right) {
+                               base = noreg;
+                       }
+                       else if (! is_ia32_Lea(left) && (index != right)) {
+                               /* index != right -> we found a good Shl           */
+                               /* left  != LEA   -> this Shl was the left operand */
+                               /* -> base is right operand                        */
+                               base = right;
+                       }
+               }
+       }
+
+       /* Try to assimilate a LEA as left operand */
+       if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
+               am_flav = get_ia32_am_flavour(left);
+
+               /* If we have an Add with a real right operand (not NoReg) and   */
+               /* the LEA already contains an index calculation, then we create */
+               /* a new LEA.                                                    */
+               /* If the LEA already contains a frame_entity, then we also      */
+               /* create a new one, otherwise we would lose it.                 */
+               if (isadd && ((!be_is_NoReg(babi, index) && (am_flav & ia32_am_I)) || get_ia32_frame_ent(left))) {
+                       DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
+               }
+               else {
+                       DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
+                       offs  = get_ia32_am_offs(left);
+                       base  = get_irn_n(left, 0);
+                       index = get_irn_n(left, 1);
+                       scale = get_ia32_am_scale(left);
+               }
        }
 
        /* ok, we can create a new LEA */
        if (dolea) {
-               res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode);
+               res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is);
 
                /* add the old offset of a previous LEA */
                if (offs) {
@@ -378,26 +537,46 @@ static ir_node *fold_addr(ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg)
 
                /* add the new offset */
                if (isadd) {
-                       if (new_offs) {
-                               add_ia32_am_offs(res, new_offs);
+                       if (offs_cnst) {
+                               add_ia32_am_offs(res, offs_cnst);
+                       }
+                       if (offs_lea) {
+                               add_ia32_am_offs(res, offs_lea);
                        }
                }
                else {
-                       sub_ia32_am_offs(res, new_offs);
+                       /* either lea_O-cnst, -cnst or -lea_O  */
+                       if (offs_cnst) {
+                               if (offs_lea) {
+                                       add_ia32_am_offs(res, offs_lea);
+                               }
+
+                               sub_ia32_am_offs(res, offs_cnst);
+                       }
+                       else {
+                               sub_ia32_am_offs(res, offs_lea);
+                       }
                }
 
+               /* copy the frame entity (could be set in case of Add */
+               /* which was a FrameAddr) */
+               set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
+
+               if (is_ia32_use_frame(irn))
+                       set_ia32_use_frame(res);
+
                /* set scale */
                set_ia32_am_scale(res, scale);
 
                am_flav = ia32_am_N;
                /* determine new am flavour */
-               if (offs || new_offs) {
+               if (offs || offs_cnst || offs_lea) {
                        am_flav |= ia32_O;
                }
-               if (! be_is_NoReg(base)) {
+               if (! be_is_NoReg(babi, base)) {
                        am_flav |= ia32_B;
                }
-               if (! be_is_NoReg(index)) {
+               if (! be_is_NoReg(babi, index)) {
                        am_flav |= ia32_I;
                }
                if (scale > 0) {
@@ -409,6 +588,11 @@ static ir_node *fold_addr(ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg)
 
                DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));
 
+               /* get the result Proj of the Add/Sub */
+               irn = get_res_proj(irn);
+
+               assert(irn && "Couldn't find result proj");
+
                /* exchange the old op with the new LEA */
                exchange(irn, res);
        }
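To make the transformation concrete, a hedged before/after sketch of the simplest case from the pattern list below (node names are made up):

        /*     a = Add(x, NoReg)   with get_ia32_cnst(a) == "8"
         * fold_addr builds
         *     a' = Lea(x, NoReg)  with am_offs "8" and scale 0
         * and exchanges the Add's result Proj with a', so a single LEA
         * computes the address  x + 8.
         */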
@@ -420,15 +604,16 @@ static ir_node *fold_addr(ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg)
  * Optimizes a pattern around irn to address mode if possible.
  */
 void ia32_optimize_am(ir_node *irn, void *env) {
-       ia32_code_gen_t   *cg  = env;
-       ir_graph          *irg = cg->irg;
-       firm_dbg_module_t *mod = cg->mod;
-       ir_node           *res = irn;
+       ia32_code_gen_t   *cg   = env;
+       ir_graph          *irg  = cg->irg;
+       firm_dbg_module_t *mod  = cg->mod;
+       ir_node           *res  = irn;
+       be_abi_irg_t      *babi = cg->birg->abi;
        dbg_info          *dbg;
        ir_mode           *mode;
        ir_node           *block, *noreg_gp, *noreg_fp;
        ir_node           *left, *right, *temp;
-       ir_node           *store, *mem_proj;
+       ir_node           *store, *load, *mem_proj;
        ir_node           *succ, *addr_b, *addr_i;
        int                check_am_src = 0;
 
@@ -445,48 +630,45 @@ void ia32_optimize_am(ir_node *irn, void *env) {
 
        /* 1st part: check for address calculations and transform them into Lea */
 
-       /* Following cases can occur: */
-       /* - Sub (l, imm) -> LEA [base - offset] */
+       /* Following cases can occur:                                  */
+       /* - Sub (l, imm) -> LEA [base - offset]                       */
        /* - Sub (l, r == LEA with ia32_am_O)   -> LEA [base - offset] */
-       /* - Add (l, imm) -> LEA [base + offset] */
-       /* - Add (l, r == LEA with ia32_am_O)  -> LEA [base + offset] */
-       /* - Add (l == LEA with ia32_am_O, r)  -> LEA [base + offset] */
-       /* - Add (l, r) -> LEA [base + index * scale] */
-       /*              with scale > 1 iff l/r == shl (1,2,3) */
+       /* - Add (l, imm) -> LEA [base + offset]                       */
+       /* - Add (l, r == LEA with ia32_am_O)  -> LEA [base + offset]  */
+       /* - Add (l == LEA with ia32_am_O, r)  -> LEA [base + offset]  */
+       /* - Add (l, r) -> LEA [base + index * scale]                  */
+       /*              with scale > 1 iff l/r == shl (1,2,3)          */
 
        if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
                left  = get_irn_n(irn, 2);
                right = get_irn_n(irn, 3);
 
            /* Do not try to create a LEA if one of the operands is a Load. */
-               if (! pred_is_specific_node(left,  is_ia32_Load)  &&
-                       ! pred_is_specific_node(right, is_ia32_Load))
-               {
-                       res = fold_addr(irn, mod, noreg_gp);
+               /* check if irn is a candidate for address calculation */
+               if (is_candidate(block, irn, 1)) {
+                       res = fold_addr(babi, irn, mod, noreg_gp);
                }
        }
 
-       /* 2nd part: fold following patterns:
+       /* 2nd part: fold following patterns:                                               */
        /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
        /* - Store -> LEA into Store }       it might be better to keep the LEA             */
-       /* - op -> Load into AMop with am_Source
-       /*   conditions:                */
-       /*     - op is am_Source capable AND   */
-       /*     - the Load is only used by this op AND */
-       /*     - the Load is in the same block */
-       /* - Store -> op -> Load  into AMop with am_Dest  */
-       /*   conditions:                */
-       /*     - op is am_Dest capable AND   */
-       /*     - the Store uses the same address as the Load AND */
-       /*     - the Load is only used by this op AND */
-       /*     - the Load and Store are in the same block AND  */
-       /*     - nobody else uses the result of the op */
+       /* - op -> Load into AMop with am_Source                                            */
+       /*   conditions:                                                                    */
+       /*     - op is am_Source capable AND                                                */
+       /*     - the Load is only used by this op AND                                       */
+       /*     - the Load is in the same block                                              */
+       /* - Store -> op -> Load  into AMop with am_Dest                                    */
+       /*   conditions:                                                                    */
+       /*     - op is am_Dest capable AND                                                  */
+       /*     - the Store uses the same address as the Load AND                            */
+       /*     - the Load is only used by this op AND                                       */
+       /*     - the Load and Store are in the same block AND                               */
+       /*     - nobody else uses the result of the op                                      */
 
        if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
                /* 1st: check for Load/Store -> LEA   */
-               if (is_ia32_Load(irn)  || is_ia32_fLoad(irn) ||
-                       is_ia32_Store(irn) || is_ia32_fStore(irn))
-               {
+               if (is_ia32_Ld(irn) || is_ia32_St(irn)) {
                        left = get_irn_n(irn, 0);
 
                        if (is_ia32_Lea(left)) {
@@ -494,28 +676,31 @@ void ia32_optimize_am(ir_node *irn, void *env) {
                                add_ia32_am_offs(irn, get_ia32_am_offs(left));
                                set_ia32_am_scale(irn, get_ia32_am_scale(left));
                                set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
-                               set_ia32_op_type(irn, get_ia32_op_type(left));
+
+                               set_ia32_op_type(irn, is_ia32_St(irn) ? ia32_AddrModeD : ia32_AddrModeS);
 
                                /* set base and index */
                                set_irn_n(irn, 0, get_irn_n(left, 0));
                                set_irn_n(irn, 1, get_irn_n(left, 1));
                        }
                }
-               /* check if at least one operand is a Load */
-               else if (pred_is_specific_node(get_irn_n(irn, 2), is_ia32_Load)  ||
-                                pred_is_specific_node(get_irn_n(irn, 2), is_ia32_fLoad) ||
-                                pred_is_specific_node(get_irn_n(irn, 3), is_ia32_Load)  ||
-                                pred_is_specific_node(get_irn_n(irn, 3), is_ia32_fLoad))
-               {
+               /* check if the node is an address mode candidate */
+               else if (is_candidate(block, irn, 0)) {
+                       left  = get_irn_n(irn, 2);
+                       if (get_irn_arity(irn) == 4) {
+                               /* it's an "unary" operation */
+                               right = left;
+                       }
+                       else {
+                               right = get_irn_n(irn, 3);
+                       }
 
                        /* normalize commutative ops */
                        if (node_is_comm(irn)) {
-                               left  = get_irn_n(irn, 2);
-                               right = get_irn_n(irn, 3);
-
-                               /* assure that Left operand is always a Load if there is one */
-                               if (pred_is_specific_node(right, is_ia32_Load) ||
-                                       pred_is_specific_node(right, is_ia32_fLoad))
+                               /* Assure that the right operand is always a Load if there is one, */
+                               /* because non-commutative ops can only use Dest AM if the right   */
+                               /* operand is a Load, so we only need to check the right operand.  */
+                               if (pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                                {
                                        set_irn_n(irn, 2, right);
                                        set_irn_n(irn, 3, left);
@@ -529,28 +714,25 @@ void ia32_optimize_am(ir_node *irn, void *env) {
                        /* check for Store -> op -> Load */
 
                        /* Store -> op -> Load optimization is only possible if supported by op */
-                       if (get_ia32_am_support(irn) & ia32_am_Dest) {
+                       /* and if right operand is a Load                                       */
+                       if ((get_ia32_am_support(irn) & ia32_am_Dest) &&
+                                pred_is_specific_nodeblock(block, right, is_ia32_Ld))
+                       {
 
                                /* An address mode capable op always has a result Proj.                  */
                                /* If this Proj is used by more than one other node, we don't need to    */
                                /* check further, otherwise we check for Store and remember the address, */
                                /* the Store points to. */
 
-                               succ = get_edge_src_irn(get_irn_out_edge_first(irn));
-                               assert(is_Proj(succ) && "successor of AM node is not Proj");
-
-                               if (get_Proj_proj(succ) != 0) {
-                                       succ = get_edge_src_irn(get_irn_out_edge_next(irn, get_irn_out_edge_first(irn)));
-                                       assert(is_Proj(succ) && "successor of AM node is not Proj");
-                                       assert(get_Proj_proj(succ) == 0 && "Couldn't find result proj");
-                               }
+                               succ = get_res_proj(irn);
+                               assert(succ && "Couldn't find result proj");
 
                                addr_b = NULL;
                                addr_i = NULL;
                                store  = NULL;
 
                                /* now check for users and Store */
-                               if (get_irn_n_edges(succ) == 1) {
+                               if (ia32_get_irn_n_edges(succ) == 1) {
                                        succ = get_edge_src_irn(get_irn_out_edge_first(succ));
 
                                        if (is_ia32_fStore(succ) || is_ia32_Store(succ)) {
@@ -572,19 +754,14 @@ void ia32_optimize_am(ir_node *irn, void *env) {
 
                                if (store) {
                                        /* we found a Store as single user: Now check for Load */
-                                       left  = get_irn_n(irn, 2);
-                                       right = get_irn_n(irn, 3);
 
-                                       /* Could be that the right operand is also a Load, so we make */
-                                       /* sure that the "interesting" Load is always the left one    */
-
-                                       /* right != NoMem means, we have a "binary" operation */
-                                       if (! is_NoMem(right) &&
-                                               (pred_is_specific_node(right, is_ia32_Load) ||
-                                                pred_is_specific_node(right, is_ia32_fLoad)))
+                                       /* Extra check for commutative ops with two Loads */
+                                       /* -> put the interesting Load right              */
+                                       if (node_is_comm(irn) &&
+                                               pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                                        {
-                                               if ((addr_b == get_irn_n(get_Proj_pred(right), 0)) &&
-                                                       (addr_i == get_irn_n(get_Proj_pred(right), 1)))
+                                               if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
+                                                       (addr_i == get_irn_n(get_Proj_pred(left), 1)))
                                                {
                                                        /* We exchange left and right, so it's easier to kill     */
                                                        /* the correct Load later and to handle unary operations. */
@@ -598,35 +775,38 @@ void ia32_optimize_am(ir_node *irn, void *env) {
                                        }
 
                                        /* skip the Proj for easier access */
-                                       left  = get_Proj_pred(left);
+                                       load = get_Proj_pred(right);
 
                                        /* Compare Load and Store address */
-                                       if ((addr_b == get_irn_n(left, 0)) && (addr_i == get_irn_n(left, 1)))
-                                       {
-                                               /* Left Load is from same address, so we can */
+                                       if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
+                                               /* Right Load is from same address, so we can */
                                                /* disconnect the Load and Store here        */
 
                                                /* set new base, index and attributes */
                                                set_irn_n(irn, 0, addr_b);
                                                set_irn_n(irn, 1, addr_i);
-                                               add_ia32_am_offs(irn, get_ia32_am_offs(left));
-                                               set_ia32_am_scale(irn, get_ia32_am_scale(left));
-                                               set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
+                                               add_ia32_am_offs(irn, get_ia32_am_offs(load));
+                                               set_ia32_am_scale(irn, get_ia32_am_scale(load));
+                                               set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
                                                set_ia32_op_type(irn, ia32_AddrModeD);
+                                               set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
+                                               set_ia32_ls_mode(irn, get_ia32_ls_mode(load));
 
-                                               /* connect to Load memory */
+                                               if (is_ia32_use_frame(load))
+                                                       set_ia32_use_frame(irn);
+
+                                               /* connect to Load memory and disconnect Load */
                                                if (get_irn_arity(irn) == 5) {
                                                        /* binary AMop */
-                                                       set_irn_n(irn, 4, get_irn_n(left, 2));
+                                                       set_irn_n(irn, 4, get_irn_n(load, 2));
+                                                       set_irn_n(irn, 3, noreg_gp);
                                                }
                                                else {
                                                        /* unary AMop */
-                                                       set_irn_n(irn, 3, get_irn_n(left, 2));
+                                                       set_irn_n(irn, 3, get_irn_n(load, 2));
+                                                       set_irn_n(irn, 2, noreg_gp);
                                                }
 
-                                               /* disconnect from Load */
-                                               set_irn_n(irn, 2, noreg_gp);
-
                                                /* connect the memory Proj of the Store to the op */
                                                mem_proj = get_mem_proj(store);
                                                set_Proj_pred(mem_proj, irn);
@@ -638,48 +818,69 @@ void ia32_optimize_am(ir_node *irn, void *env) {
                                        check_am_src = 1;
                                }
                        } /* if (support AM Dest) */
-                       else {
+                       else if (get_ia32_am_support(irn) & ia32_am_Source) {
                                /* op doesn't support am AM Dest -> check for AM Source */
                                check_am_src = 1;
                        }
 
-                       /* optimize op -> Load iff Load is only used by this op */
-                       if (check_am_src) {
-                               left = get_irn_n(irn, 2);
+                       /* normalize commutative ops */
+                       if (node_is_comm(irn)) {
+                               /* Assure that left operand is always a Load if there is one */
+                               /* because non-commutative ops can only use Source AM if the */
+                               /* left operand is a Load, so we only need to check the left */
+                               /* operand afterwards.                                       */
+                               if (pred_is_specific_nodeblock(block, right, is_ia32_Ld))       {
+                                       set_irn_n(irn, 2, right);
+                                       set_irn_n(irn, 3, left);
 
-                               if (get_irn_n_edges(left) == 1) {
-                                       left = get_Proj_pred(left);
+                                       temp  = left;
+                                       left  = right;
+                                       right = temp;
+                               }
+                       }
 
-                                       addr_b = get_irn_n(left, 0);
-                                       addr_i = get_irn_n(left, 1);
+                       /* optimize op -> Load iff the Load is only used by this op */
+                       /* and the left operand is a Load only used by this irn     */
+                       if (check_am_src                                        &&
+                               pred_is_specific_nodeblock(block, left, is_ia32_Ld) &&
+                               (ia32_get_irn_n_edges(left) == 1))
+                       {
+                               left = get_Proj_pred(left);
 
-                                       /* set new base, index and attributes */
-                                       set_irn_n(irn, 0, addr_b);
-                                       set_irn_n(irn, 1, addr_i);
-                                       add_ia32_am_offs(irn, get_ia32_am_offs(left));
-                                       set_ia32_am_scale(irn, get_ia32_am_scale(left));
-                                       set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
-                                       set_ia32_op_type(irn, ia32_AddrModeS);
+                               addr_b = get_irn_n(left, 0);
+                               addr_i = get_irn_n(left, 1);
 
-                                       /* connect to Load memory */
-                                       if (get_irn_arity(irn) == 5) {
-                                               /* binary AMop */
-                                               set_irn_n(irn, 4, get_irn_n(left, 2));
-                                       }
-                                       else {
-                                               /* unary AMop */
-                                               set_irn_n(irn, 3, get_irn_n(left, 2));
-                                       }
+                               /* set new base, index and attributes */
+                               set_irn_n(irn, 0, addr_b);
+                               set_irn_n(irn, 1, addr_i);
+                               add_ia32_am_offs(irn, get_ia32_am_offs(left));
+                               set_ia32_am_scale(irn, get_ia32_am_scale(left));
+                               set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
+                               set_ia32_op_type(irn, ia32_AddrModeS);
+                               set_ia32_frame_ent(irn, get_ia32_frame_ent(left));
+                               set_ia32_ls_mode(irn, get_ia32_ls_mode(left));
 
-                                       /* disconnect from Load */
-                                       set_irn_n(irn, 2, noreg_gp);
+                               if (is_ia32_use_frame(left))
+                                       set_ia32_use_frame(irn);
 
-                                       /* If Load has a memory Proj, connect it to the op */
-                                       mem_proj = get_mem_proj(left);
-                                       if (mem_proj) {
-                                               set_Proj_pred(mem_proj, irn);
-                                               set_Proj_proj(mem_proj, 1);
-                                       }
+                               /* connect to Load memory */
+                               if (get_irn_arity(irn) == 5) {
+                                       /* binary AMop */
+                                       set_irn_n(irn, 4, get_irn_n(left, 2));
+                               }
+                               else {
+                                       /* unary AMop */
+                                       set_irn_n(irn, 3, get_irn_n(left, 2));
+                               }
+
+                               /* disconnect from Load */
+                               set_irn_n(irn, 2, noreg_gp);
+
+                               /* If Load has a memory Proj, connect it to the op */
+                               mem_proj = get_mem_proj(left);
+                               if (mem_proj) {
+                                       set_Proj_pred(mem_proj, irn);
+                                       set_Proj_proj(mem_proj, 1);
                                }
                        }
                }