diff --git a/ir/be/ia32/ia32_optimize.c b/ir/be/ia32/ia32_optimize.c index bb89c6f69..a72419aae 100644 --- a/ir/be/ia32/ia32_optimize.c +++ b/ir/be/ia32/ia32_optimize.c @@ -1,3 +1,13 @@ +/** + * Project: libFIRM + * File name: ir/be/ia32/ia32_optimize.c + * Purpose: Implements several optimizations for IA32 + * Author: Christian Wuerdig + * CVS-ID: $Id$ + * Copyright: (c) 2006 Universität Karlsruhe + * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE. + */ + #ifdef HAVE_CONFIG_H #include "config.h" #endif @@ -9,6 +19,8 @@ #include "iredges.h" #include "tv.h" #include "irgmod.h" +#include "irgwalk.h" +#include "height.h" #include "../be_t.h" #include "../beabi.h" @@ -19,6 +31,14 @@ #include "bearch_ia32_t.h" #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */ #include "ia32_transform.h" +#include "ia32_dbg_stat.h" + +typedef enum { + IA32_AM_CAND_NONE = 0, + IA32_AM_CAND_LEFT = 1, + IA32_AM_CAND_RIGHT = 2, + IA32_AM_CAND_BOTH = 3 +} ia32_am_cand_t; #undef is_NoMem #define is_NoMem(irn) (get_irn_op(irn) == op_NoMem) @@ -83,15 +103,17 @@ static ir_node *gen_SymConst(ia32_transform_env_t *env) { ir_node *block = env->block; if (mode_is_float(mode)) { + FP_USED(env->cg); if (USE_SSE2(env->cg)) - cnst = new_rd_ia32_fConst(dbg, irg, block, mode); + cnst = new_rd_ia32_xConst(dbg, irg, block, get_irg_no_mem(irg), mode); else - cnst = new_rd_ia32_vfConst(dbg, irg, block, mode); - } - else { - cnst = new_rd_ia32_Const(dbg, irg, block, mode); + cnst = new_rd_ia32_vfConst(dbg, irg, block, get_irg_no_mem(irg), mode); } + else + cnst = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), mode); + set_ia32_Const_attr(cnst, env->irn); + return cnst; } @@ -120,7 +142,7 @@ static ir_type *get_prim_type(pmap *types, ir_mode *mode) static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst) { tarval *tv = get_Const_tarval(cnst); - pmap_entry *e = pmap_find(cg->tv_ent, tv); + pmap_entry *e = pmap_find(cg->isa->tv_ent, tv); entity *res; ir_graph *rem; @@ -128,9 +150,9 @@ static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst) ir_mode *mode = get_irn_mode(cnst); ir_type *tp = get_Const_type(cnst); if (tp == firm_unknown_type) - tp = get_prim_type(cg->types, mode); + tp = get_prim_type(cg->isa->types, mode); - res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp); + res = new_entity(get_glob_type(), unique_id(".LC%u"), tp); set_entity_ld_ident(res, get_entity_ident(res)); set_entity_visibility(res, visibility_local); @@ -143,6 +165,8 @@ static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst) current_ir_graph = get_const_code_irg(); set_atomic_ent_value(res, new_Const_type(tv, tp)); current_ir_graph = rem; + + pmap_insert(cg->isa->tv_ent, tv, res); } else res = e->value; @@ -168,6 +192,7 @@ static ir_node *gen_Const(ia32_transform_env_t *env) { ir_mode *mode = env->mode; if (mode_is_float(mode)) { + FP_USED(env->cg); if (! 
USE_SSE2(env->cg)) { cnst_classify_t clss = classify_Const(node); @@ -183,7 +208,7 @@ static ir_node *gen_Const(ia32_transform_env_t *env) { cnst = gen_SymConst(env); } else { - cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node)); + cnst = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), get_irn_mode(node)); set_ia32_Const_attr(cnst, node); } return cnst; @@ -236,7 +261,7 @@ void ia32_place_consts_set_modes(ir_node *irn, void *env) { } /* put the const into the block where the original const was */ - if (! cg->opt.placecnst) { + if (! (cg->opt & IA32_OPT_PLACECNST)) { tenv.block = get_nodes_block(pred); } @@ -367,7 +392,7 @@ static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) { int same_args = 1; for (i = 0; i < n; i++) { - if (get_irn_n(cand, i) == get_irn_n(irn, i)) { + if (get_irn_n(cand, i) != get_irn_n(irn, i)) { same_args = 0; break; } @@ -391,13 +416,125 @@ static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) { if (replace) { DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn)); + DBG_OPT_CJMP(irn); - set_irn_op(irn, op_ia32_CJmp); + set_irn_op(irn, op_ia32_CJmpAM); DB((cg->mod, LEVEL_1, "%+F\n", irn)); } } +/** + * Creates a Push from Store(IncSP(gp_reg_size)) + */ +static void ia32_create_Push(ir_node *irn, ia32_code_gen_t *cg) { + ir_node *sp = get_irn_n(irn, 0); + ir_node *val, *next, *push, *bl, *proj_M, *proj_res, *old_proj_M; + const ir_edge_t *edge; + + if (get_ia32_am_offs(irn) || !be_is_IncSP(sp)) + return; + + if (arch_get_irn_register(cg->arch_env, get_irn_n(irn, 1)) != + &ia32_gp_regs[REG_GP_NOREG]) + return; + + val = get_irn_n(irn, 2); + if (mode_is_float(get_irn_mode(val))) + return; + + if (be_get_IncSP_direction(sp) != be_stack_dir_expand || + be_get_IncSP_offset(sp) != get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode)) + return; + + /* ok, translate into Push */ + edge = get_irn_out_edge_first(irn); + old_proj_M = get_edge_src_irn(edge); + + next = sched_next(irn); + sched_remove(irn); + sched_remove(sp); + + bl = get_nodes_block(irn); + push = new_rd_ia32_Push(NULL, current_ir_graph, bl, + be_get_IncSP_pred(sp), val, be_get_IncSP_mem(sp)); + proj_res = new_r_Proj(current_ir_graph, bl, push, get_irn_mode(sp), pn_ia32_Push_stack); + proj_M = new_r_Proj(current_ir_graph, bl, push, mode_M, pn_ia32_Push_M); + + /* copy a possible constant from the store */ + set_ia32_id_cnst(push, get_ia32_id_cnst(irn)); + set_ia32_immop_type(push, get_ia32_immop_type(irn)); + + /* the push must have SP out register */ + arch_set_irn_register(cg->arch_env, push, arch_get_irn_register(cg->arch_env, sp)); + + exchange(old_proj_M, proj_M); + exchange(sp, proj_res); + sched_add_before(next, push); + sched_add_after(push, proj_res); +} + +/** + * Creates a Pop from IncSP(Load(sp)) + */ +static void ia32_create_Pop(ir_node *irn, ia32_code_gen_t *cg) { + ir_node *old_proj_M = be_get_IncSP_mem(irn); + ir_node *load = skip_Proj(old_proj_M); + ir_node *old_proj_res = NULL; + ir_node *bl, *pop, *next, *proj_res, *proj_sp, *proj_M; + const ir_edge_t *edge; + const arch_register_t *reg, *sp; + + if (! is_ia32_Load(load) || get_ia32_am_offs(load)) + return; + + if (arch_get_irn_register(cg->arch_env, get_irn_n(load, 1)) != + &ia32_gp_regs[REG_GP_NOREG]) + return; + if (arch_get_irn_register(cg->arch_env, get_irn_n(load, 0)) != cg->isa->arch_isa.sp) + return; + + /* ok, translate into pop */ + foreach_out_edge(load, edge) { + ir_node *succ = get_edge_src_irn(edge); + if (succ != old_proj_M) { + old_proj_res = succ; + break; + } + } + if (! 
old_proj_res) { + assert(0); + return; /* should not happen */ + } + + bl = get_nodes_block(load); + + /* IncSP is typically scheduled after the load, so remove it first */ + sched_remove(irn); + next = sched_next(old_proj_res); + sched_remove(old_proj_res); + sched_remove(load); + + reg = arch_get_irn_register(cg->arch_env, load); + sp = arch_get_irn_register(cg->arch_env, irn); + + pop = new_rd_ia32_Pop(NULL, current_ir_graph, bl, get_irn_n(irn, 0), get_irn_n(load, 2)); + proj_res = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(old_proj_res), pn_ia32_Pop_res); + proj_sp = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(irn), pn_ia32_Pop_stack); + proj_M = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M); + + exchange(old_proj_M, proj_M); + exchange(old_proj_res, proj_res); + exchange(irn, proj_sp); + + arch_set_irn_register(cg->arch_env, proj_res, reg); + arch_set_irn_register(cg->arch_env, proj_sp, sp); + + sched_add_before(next, proj_sp); + sched_add_before(proj_sp, proj_res); + sched_add_before(proj_res,pop); +} + /** * Tries to optimize two following IncSP. */ @@ -405,17 +542,6 @@ static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) { ir_node *prev = be_get_IncSP_pred(irn); int real_uses = get_irn_n_edges(prev); - if (real_uses != 1) { - /* - This is a hack that should be removed if be_abi_fix_stack_nodes() - is fixed. Currently it leaves some IncSP's outside the chain ... - The previous IncSp is NOT our prev, but directly scheduled before ... - Impossible in a bug-free implementation :-) - */ - prev = sched_prev(irn); - real_uses = 1; - } - if (be_is_IncSP(prev) && real_uses == 1) { /* first IncSP has only one IncSP user, kill the first one */ unsigned prev_offs = be_get_IncSP_offset(prev); @@ -435,6 +561,9 @@ static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) { be_set_IncSP_offset(prev, 0); be_set_IncSP_offset(irn, (unsigned)new_ofs); be_set_IncSP_direction(irn, curr_dir); + + /* Omit the optimized IncSP */ + be_set_IncSP_pred(irn, be_get_IncSP_pred(prev)); } } @@ -448,8 +577,11 @@ void ia32_peephole_optimization(ir_node *irn, void *env) { ia32_optimize_TestJmp(irn, cg); else if (is_ia32_CondJmp(irn)) ia32_optimize_CondJmp(irn, cg); - else if (be_is_IncSP(irn)) - ia32_optimize_IncSP(irn, cg); + /* seems to be buggy when using Pushes */ +// else if (be_is_IncSP(irn)) +// ia32_optimize_IncSP(irn, cg); + else if (is_ia32_Store(irn)) + ia32_create_Push(irn, cg); } @@ -464,6 +596,11 @@ void ia32_peephole_optimization(ir_node *irn, void *env) { * ******************************************************************/ +typedef struct { + ia32_code_gen_t *cg; + heights_t *h; +} ia32_am_opt_env_t; + static int node_is_ia32_comm(const ir_node *irn) { return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0; } @@ -558,47 +695,104 @@ static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred, return 0; } - - /** - * Checks if irn is a candidate for address calculation or address mode. + * Checks if irn is a candidate for address calculation. 
* - * address calculation (AC): * - none of the operand must be a Load within the same block OR * - all Loads must have more than one user OR * - the irn has a frame entity (it's a former FrameAddr) * + * @param block The block the Loads must/must not be in + * @param irn The irn to check + * return 1 if irn is a candidate, 0 otherwise + */ +static int is_addr_candidate(const ir_node *block, const ir_node *irn) { + ir_node *in, *left, *right; + int n, is_cand = 1; + + left = get_irn_n(irn, 2); + right = get_irn_n(irn, 3); + + in = left; + + if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) { + n = ia32_get_irn_n_edges(in); + is_cand = (n == 1) ? 0 : is_cand; /* load with only one user: don't create LEA */ + } + + in = right; + + if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) { + n = ia32_get_irn_n_edges(in); + is_cand = (n == 1) ? 0 : is_cand; /* load with only one user: don't create LEA */ + } + + is_cand = get_ia32_frame_ent(irn) ? 1 : is_cand; + + return is_cand; +} + +/** + * Checks if irn is a candidate for address mode. + * * address mode (AM): * - at least one operand has to be a Load within the same block AND * - the load must not have other users than the irn AND * - the irn must not have a frame entity set * - * @param block The block the Loads must/not be in + * @param h The height information of the irg + * @param block The block the Loads must/must not be in * @param irn The irn to check - * @param check_addr 1 if to check for address calculation, 0 otherwise - * return 1 if irn is a candidate for AC or AM, 0 otherwise + * return 0 if irn is not a candidate, 1 if left load can be used, 2 if right one, 3 for both */ -static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr) { - ir_node *in; - int n, is_cand = check_addr; +static ia32_am_cand_t is_am_candidate(heights_t *h, const ir_node *block, ir_node *irn) { + ir_node *in, *load, *other, *left, *right; + int n, is_cand = 0, cand; + + if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) + return 0; + + left = get_irn_n(irn, 2); + right = get_irn_n(irn, 3); - in = get_irn_n(irn, 2); + in = left; if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) { n = ia32_get_irn_n_edges(in); - is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand); + is_cand = (n == 1) ? 1 : is_cand; /* load with more than one user: no AM */ + + load = get_Proj_pred(in); + other = right; + + /* If there is a data dependency of other irn from load: cannot use AM */ + if (get_nodes_block(other) == block) { + other = skip_Proj(other); + is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand; + } } - in = get_irn_n(irn, 3); + cand = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE; + in = right; + is_cand = 0; if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) { n = ia32_get_irn_n_edges(in); - is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand); + is_cand = (n == 1) ? 1 : is_cand; /* load with more than one user: no AM */ + + load = get_Proj_pred(in); + other = left; + + /* If there is a data dependency of other irn from load: cannot use load */ + if (get_nodes_block(other) == block) { + other = skip_Proj(other); + is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand; + } } - is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : is_cand; + cand = is_cand ? (cand | IA32_AM_CAND_RIGHT) : cand; - return is_cand; + /* if the irn has a frame entity: we do not use address mode */ + return get_ia32_frame_ent(irn) ? 
IA32_AM_CAND_NONE : cand; } /** @@ -634,6 +828,112 @@ static int load_store_addr_is_equal(const ir_node *load, const ir_node *store, return is_equal; } +typedef enum _ia32_take_lea_attr { + IA32_LEA_ATTR_NONE = 0, + IA32_LEA_ATTR_BASE = (1 << 0), + IA32_LEA_ATTR_INDEX = (1 << 1), + IA32_LEA_ATTR_OFFS = (1 << 2), + IA32_LEA_ATTR_SCALE = (1 << 3), + IA32_LEA_ATTR_AMSC = (1 << 4), + IA32_LEA_ATTR_FENT = (1 << 5) +} ia32_take_lea_attr; + +/** + * Decides if we have to keep the LEA operand or if we can assimilate it. + */ +static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea, + int have_am_sc, ia32_code_gen_t *cg) +{ + ir_node *lea_base = get_irn_n(lea, 0); + ir_node *lea_idx = get_irn_n(lea, 1); + entity *irn_ent = get_ia32_frame_ent(irn); + entity *lea_ent = get_ia32_frame_ent(lea); + int ret_val = 0; + int is_noreg_base = be_is_NoReg(cg, base); + int is_noreg_index = be_is_NoReg(cg, index); + ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea); + + /* If the Add and the LEA both have a different frame entity set: keep */ + if (irn_ent && lea_ent && (irn_ent != lea_ent)) + return IA32_LEA_ATTR_NONE; + else if (! irn_ent && lea_ent) + ret_val |= IA32_LEA_ATTR_FENT; + + /* If the Add and the LEA both have already an address mode symconst: keep */ + if (have_am_sc && get_ia32_am_sc(lea)) + return IA32_LEA_ATTR_NONE; + else if (get_ia32_am_sc(lea)) + ret_val |= IA32_LEA_ATTR_AMSC; + + /* Check the different base-index combinations */ + + if (! is_noreg_base && ! is_noreg_index) { + /* Assimilate if base is the lea and the LEA is just a Base + Offset calculation */ + if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) { + if (am_flav & ia32_O) + ret_val |= IA32_LEA_ATTR_OFFS; + + ret_val |= IA32_LEA_ATTR_BASE; + } + else + return IA32_LEA_ATTR_NONE; + } + else if (! is_noreg_base && is_noreg_index) { + /* Base is set but index not */ + if (base == lea) { + /* Base points to LEA: assimilate everything */ + if (am_flav & ia32_O) + ret_val |= IA32_LEA_ATTR_OFFS; + if (am_flav & ia32_S) + ret_val |= IA32_LEA_ATTR_SCALE; + if (am_flav & ia32_I) + ret_val |= IA32_LEA_ATTR_INDEX; + + ret_val |= IA32_LEA_ATTR_BASE; + } + else if (am_flav & ia32_B ? 0 : 1) { + /* Base is not the LEA but the LEA is an index only calculation: assimilate */ + if (am_flav & ia32_O) + ret_val |= IA32_LEA_ATTR_OFFS; + if (am_flav & ia32_S) + ret_val |= IA32_LEA_ATTR_SCALE; + + ret_val |= IA32_LEA_ATTR_INDEX; + } + else + return IA32_LEA_ATTR_NONE; + } + else if (is_noreg_base && ! is_noreg_index) { + /* Index is set but not base */ + if (index == lea) { + /* Index points to LEA: assimilate everything */ + if (am_flav & ia32_O) + ret_val |= IA32_LEA_ATTR_OFFS; + if (am_flav & ia32_S) + ret_val |= IA32_LEA_ATTR_SCALE; + if (am_flav & ia32_B) + ret_val |= IA32_LEA_ATTR_BASE; + + ret_val |= IA32_LEA_ATTR_INDEX; + } + else if (am_flav & ia32_I ? 
0 : 1) { + /* Index is not the LEA but the LEA is a base only calculation: assimilate */ + if (am_flav & ia32_O) + ret_val |= IA32_LEA_ATTR_OFFS; + if (am_flav & ia32_S) + ret_val |= IA32_LEA_ATTR_SCALE; + + ret_val |= IA32_LEA_ATTR_BASE; + } + else + return IA32_LEA_ATTR_NONE; + } + else { + assert(0 && "There must have been set base or index"); + } + + return ret_val; +} /** @@ -644,6 +944,9 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { dbg_info *dbg = get_irn_dbg_info(irn); ir_node *block = get_nodes_block(irn); ir_node *res = irn; + ir_node *shift = NULL; + ir_node *lea_o = NULL; + ir_node *lea = NULL; char *offs = NULL; const char *offs_cnst = NULL; char *offs_lea = NULL; @@ -653,8 +956,9 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { int have_am_sc = 0; int am_sc_sign = 0; ident *am_sc = NULL; + entity *lea_ent = NULL; ir_node *left, *right, *temp; - ir_node *base, *index, *orig_base, *orig_index; + ir_node *base, *index; ia32_am_flavour_t am_flav; DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;) @@ -735,6 +1039,7 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { am_sc_sign = is_ia32_am_sc_sign(temp); have_am_sc = 1; dolea = 1; + lea_o = temp; } if (isadd) { @@ -752,7 +1057,8 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { /* check for SHL 1,2,3 */ if (pred_is_specific_node(temp, is_ia32_Shl)) { - temp = get_Proj_pred(temp); + temp = get_Proj_pred(temp); + shift = temp; if (get_ia32_Immop_tarval(temp)) { scale = get_tarval_long(get_ia32_Immop_tarval(temp)); @@ -762,6 +1068,10 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index)); } + else { + scale = 0; + shift = NULL; + } } } @@ -782,45 +1092,36 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { /* Try to assimilate a LEA as left operand */ if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) { - am_flav = get_ia32_am_flavour(left); - - /* If we have an Add with a real right operand (not NoReg) and */ - /* the LEA contains already an index calculation then we create */ - /* a new LEA. */ - /* If the LEA contains already a frame_entity then we also */ - /* create a new one otherwise we would loose it. */ - if ((isadd && !be_is_NoReg(cg, index) && (am_flav & ia32_I)) || /* no new LEA if index already set */ - get_ia32_frame_ent(left) || /* no new LEA if stack access */ - (have_am_sc && get_ia32_am_sc(left))) /* no new LEA if AM symconst already present */ - { + /* check if we can assimilate the LEA */ + int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg); + + if (take_attr == IA32_LEA_ATTR_NONE) { DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n")); } else { - ir_node *assim_lea_idx, *assim_lea_base; - DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n")); - offs = get_ia32_am_offs(left); - am_sc = have_am_sc ? am_sc : get_ia32_am_sc(left); - have_am_sc = am_sc ? 1 : 0; - am_sc_sign = is_ia32_am_sc_sign(left); - scale = get_ia32_am_scale(left); - - assim_lea_base = get_irn_n(left, 0); - assim_lea_idx = get_irn_n(left, 1); - - if (be_is_NoReg(cg, assim_lea_base) && ! be_is_NoReg(cg, assim_lea_idx)) { - /* assimilate index */ - assert(be_is_NoReg(cg, index) && ! be_is_NoReg(cg, base) && "operand mismatch for LEA assimilation"); - index = assim_lea_idx; - } - else if (! 
be_is_NoReg(cg, assim_lea_base) && be_is_NoReg(cg, assim_lea_idx)) { - /* assimilate base */ - assert(! be_is_NoReg(cg, index) && (base == left) && "operand mismatch for LEA assimilation"); - base = assim_lea_base; - } - else { - assert(0 && "operand mismatch for LEA assimilation"); + lea = left; /* for statistics */ + + if (take_attr & IA32_LEA_ATTR_OFFS) + offs = get_ia32_am_offs(left); + + if (take_attr & IA32_LEA_ATTR_AMSC) { + am_sc = get_ia32_am_sc(left); + have_am_sc = 1; + am_sc_sign = is_ia32_am_sc_sign(left); } + + if (take_attr & IA32_LEA_ATTR_SCALE) + scale = get_ia32_am_scale(left); + + if (take_attr & IA32_LEA_ATTR_BASE) + base = get_irn_n(left, 0); + + if (take_attr & IA32_LEA_ATTR_INDEX) + index = get_irn_n(left, 1); + + if (take_attr & IA32_LEA_ATTR_FENT) + lea_ent = get_ia32_frame_ent(left); } } @@ -865,9 +1166,12 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { /* copy the frame entity (could be set in case of Add */ /* which was a FrameAddr) */ - set_ia32_frame_ent(res, get_ia32_frame_ent(irn)); + if (lea_ent) + set_ia32_frame_ent(res, lea_ent); + else + set_ia32_frame_ent(res, get_ia32_frame_ent(irn)); - if (is_ia32_use_frame(irn)) + if (get_ia32_frame_ent(res)) set_ia32_use_frame(res); /* set scale */ @@ -895,6 +1199,24 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res))); + /* we will exchange it, report here before the Proj is created */ + if (shift && lea && lea_o) + DBG_OPT_LEA4(irn, lea_o, lea, shift, res); + else if (shift && lea) + DBG_OPT_LEA3(irn, lea, shift, res); + else if (shift && lea_o) + DBG_OPT_LEA3(irn, lea_o, shift, res); + else if (lea && lea_o) + DBG_OPT_LEA3(irn, lea_o, lea, res); + else if (shift) + DBG_OPT_LEA2(irn, shift, res); + else if (lea) + DBG_OPT_LEA2(irn, lea, res); + else if (lea_o) + DBG_OPT_LEA2(irn, lea_o, res); + else + DBG_OPT_LEA1(irn, res); + /* get the result Proj of the Add/Sub */ irn = get_res_proj(irn); @@ -907,33 +1229,77 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { return res; } + /** - * Optimizes a pattern around irn to address mode if possible. + * Merges a Load/Store node with a LEA. + * @param irn The Load/Store node + * @param lea The LEA */ -void ia32_optimize_am(ir_node *irn, void *env) { - ia32_code_gen_t *cg = env; - ir_node *res = irn; - dbg_info *dbg; - ir_mode *mode; - ir_node *block, *noreg_gp, *noreg_fp; - ir_node *left, *right, *temp; - ir_node *store, *load, *mem_proj; - ir_node *succ, *addr_b, *addr_i; - int check_am_src = 0; - DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;) +static void merge_loadstore_lea(ir_node *irn, ir_node *lea) { + entity *irn_ent = get_ia32_frame_ent(irn); + entity *lea_ent = get_ia32_frame_ent(lea); - if (! is_ia32_irn(irn)) + /* If the irn and the LEA both have a different frame entity set: do not merge */ + if (irn_ent && lea_ent && (irn_ent != lea_ent)) return; + else if (! 
irn_ent && lea_ent) { + set_ia32_frame_ent(irn, lea_ent); + set_ia32_use_frame(irn); + } - dbg = get_irn_dbg_info(irn); - mode = get_irn_mode(irn); - block = get_nodes_block(irn); - noreg_gp = ia32_new_NoReg_gp(cg); - noreg_fp = ia32_new_NoReg_fp(cg); + /* get the AM attributes from the LEA */ + add_ia32_am_offs(irn, get_ia32_am_offs(lea)); + set_ia32_am_scale(irn, get_ia32_am_scale(lea)); + set_ia32_am_flavour(irn, get_ia32_am_flavour(lea)); - DBG((mod, LEVEL_1, "checking for AM\n")); + set_ia32_am_sc(irn, get_ia32_am_sc(lea)); + if (is_ia32_am_sc_sign(lea)) + set_ia32_am_sc_sign(irn); + + set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD); + + /* set base and index */ + set_irn_n(irn, 0, get_irn_n(lea, 0)); + set_irn_n(irn, 1, get_irn_n(lea, 1)); + + /* clear remat flag */ + set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); + + if (is_ia32_Ld(irn)) + DBG_OPT_LOAD_LEA(lea, irn); + else + DBG_OPT_STORE_LEA(lea, irn); + +} - /* 1st part: check for address calculations and transform the into Lea */ +/** + * Sets new_right index of irn to right and new_left index to left. + * Also exchange left and right + */ +static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right, int new_left, int new_right) { + ir_node *temp; + + set_irn_n(irn, new_right, *right); + set_irn_n(irn, new_left, *left); + + temp = *left; + *left = *right; + *right = temp; + + /* this is only needed for Compares, but currently ALL nodes + * have this attribute :-) */ + set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn))); +} + +/** + * Performs address calculation optimization (create LEAs if possible) + */ +static void optimize_lea(ir_node *irn, void *env) { + ia32_code_gen_t *cg = env; + ir_node *block, *noreg_gp, *left, *right; + + if (! is_ia32_irn(irn)) + return; /* Following cases can occur: */ /* - Sub (l, imm) -> LEA [base - offset] */ @@ -945,25 +1311,77 @@ void ia32_optimize_am(ir_node *irn, void *env) { /* with scale > 1 iff l/r == shl (1,2,3) */ if (is_ia32_Sub(irn) || is_ia32_Add(irn)) { - left = get_irn_n(irn, 2); - right = get_irn_n(irn, 3); + left = get_irn_n(irn, 2); + right = get_irn_n(irn, 3); + block = get_nodes_block(irn); + noreg_gp = ia32_new_NoReg_gp(cg); /* Do not try to create a LEA if one of the operands is a Load. */ /* check is irn is a candidate for address calculation */ - if (is_candidate(block, irn, 1)) { - DBG((mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn)); + if (is_addr_candidate(block, irn)) { + ir_node *res; + + DBG((cg->mod, LEVEL_1, "\tfound address calculation candidate %+F ... 
", irn)); res = fold_addr(cg, irn, noreg_gp); - if (res == irn) - DB((mod, LEVEL_1, "transformed into %+F\n", res)); + if (res != irn) + DB((cg->mod, LEVEL_1, "transformed into %+F\n", res)); else - DB((mod, LEVEL_1, "not transformed\n")); + DB((cg->mod, LEVEL_1, "not transformed\n")); } } + else if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) { + /* - Load -> LEA into Load } TODO: If the LEA is used by more than one Load/Store */ + /* - Store -> LEA into Store } it might be better to keep the LEA */ + left = get_irn_n(irn, 0); - /* 2nd part: fold following patterns: */ - /* - Load -> LEA into Load } TODO: If the LEA is used by more than one Load/Store */ - /* - Store -> LEA into Store } it might be better to keep the LEA */ + if (is_ia32_Lea(left)) { + const ir_edge_t *edge, *ne; + ir_node *src; + + /* merge all Loads/Stores connected to this LEA with the LEA */ + foreach_out_edge_safe(left, edge, ne) { + src = get_edge_src_irn(edge); + + if (src && (is_ia32_Ld(src) || is_ia32_St(src) || is_ia32_Store8Bit(src))) { + DBG((cg->mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn)); + if (! is_ia32_got_lea(src)) + merge_loadstore_lea(src, left); + set_ia32_got_lea(src); + } + } + } + } +} + + +/** + * Checks for address mode patterns and performs the + * necessary transformations. + * This function is called by a walker. + */ +static void optimize_am(ir_node *irn, void *env) { + ia32_am_opt_env_t *am_opt_env = env; + ia32_code_gen_t *cg = am_opt_env->cg; + heights_t *h = am_opt_env->h; + ir_node *block, *noreg_gp, *noreg_fp; + ir_node *left, *right; + ir_node *store, *load, *mem_proj; + ir_node *succ, *addr_b, *addr_i; + int check_am_src = 0; + int need_exchange_on_fail = 0; + DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;) + + if (! is_ia32_irn(irn)) + return; + + block = get_nodes_block(irn); + noreg_gp = ia32_new_NoReg_gp(cg); + noreg_fp = ia32_new_NoReg_fp(cg); + + DBG((mod, LEVEL_1, "checking for AM\n")); + + /* fold following patterns: */ /* - op -> Load into AMop with am_Source */ /* conditions: */ /* - op is am_Source capable AND */ @@ -977,252 +1395,274 @@ void ia32_optimize_am(ir_node *irn, void *env) { /* - the Load and Store are in the same block AND */ /* - nobody else uses the result of the op */ - if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) { - /* 1st: check for Load/Store -> LEA */ - if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) { - left = get_irn_n(irn, 0); + if ((get_ia32_am_support(irn) != ia32_am_None) && ! is_ia32_Lea(irn)) { + ia32_am_cand_t cand = is_am_candidate(h, block, irn); + ia32_am_cand_t orig_cand = cand; - if (is_ia32_Lea(left)) { - DBG((mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn)); + /* cand == 1: load is left; cand == 2: load is right; */ - /* get the AM attributes from the LEA */ - add_ia32_am_offs(irn, get_ia32_am_offs(left)); - set_ia32_am_scale(irn, get_ia32_am_scale(left)); - set_ia32_am_flavour(irn, get_ia32_am_flavour(left)); + if (cand == IA32_AM_CAND_NONE) + return; - set_ia32_am_sc(irn, get_ia32_am_sc(left)); - if (is_ia32_am_sc_sign(left)) - set_ia32_am_sc_sign(irn); + DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn)); - set_ia32_op_type(irn, is_ia32_Ld(irn) ? 
ia32_AddrModeS : ia32_AddrModeD); + left = get_irn_n(irn, 2); + if (get_irn_arity(irn) == 4) { + /* it's an "unary" operation */ + right = left; + } + else { + right = get_irn_n(irn, 3); + } - /* set base and index */ - set_irn_n(irn, 0, get_irn_n(left, 0)); - set_irn_n(irn, 1, get_irn_n(left, 1)); + /* normalize commutative ops */ + if (node_is_ia32_comm(irn) && (cand == IA32_AM_CAND_LEFT)) { - /* clear remat flag */ - set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); - } + /* Assure that right operand is always a Load if there is one */ + /* because non-commutative ops can only use Dest AM if the right */ + /* operand is a load, so we only need to check right operand. */ + + exchange_left_right(irn, &left, &right, 3, 2); + need_exchange_on_fail = 1; + + /* now: load is right */ + cand = IA32_AM_CAND_RIGHT; } - /* check if the node is an address mode candidate */ - else if (is_candidate(block, irn, 0)) { - DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn)); - - left = get_irn_n(irn, 2); - if (get_irn_arity(irn) == 4) { - /* it's an "unary" operation */ - right = left; - } - else { - right = get_irn_n(irn, 3); - } - /* normalize commutative ops */ - if (node_is_ia32_comm(irn)) { - /* Assure that right operand is always a Load if there is one */ - /* because non-commutative ops can only use Dest AM if the right */ - /* operand is a load, so we only need to check right operand. */ - if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) - { - set_irn_n(irn, 2, right); - set_irn_n(irn, 3, left); - - temp = left; - left = right; - right = temp; - } - } + /* check for Store -> op -> Load */ - /* check for Store -> op -> Load */ - - /* Store -> op -> Load optimization is only possible if supported by op */ - /* and if right operand is a Load */ - if ((get_ia32_am_support(irn) & ia32_am_Dest) && - pred_is_specific_nodeblock(block, right, is_ia32_Ld)) - { - - /* An address mode capable op always has a result Proj. */ - /* If this Proj is used by more than one other node, we don't need to */ - /* check further, otherwise we check for Store and remember the address, */ - /* the Store points to. */ - - succ = get_res_proj(irn); - assert(succ && "Couldn't find result proj"); - - addr_b = NULL; - addr_i = NULL; - store = NULL; - - /* now check for users and Store */ - if (ia32_get_irn_n_edges(succ) == 1) { - succ = get_edge_src_irn(get_irn_out_edge_first(succ)); - - if (is_ia32_fStore(succ) || is_ia32_Store(succ)) { - store = succ; - addr_b = get_irn_n(store, 0); - - /* Could be that the Store is connected to the address */ - /* calculating LEA while the Load is already transformed. */ - if (is_ia32_Lea(addr_b)) { - succ = addr_b; - addr_b = get_irn_n(succ, 0); - addr_i = get_irn_n(succ, 1); - } - else { - addr_i = noreg_gp; - } - } + /* Store -> op -> Load optimization is only possible if supported by op */ + /* and if right operand is a Load */ + if ((get_ia32_am_support(irn) & ia32_am_Dest) && (cand & IA32_AM_CAND_RIGHT)) + { + /* An address mode capable op always has a result Proj. */ + /* If this Proj is used by more than one other node, we don't need to */ + /* check further, otherwise we check for Store and remember the address, */ + /* the Store points to. 
*/ + + succ = get_res_proj(irn); + assert(succ && "Couldn't find result proj"); + + addr_b = NULL; + addr_i = NULL; + store = NULL; + + /* now check for users and Store */ + if (ia32_get_irn_n_edges(succ) == 1) { + succ = get_edge_src_irn(get_irn_out_edge_first(succ)); + + if (is_ia32_xStore(succ) || is_ia32_Store(succ)) { + store = succ; + addr_b = get_irn_n(store, 0); + addr_i = get_irn_n(store, 1); } + } - if (store) { - /* we found a Store as single user: Now check for Load */ + if (store) { + /* we found a Store as single user: Now check for Load */ - /* Extra check for commutative ops with two Loads */ - /* -> put the interesting Load right */ - if (node_is_ia32_comm(irn) && - pred_is_specific_nodeblock(block, left, is_ia32_Ld)) + /* Extra check for commutative ops with two Loads */ + /* -> put the interesting Load right */ + if (node_is_ia32_comm(irn) && (cand == IA32_AM_CAND_BOTH)) { + if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) && + (addr_i == get_irn_n(get_Proj_pred(left), 1))) { - if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) && - (addr_i == get_irn_n(get_Proj_pred(left), 1))) - { - /* We exchange left and right, so it's easier to kill */ - /* the correct Load later and to handle unary operations. */ - set_irn_n(irn, 2, right); - set_irn_n(irn, 3, left); - - temp = left; - left = right; - right = temp; - } + /* We exchange left and right, so it's easier to kill */ + /* the correct Load later and to handle unary operations. */ + exchange_left_right(irn, &left, &right, 3, 2); + need_exchange_on_fail ^= 1; } + } - /* skip the Proj for easier access */ - load = get_Proj_pred(right); - - /* Compare Load and Store address */ - if (load_store_addr_is_equal(load, store, addr_b, addr_i)) { - /* Right Load is from same address, so we can */ - /* disconnect the Load and Store here */ - - /* set new base, index and attributes */ - set_irn_n(irn, 0, addr_b); - set_irn_n(irn, 1, addr_i); - add_ia32_am_offs(irn, get_ia32_am_offs(load)); - set_ia32_am_scale(irn, get_ia32_am_scale(load)); - set_ia32_am_flavour(irn, get_ia32_am_flavour(load)); - set_ia32_op_type(irn, ia32_AddrModeD); - set_ia32_frame_ent(irn, get_ia32_frame_ent(load)); - set_ia32_ls_mode(irn, get_ia32_ls_mode(load)); - - set_ia32_am_sc(irn, get_ia32_am_sc(load)); - if (is_ia32_am_sc_sign(load)) - set_ia32_am_sc_sign(irn); - - if (is_ia32_use_frame(load)) - set_ia32_use_frame(irn); - - /* connect to Load memory and disconnect Load */ - if (get_irn_arity(irn) == 5) { - /* binary AMop */ - set_irn_n(irn, 4, get_irn_n(load, 2)); - set_irn_n(irn, 3, noreg_gp); - } - else { - /* unary AMop */ - set_irn_n(irn, 3, get_irn_n(load, 2)); - set_irn_n(irn, 2, noreg_gp); - } - - /* connect the memory Proj of the Store to the op */ - mem_proj = get_mem_proj(store); - set_Proj_pred(mem_proj, irn); - set_Proj_proj(mem_proj, 1); - - /* clear remat flag */ - set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); - - DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store)); + /* skip the Proj for easier access */ + load = get_Proj_pred(right); + + /* Compare Load and Store address */ + if (load_store_addr_is_equal(load, store, addr_b, addr_i)) { + /* Right Load is from same address, so we can */ + /* disconnect the Load and Store here */ + + /* set new base, index and attributes */ + set_irn_n(irn, 0, addr_b); + set_irn_n(irn, 1, addr_i); + add_ia32_am_offs(irn, get_ia32_am_offs(load)); + set_ia32_am_scale(irn, get_ia32_am_scale(load)); + set_ia32_am_flavour(irn, get_ia32_am_flavour(load)); + 
set_ia32_op_type(irn, ia32_AddrModeD); + set_ia32_frame_ent(irn, get_ia32_frame_ent(load)); + set_ia32_ls_mode(irn, get_ia32_ls_mode(load)); + + set_ia32_am_sc(irn, get_ia32_am_sc(load)); + if (is_ia32_am_sc_sign(load)) + set_ia32_am_sc_sign(irn); + + if (is_ia32_use_frame(load)) + set_ia32_use_frame(irn); + + /* connect to Load memory and disconnect Load */ + if (get_irn_arity(irn) == 5) { + /* binary AMop */ + set_irn_n(irn, 4, get_irn_n(load, 2)); + set_irn_n(irn, 3, noreg_gp); + } + else { + /* unary AMop */ + set_irn_n(irn, 3, get_irn_n(load, 2)); + set_irn_n(irn, 2, noreg_gp); } - } /* if (store) */ - else if (get_ia32_am_support(irn) & ia32_am_Source) { - /* There was no store, check if we still can optimize for source address mode */ - check_am_src = 1; + + /* connect the memory Proj of the Store to the op */ + mem_proj = get_mem_proj(store); + set_Proj_pred(mem_proj, irn); + set_Proj_proj(mem_proj, 1); + + /* clear remat flag */ + set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); + + DBG_OPT_AM_D(load, store, irn); + + DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store)); + + need_exchange_on_fail = 0; } - } /* if (support AM Dest) */ + } /* if (store) */ else if (get_ia32_am_support(irn) & ia32_am_Source) { - /* op doesn't support am AM Dest -> check for AM Source */ + /* There was no store, check if we still can optimize for source address mode */ check_am_src = 1; } + } /* if (support AM Dest) */ + else if (get_ia32_am_support(irn) & ia32_am_Source) { + /* op doesn't support am AM Dest -> check for AM Source */ + check_am_src = 1; + } - /* normalize commutative ops */ - if (node_is_ia32_comm(irn)) { - /* Assure that left operand is always a Load if there is one */ - /* because non-commutative ops can only use Source AM if the */ - /* left operand is a Load, so we only need to check the left */ - /* operand afterwards. 
*/ - if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) { - set_irn_n(irn, 2, right); - set_irn_n(irn, 3, left); - - temp = left; - left = right; - right = temp; - } - } + /* was exchanged but optimize failed: exchange back */ + if (need_exchange_on_fail) { + exchange_left_right(irn, &left, &right, 3, 2); + cand = orig_cand; + } - /* optimize op -> Load iff Load is only used by this op */ - /* and left operand is a Load which only used by this irn */ - if (check_am_src && - pred_is_specific_nodeblock(block, left, is_ia32_Ld) && - (ia32_get_irn_n_edges(left) == 1)) - { - left = get_Proj_pred(left); - - addr_b = get_irn_n(left, 0); - addr_i = get_irn_n(left, 1); - - /* set new base, index and attributes */ - set_irn_n(irn, 0, addr_b); - set_irn_n(irn, 1, addr_i); - add_ia32_am_offs(irn, get_ia32_am_offs(left)); - set_ia32_am_scale(irn, get_ia32_am_scale(left)); - set_ia32_am_flavour(irn, get_ia32_am_flavour(left)); - set_ia32_op_type(irn, ia32_AddrModeS); - set_ia32_frame_ent(irn, get_ia32_frame_ent(left)); - set_ia32_ls_mode(irn, get_ia32_ls_mode(left)); - - set_ia32_am_sc(irn, get_ia32_am_sc(left)); - if (is_ia32_am_sc_sign(left)) - set_ia32_am_sc_sign(irn); - - /* clear remat flag */ - set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); - - if (is_ia32_use_frame(left)) - set_ia32_use_frame(irn); - - /* connect to Load memory */ - if (get_irn_arity(irn) == 5) { - /* binary AMop */ - set_irn_n(irn, 4, get_irn_n(left, 2)); - } - else { - /* unary AMop */ - set_irn_n(irn, 3, get_irn_n(left, 2)); - } + need_exchange_on_fail = 0; + + /* normalize commutative ops */ + if (check_am_src && node_is_ia32_comm(irn) && (cand == IA32_AM_CAND_RIGHT)) { + + /* Assure that left operand is always a Load if there is one */ + /* because non-commutative ops can only use Source AM if the */ + /* left operand is a Load, so we only need to check the left */ + /* operand afterwards. 
*/ + + exchange_left_right(irn, &left, &right, 3, 2); + need_exchange_on_fail = 1; + + /* now: load is left */ + cand = IA32_AM_CAND_LEFT; + } + + /* optimize op -> Load iff Load is only used by this op */ + /* and left operand is a Load which only used by this irn */ + if (check_am_src && + (cand & IA32_AM_CAND_LEFT) && + (ia32_get_irn_n_edges(left) == 1)) + { + left = get_Proj_pred(left); + + addr_b = get_irn_n(left, 0); + addr_i = get_irn_n(left, 1); + + /* set new base, index and attributes */ + set_irn_n(irn, 0, addr_b); + set_irn_n(irn, 1, addr_i); + add_ia32_am_offs(irn, get_ia32_am_offs(left)); + set_ia32_am_scale(irn, get_ia32_am_scale(left)); + set_ia32_am_flavour(irn, get_ia32_am_flavour(left)); + set_ia32_op_type(irn, ia32_AddrModeS); + set_ia32_frame_ent(irn, get_ia32_frame_ent(left)); + set_ia32_ls_mode(irn, get_ia32_ls_mode(left)); + + set_ia32_am_sc(irn, get_ia32_am_sc(left)); + if (is_ia32_am_sc_sign(left)) + set_ia32_am_sc_sign(irn); + + /* clear remat flag */ + set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); + + if (is_ia32_use_frame(left)) + set_ia32_use_frame(irn); + + /* connect to Load memory */ + if (get_irn_arity(irn) == 5) { + /* binary AMop */ + set_irn_n(irn, 4, get_irn_n(left, 2)); + + /* this is only needed for Compares, but currently ALL nodes + * have this attribute :-) */ + set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn))); + + /* disconnect from Load */ + /* (make second op -> first, set second in to noreg) */ + set_irn_n(irn, 2, get_irn_n(irn, 3)); + set_irn_n(irn, 3, noreg_gp); + } + else { + /* unary AMop */ + set_irn_n(irn, 3, get_irn_n(left, 2)); /* disconnect from Load */ set_irn_n(irn, 2, noreg_gp); + } - /* If Load has a memory Proj, connect it to the op */ - mem_proj = get_mem_proj(left); - if (mem_proj) { - set_Proj_pred(mem_proj, irn); - set_Proj_proj(mem_proj, 1); - } + DBG_OPT_AM_S(left, irn); - DB((mod, LEVEL_1, "merged with %+F into source AM\n", left)); + /* If Load has a memory Proj, connect it to the op */ + mem_proj = get_mem_proj(left); + if (mem_proj) { + set_Proj_pred(mem_proj, irn); + set_Proj_proj(mem_proj, 1); } + + DB((mod, LEVEL_1, "merged with %+F into source AM\n", left)); + } + else { + /* was exchanged but optimize failed: exchange back */ + if (need_exchange_on_fail) + exchange_left_right(irn, &left, &right, 3, 2); } } } + +/** + * Performs address mode optimization. + */ +void ia32_optimize_addressmode(ia32_code_gen_t *cg) { + /* if we are supposed to do AM or LEA optimization: recalculate edges */ + if (cg->opt & (IA32_OPT_DOAM | IA32_OPT_LEA)) { + edges_deactivate(cg->irg); + edges_activate(cg->irg); + } + else { + /* no optimizations at all */ + return; + } + + /* beware: we cannot optimize LEA and AM in one run because */ + /* LEA optimization adds new nodes to the irg which */ + /* invalidates the phase data */ + + if (cg->opt & IA32_OPT_LEA) { + irg_walk_blkwise_graph(cg->irg, NULL, optimize_lea, cg); + } + + if (cg->opt & IA32_OPT_DOAM) { + /* we need height information for am optimization */ + heights_t *h = heights_new(cg->irg); + ia32_am_opt_env_t env; + + env.cg = cg; + env.h = h; + + irg_walk_blkwise_graph(cg->irg, NULL, optimize_am, &env); + + heights_free(h); + } +}
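
The ia32_am_cand_t values introduced by this patch form a two-bit mask: IA32_AM_CAND_LEFT marks a foldable left operand, IA32_AM_CAND_RIGHT a foldable right one, and IA32_AM_CAND_BOTH is their union. That is why optimize_am can probe a single side with a plain bitwise AND (cand & IA32_AM_CAND_RIGHT) and still match the BOTH case. The following standalone sketch illustrates just that classification scheme; classify, left_foldable and right_foldable are hypothetical stand-ins for the real is_am_candidate checks (single-user Load in the same block, no dependency per heights_reachable_in_block), not libFIRM API.

#include <stdio.h>

/* Two-bit candidate mask, mirroring the ia32_am_cand_t enum from the patch. */
typedef enum {
	IA32_AM_CAND_NONE  = 0,
	IA32_AM_CAND_LEFT  = 1,
	IA32_AM_CAND_RIGHT = 2,
	IA32_AM_CAND_BOTH  = 3   /* IA32_AM_CAND_LEFT | IA32_AM_CAND_RIGHT */
} ia32_am_cand_t;

/* Hypothetical stand-in for is_am_candidate(): each operand that passes
 * the foldable-Load checks contributes one bit to the result. */
static ia32_am_cand_t classify(int left_foldable, int right_foldable)
{
	int cand = IA32_AM_CAND_NONE;
	if (left_foldable)
		cand |= IA32_AM_CAND_LEFT;
	if (right_foldable)
		cand |= IA32_AM_CAND_RIGHT;
	return (ia32_am_cand_t)cand;
}

int main(void)
{
	ia32_am_cand_t cand = classify(1, 1); /* both operands are foldable Loads */

	/* The same probes optimize_am uses: a bitwise AND also matches CAND_BOTH. */
	if (cand & IA32_AM_CAND_RIGHT)
		puts("right Load foldable -> try dest AM (Store -> op -> Load)");
	if (cand & IA32_AM_CAND_LEFT)
		puts("left Load foldable -> try source AM (op -> Load)");
	return 0;
}

With both flags set the sketch reports both sides, matching the order in optimize_am, which first tries dest AM on the right operand and only afterwards falls back to source AM on the left.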