X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_optimize.c;h=555d8f78854aa7c43e49b97cf36d95fd27f1099a;hb=04f68446f4064c3f1ebcf94920bb1db8235fe485;hp=6d7c40fe6cb3431f0156b4480f863682aecd2165;hpb=f65c73689e58bdbec2209cb7014b53d32aea244c;p=libfirm

diff --git a/ir/be/ia32/ia32_optimize.c b/ir/be/ia32/ia32_optimize.c
index 6d7c40fe6..555d8f788 100644
--- a/ir/be/ia32/ia32_optimize.c
+++ b/ir/be/ia32/ia32_optimize.c
@@ -1,3 +1,28 @@
+/*
+ * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License
+ * Agreement provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief       Implements several optimizations for IA32.
+ * @author      Christian Wuerdig
+ * @version     $Id$
+ */
 #ifdef HAVE_CONFIG_H
 #include "config.h"
 #endif
@@ -9,6 +34,9 @@
 #include "iredges.h"
 #include "tv.h"
 #include "irgmod.h"
+#include "irgwalk.h"
+#include "height.h"
+#include "irbitset.h"
 
 #include "../be_t.h"
 #include "../beabi.h"
@@ -17,248 +45,43 @@
 #include "ia32_new_nodes.h"
 #include "bearch_ia32_t.h"
 
-#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class defenitions) */
-
-#undef is_NoMem
-#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)
-
-typedef int is_op_func_t(const ir_node *n);
-
-/**
- * checks if a node represents the NOREG value
- */
-static int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
-	be_abi_irg_t *babi = cg->birg->abi;
-	const arch_register_t *fp_noreg = USE_SSE2(cg) ?
- &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG]; - - return (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn) || - (be_abi_get_callee_save_irn(babi, fp_noreg) == irn); -} +#include "gen_ia32_regalloc_if_t.h" +#include "ia32_transform.h" +#include "ia32_dbg_stat.h" +#include "ia32_util.h" +DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) +#define AGGRESSIVE_AM -/************************************************* - * _____ _ _ - * / ____| | | | | - * | | ___ _ __ ___| |_ __ _ _ __ | |_ ___ - * | | / _ \| '_ \/ __| __/ _` | '_ \| __/ __| - * | |___| (_) | | | \__ \ || (_| | | | | |_\__ \ - * \_____\___/|_| |_|___/\__\__,_|_| |_|\__|___/ - * - *************************************************/ - -/** - * creates a unique ident by adding a number to a tag - * - * @param tag the tag string, must contain a %d if a number - * should be added - */ -static ident *unique_id(const char *tag) -{ - static unsigned id = 0; - char str[256]; - - snprintf(str, sizeof(str), tag, ++id); - return new_id_from_str(str); -} - +typedef enum { + IA32_AM_CAND_NONE = 0, /**< no addressmode possible with irn inputs */ + IA32_AM_CAND_LEFT = 1, /**< addressmode possible with left input */ + IA32_AM_CAND_RIGHT = 2, /**< addressmode possible with right input */ + IA32_AM_CAND_BOTH = 3 /**< addressmode possible with both inputs */ +} ia32_am_cand_t; +typedef int is_op_func_t(const ir_node *n); +typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem); /** - * Transforms a SymConst. - * - * @param mod the debug module - * @param block the block the new node should belong to - * @param node the ir SymConst node - * @param mode mode of the SymConst - * @return the created ia32 Const node - */ -static ir_node *gen_SymConst(ia32_transform_env_t *env) { - ir_node *cnst; - dbg_info *dbg = env->dbg; - ir_mode *mode = env->mode; - ir_graph *irg = env->irg; - ir_node *block = env->block; - - if (mode_is_float(mode)) { - if (USE_SSE2(env->cg)) - cnst = new_rd_ia32_fConst(dbg, irg, block, mode); - else - cnst = new_rd_ia32_vfConst(dbg, irg, block, mode); - } - else { - cnst = new_rd_ia32_Const(dbg, irg, block, mode); - } - set_ia32_Const_attr(cnst, env->irn); - return cnst; -} - -/** - * Get a primitive type for a mode. - */ -static ir_type *get_prim_type(pmap *types, ir_mode *mode) -{ - pmap_entry *e = pmap_find(types, mode); - ir_type *res; - - if (! e) { - char buf[64]; - snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode)); - res = new_type_primitive(new_id_from_str(buf), mode); - pmap_insert(types, mode, res); - } - else - res = e->value; - return res; -} - -/** - * Get an entity that is initialized with a tarval - */ -static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst) -{ - tarval *tv = get_Const_tarval(cnst); - pmap_entry *e = pmap_find(cg->tv_ent, tv); - entity *res; - ir_graph *rem; - - if (! 
e) { - ir_mode *mode = get_irn_mode(cnst); - ir_type *tp = get_Const_type(cnst); - if (tp == firm_unknown_type) - tp = get_prim_type(cg->types, mode); - - res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp); - - set_entity_ld_ident(res, get_entity_ident(res)); - set_entity_visibility(res, visibility_local); - set_entity_variability(res, variability_constant); - set_entity_allocation(res, allocation_static); - - /* we create a new entity here: It's initialization must resist on the - const code irg */ - rem = current_ir_graph; - current_ir_graph = get_const_code_irg(); - set_atomic_ent_value(res, new_Const_type(tv, tp)); - current_ir_graph = rem; - } - else - res = e->value; - return res; -} - -/** - * Transforms a Const. - * - * @param mod the debug module - * @param block the block the new node should belong to - * @param node the ir Const node - * @param mode mode of the Const - * @return the created ia32 Const node + * checks if a node represents the NOREG value */ -static ir_node *gen_Const(ia32_transform_env_t *env) { - ir_node *cnst; - symconst_symbol sym; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *node = env->irn; - dbg_info *dbg = env->dbg; - ir_mode *mode = env->mode; - - if (mode_is_float(mode)) { - if (! USE_SSE2(env->cg)) { - cnst_classify_t clss = classify_Const(node); - - if (clss == CNST_NULL) - return new_rd_ia32_vfldz(dbg, irg, block, mode); - else if (clss == CNST_ONE) - return new_rd_ia32_vfld1(dbg, irg, block, mode); - } - sym.entity_p = get_entity_for_tv(env->cg, node); - - cnst = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent); - env->irn = cnst; - cnst = gen_SymConst(env); - } - else { - cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node)); - set_ia32_Const_attr(cnst, node); - } - return cnst; +static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) { + return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp; } - - -/** - * Transforms (all) Const's into ia32_Const and places them in the - * block where they are used (or in the cfg-pred Block in case of Phi's). - * Additionally all reference nodes are changed into mode_Is nodes. - */ -void ia32_place_consts_set_modes(ir_node *irn, void *env) { - ia32_code_gen_t *cg = env; - ia32_transform_env_t tenv; - ir_mode *mode; - ir_node *pred, *cnst; - int i; - opcode opc; - - if (is_Block(irn)) - return; - - mode = get_irn_mode(irn); - - /* transform all reference nodes into mode_Is nodes */ - if (mode_is_reference(mode)) { - mode = mode_Is; - set_irn_mode(irn, mode); - } - - tenv.block = get_nodes_block(irn); - tenv.cg = cg; - tenv.irg = cg->irg; - DEBUG_ONLY(tenv.mod = cg->mod;) - - /* Loop over all predecessors and check for Sym/Const nodes */ - for (i = get_irn_arity(irn) - 1; i >= 0; --i) { - pred = get_irn_n(irn, i); - cnst = NULL; - opc = get_irn_opcode(pred); - tenv.irn = pred; - tenv.mode = get_irn_mode(pred); - tenv.dbg = get_irn_dbg_info(pred); - - /* If it's a Phi, then we need to create the */ - /* new Const in it's predecessor block */ - if (is_Phi(irn)) { - tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i); - } - - /* put the const into the block where the original const was */ - if (! 
cg->opt.placecnst) { - tenv.block = get_nodes_block(pred); - } - - switch (opc) { - case iro_Const: - cnst = gen_Const(&tenv); - break; - case iro_SymConst: - cnst = gen_SymConst(&tenv); - break; - default: - break; - } - - /* if we found a const, then set it */ - if (cnst) { - set_irn_n(irn, i, cnst); - } - } +void ia32_pre_transform_phase(ia32_code_gen_t *cg) { + /* + We need to transform the consts twice: + - the psi condition tree transformer needs existing constants to be ia32 constants + - the psi condition tree transformer inserts new firm constants which need to be transformed + */ + //ia32_transform_all_firm_consts(cg); + irg_walk_graph(cg->irg, NULL, ia32_transform_psi_cond_tree, cg); + //ia32_transform_all_firm_consts(cg); } - - /******************************************************************************************************** * _____ _ _ ____ _ _ _ _ _ * | __ \ | | | | / __ \ | | (_) (_) | | (_) @@ -274,8 +97,18 @@ void ia32_place_consts_set_modes(ir_node *irn, void *env) { * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION. */ -static int ia32_cnst_compare(ir_node *n1, ir_node *n2) { - return get_ia32_id_cnst(n1) == get_ia32_id_cnst(n2); +static int ia32_const_equal(const ir_node *n1, const ir_node *n2) { + if(get_ia32_immop_type(n1) != get_ia32_immop_type(n2)) + return 0; + + if(get_ia32_immop_type(n1) == ia32_ImmConst) { + return get_ia32_Immop_tarval(n1) == get_ia32_Immop_tarval(n2); + } else if(get_ia32_immop_type(n1) == ia32_ImmSymConst) { + return get_ia32_Immop_symconst(n1) == get_ia32_Immop_symconst(n2); + } + + assert(get_ia32_immop_type(n1) == ia32_ImmNone); + return 1; } /** @@ -326,10 +159,10 @@ static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) { } } - if (same_args) - return ia32_cnst_compare(cand, irn); + if (!same_args) + return 0; - return 0; + return ia32_const_equal(cand, irn); } /** @@ -343,14 +176,14 @@ static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) { replace = cand ? is_TestJmp_replacement(cand, irn) : 0; if (replace) { - DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn)); + DBG((dbg, LEVEL_1, "replacing %+F by ", irn)); if (is_ia32_And(cand)) set_irn_op(irn, op_ia32_CJmpAM); else set_irn_op(irn, op_ia32_CJmp); - DB((cg->mod, LEVEL_1, "%+F\n", irn)); + DB((dbg, LEVEL_1, "%+F\n", irn)); } } @@ -362,20 +195,16 @@ static int is_CondJmp_cand(const ir_node *irn) { * Checks if the arguments of cand are the same of irn. */ static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) { - int i, n = get_irn_arity(cand); - int same_args = 1; + int i, arity; - for (i = 0; i < n; i++) { - if (get_irn_n(cand, i) == get_irn_n(irn, i)) { - same_args = 0; - break; + arity = get_irn_arity(cand); + for (i = 0; i < arity; i++) { + if (get_irn_n(cand, i) != get_irn_n(irn, i)) { + return 0; } } - if (same_args) - return ia32_cnst_compare(cand, irn); - - return 0; + return ia32_const_equal(cand, irn); } /** @@ -389,14 +218,152 @@ static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) { replace = cand ? 
is_CondJmp_replacement(cand, irn) : 0;

 	if (replace) {
-		DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
+		DBG((dbg, LEVEL_1, "replacing %+F by ", irn));
+		DBG_OPT_CJMP(irn);

-		set_irn_op(irn, op_ia32_CJmp);
+		set_irn_op(irn, op_ia32_CJmpAM);

-		DB((cg->mod, LEVEL_1, "%+F\n", irn));
+		DB((dbg, LEVEL_1, "%+F\n", irn));
 	}
 }

+// only optimize up to 48 stores behind IncSPs
+#define MAXPUSH_OPTIMIZE	48
+
+/**
+ * Tries to create Pushes from IncSP/Store combinations.
+ */
+static void ia32_create_Pushs(ir_node *irn, ia32_code_gen_t *cg) {
+	int i;
+	int offset;
+	ir_node *node;
+	ir_node *stores[MAXPUSH_OPTIMIZE];
+	ir_node *block = get_nodes_block(irn);
+	ir_graph *irg = cg->irg;
+	ir_node *curr_sp;
+	ir_mode *spmode = get_irn_mode(irn);
+
+	memset(stores, 0, sizeof(stores));
+
+	assert(be_is_IncSP(irn));
+
+	offset = be_get_IncSP_offset(irn);
+	if(offset < 4)
+		return;
+
+	/*
+	 * We first walk the schedule after the IncSP node as long as we find
+	 * suitable stores that could be transformed into a push.
+	 * We save them in the stores array, which is indexed by the frame
+	 * offset/4 of each store.
+	 */
+	for(node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
+		ir_node *mem;
+		int offset;
+		int storeslot;
+
+		// it has to be a store
+		if(!is_ia32_Store(node))
+			break;
+
+		// it has to use our sp value
+		if(get_irn_n(node, 0) != irn)
+			continue;
+		// store has to be attached to NoMem
+		mem = get_irn_n(node, 3);
+		if(!is_NoMem(mem)) {
+			continue;
+		}
+
+		if( (get_ia32_am_flavour(node) & ia32_am_IS) != 0)
+			break;
+
+		offset = get_ia32_am_offs_int(node);
+
+		storeslot = offset / 4;
+		if(storeslot >= MAXPUSH_OPTIMIZE)
+			continue;
+
+		// storing into the same slot twice is bad (and shouldn't happen...)
+		if(stores[storeslot] != NULL)
+			break;
+
+		// storing at half-slots is bad
+		if(offset % 4 != 0)
+			break;
+
+		stores[storeslot] = node;
+	}
+
+	curr_sp = get_irn_n(irn, 0);
+
+	// walk the stores in reverse order and create pushes for them
+	i = (offset / 4) - 1;
+	if(i >= MAXPUSH_OPTIMIZE) {
+		i = MAXPUSH_OPTIMIZE - 1;
+	}
+
+	for( ; i >= 0; --i) {
+		const arch_register_t *spreg;
+		ir_node *push;
+		ir_node *val, *mem, *mem_proj;
+		ir_node *store = stores[i];
+		ir_node *noreg = ia32_new_NoReg_gp(cg);
+
+		if(store == NULL || is_Bad(store))
+			break;
+
+		val = get_irn_n(store, 2);
+		mem = get_irn_n(store, 3);
+		spreg = arch_get_irn_register(cg->arch_env, curr_sp);
+
+		// create a push
+		push = new_rd_ia32_Push(NULL, irg, block, noreg, noreg, val, curr_sp, mem);
+
+		set_ia32_am_support(push, ia32_am_Source);
+		copy_ia32_Immop_attr(push, store);
+
+		sched_add_before(irn, push);
+
+		// create stack pointer proj
+		curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
+		arch_set_irn_register(cg->arch_env, curr_sp, spreg);
+		sched_add_before(irn, curr_sp);
+
+		// create memory proj
+		mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);
+		sched_add_before(irn, mem_proj);
+
+		// use the memproj now
+		exchange(store, mem_proj);
+
+		// we can remove the store now
+		sched_remove(store);
+
+		offset -= 4;
+	}
+
+	be_set_IncSP_offset(irn, offset);
+
+	// can we remove the IncSP now?
+	if(offset == 0) {
+		const ir_edge_t *edge, *next;
+
+		foreach_out_edge_safe(irn, edge, next) {
+			ir_node *arg = get_edge_src_irn(edge);
+			int pos = get_edge_src_pos(edge);
+
+			set_irn_n(arg, pos, curr_sp);
+		}
+
+		set_irn_n(irn, 0, new_Bad());
+		sched_remove(irn);
+	} else {
+		set_irn_n(irn, 0, curr_sp);
+	}
+}
+
+#if 0
 /**
  * Tries to optimize two following IncSP.
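+ * Example: an IncSP of 8 immediately followed by an IncSP of 12 is merged
+ * into a single IncSP of 20; the two offsets are simply summed.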
*/ @@ -404,54 +371,48 @@ static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) { ir_node *prev = be_get_IncSP_pred(irn); int real_uses = get_irn_n_edges(prev); - if (real_uses != 1) { - /* - This is a hack that should be removed if be_abi_fix_stack_nodes() - is fixed. Currently it leaves some IncSP's outside the chain ... - The previous IncSp is NOT our prev, but directly scheduled before ... - Impossible in a bug-free implementation :-) - */ - prev = sched_prev(irn); - real_uses = 1; - } - if (be_is_IncSP(prev) && real_uses == 1) { /* first IncSP has only one IncSP user, kill the first one */ - unsigned prev_offs = be_get_IncSP_offset(prev); - be_stack_dir_t prev_dir = be_get_IncSP_direction(prev); - unsigned curr_offs = be_get_IncSP_offset(irn); - be_stack_dir_t curr_dir = be_get_IncSP_direction(irn); + int prev_offs = be_get_IncSP_offset(prev); + int curr_offs = be_get_IncSP_offset(irn); - int new_ofs = prev_offs * (prev_dir == be_stack_dir_expand ? -1 : +1) + - curr_offs * (curr_dir == be_stack_dir_expand ? -1 : +1); + be_set_IncSP_offset(prev, prev_offs + curr_offs); - if (new_ofs < 0) { - new_ofs = -new_ofs; - curr_dir = be_stack_dir_expand; - } - else - curr_dir = be_stack_dir_shrink; - be_set_IncSP_offset(prev, 0); - be_set_IncSP_offset(irn, (unsigned)new_ofs); - be_set_IncSP_direction(irn, curr_dir); + /* Omit the optimized IncSP */ + be_set_IncSP_pred(irn, be_get_IncSP_pred(prev)); + + set_irn_n(prev, 0, new_Bad()); + sched_remove(prev); } } +#endif /** * Performs Peephole Optimizations. */ -void ia32_peephole_optimization(ir_node *irn, void *env) { +static void ia32_peephole_optimize_node(ir_node *irn, void *env) { ia32_code_gen_t *cg = env; - if (is_ia32_TestJmp(irn)) - ia32_optimize_TestJmp(irn, cg); - else if (is_ia32_CondJmp(irn)) - ia32_optimize_CondJmp(irn, cg); - else if (be_is_IncSP(irn)) - ia32_optimize_IncSP(irn, cg); -} + /* AMD CPUs want explicit compare before conditional jump */ + if (! ARCH_AMD(cg->opt_arch)) { + if (is_ia32_TestJmp(irn)) + ia32_optimize_TestJmp(irn, cg); + else if (is_ia32_CondJmp(irn)) + ia32_optimize_CondJmp(irn, cg); + } + if (be_is_IncSP(irn)) { + // optimize_IncSP doesn't respect dependency edges yet... + //ia32_optimize_IncSP(irn, cg); + if (cg->opt & IA32_OPT_PUSHARGS) + ia32_create_Pushs(irn, cg); + } +} + +void ia32_peephole_optimization(ir_graph *irg, ia32_code_gen_t *cg) { + irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, cg); +} /****************************************************************** * _ _ __ __ _ @@ -463,6 +424,11 @@ void ia32_peephole_optimization(ir_node *irn, void *env) { * ******************************************************************/ +typedef struct { + ia32_code_gen_t *cg; + heights_t *h; +} ia32_am_opt_env_t; + static int node_is_ia32_comm(const ir_node *irn) { return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0; } @@ -478,48 +444,6 @@ static int ia32_get_irn_n_edges(const ir_node *irn) { return cnt; } -/** - * Returns the first mode_M Proj connected to irn. - */ -static ir_node *get_mem_proj(const ir_node *irn) { - const ir_edge_t *edge; - ir_node *src; - - assert(get_irn_mode(irn) == mode_T && "expected mode_T node"); - - foreach_out_edge(irn, edge) { - src = get_edge_src_irn(edge); - - assert(is_Proj(src) && "Proj expected"); - - if (get_irn_mode(src) == mode_M) - return src; - } - - return NULL; -} - -/** - * Returns the first Proj with mode != mode_M connected to irn. 
- */ -static ir_node *get_res_proj(const ir_node *irn) { - const ir_edge_t *edge; - ir_node *src; - - assert(get_irn_mode(irn) == mode_T && "expected mode_T node"); - - foreach_out_edge(irn, edge) { - src = get_edge_src_irn(edge); - - assert(is_Proj(src) && "Proj expected"); - - if (get_irn_mode(src) != mode_M) - return src; - } - - return NULL; -} - /** * Determines if pred is a Proj and if is_op_func returns true for it's predecessor. * @@ -528,11 +452,7 @@ static ir_node *get_res_proj(const ir_node *irn) { * @return 1 if conditions are fulfilled, 0 otherwise */ static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) { - if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) { - return 1; - } - - return 0; + return is_op_func(pred); } /** @@ -557,47 +477,144 @@ static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred, return 0; } - - /** - * Checks if irn is a candidate for address calculation or address mode. + * Checks if irn is a candidate for address calculation. We avoid transforming + * adds to leas if they have a load as pred, because then we can use AM mode + * for the add later. * - * address calculation (AC): * - none of the operand must be a Load within the same block OR * - all Loads must have more than one user OR - * - the irn has a frame entity (it's a former FrameAddr) + * + * @param block The block the Loads must/mustnot be in + * @param irn The irn to check + * return 1 if irn is a candidate, 0 otherwise + */ +static int is_addr_candidate(const ir_node *irn) { +#ifndef AGGRESSIVE_AM + const ir_node *block = get_nodes_block(irn); + ir_node *left, *right; + int n; + + left = get_irn_n(irn, 2); + right = get_irn_n(irn, 3); + + if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) { + n = ia32_get_irn_n_edges(left); + /* load with only one user: don't create LEA */ + if(n == 1) + return 0; + } + + if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) { + n = ia32_get_irn_n_edges(right); + if(n == 1) + return 0; + } +#endif + + return 1; +} + +/** + * Checks if irn is a candidate for address mode. 
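+ * For example, in an Add where one operand is produced by a Load in the
+ * same block, the Load can later be folded into the operation as a memory
+ * operand (source address mode), provided the conditions below hold.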
* * address mode (AM): * - at least one operand has to be a Load within the same block AND * - the load must not have other users than the irn AND * - the irn must not have a frame entity set * - * @param block The block the Loads must/not be in + * @param cg The ia32 code generator + * @param h The height information of the irg + * @param block The block the Loads must/mustnot be in * @param irn The irn to check - * @param check_addr 1 if to check for address calculation, 0 otherwise - * return 1 if irn is a candidate for AC or AM, 0 otherwise + * return 0 if irn is no candidate, 1 if left load can be used, 2 if right one, 3 for both */ -static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr) { - ir_node *in; - int n, is_cand = check_addr; +static ia32_am_cand_t is_am_candidate(ia32_code_gen_t *cg, heights_t *h, const ir_node *block, ir_node *irn) { + ir_node *in, *load, *other, *left, *right; + int is_cand = 0, cand; + int arity; + + if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn) || is_ia32_vfild(irn) || is_ia32_vfist(irn) || + is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn)) + return 0; + + if(get_ia32_frame_ent(irn) != NULL) + return IA32_AM_CAND_NONE; - in = get_irn_n(irn, 2); + left = get_irn_n(irn, 2); + arity = get_irn_arity(irn); + assert(arity == 5 || arity == 4); + if(arity == 5) { + /* binary op */ + right = get_irn_n(irn, 3); + } else { + /* unary op */ + right = left; + } + + in = left; if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) { +#ifndef AGGRESSIVE_AM + int n; n = ia32_get_irn_n_edges(in); - is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand); + is_cand = (n == 1) ? 1 : is_cand; /* load with more than one user: no AM */ +#else + is_cand = 1; +#endif + + load = get_Proj_pred(in); + other = right; + + /* 8bit Loads are not supported (for binary ops), + * they cannot be used with every register */ + if (get_irn_arity(irn) != 4 && get_mode_size_bits(get_ia32_ls_mode(load)) < 16) { + assert(get_irn_arity(irn) == 5); + is_cand = 0; + } + + /* If there is a data dependency of other irn from load: cannot use AM */ + if (is_cand && get_nodes_block(other) == block) { + other = skip_Proj(other); + is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand; + /* this could happen in loops */ + is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand; + } } - in = get_irn_n(irn, 3); + cand = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE; + in = right; + is_cand = 0; if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) { +#ifndef AGGRESSIVE_AM + int n; n = ia32_get_irn_n_edges(in); - is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand); + is_cand = (n == 1) ? 1 : is_cand; /* load with more than one user: no AM */ +#else + is_cand = 1; +#endif + + load = get_Proj_pred(in); + other = left; + + /* 8bit Loads are not supported, they cannot be used with every register */ + if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16) + is_cand = 0; + + /* If there is a data dependency of other irn from load: cannot use load */ + if (is_cand && get_nodes_block(other) == block) { + other = skip_Proj(other); + is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand; + /* this could happen in loops */ + is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand; + } } - is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : is_cand; + cand = is_cand ? 
(cand | IA32_AM_CAND_RIGHT) : cand; - return is_cand; + /* if the irn has a frame entity: we do not use address mode */ + return cand; } /** @@ -607,55 +624,196 @@ static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr static int load_store_addr_is_equal(const ir_node *load, const ir_node *store, const ir_node *addr_b, const ir_node *addr_i) { - int is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1)); - entity *lent = get_ia32_frame_ent(load); - entity *sent = get_ia32_frame_ent(store); - ident *lid = get_ia32_am_sc(load); - ident *sid = get_ia32_am_sc(store); - char *loffs = get_ia32_am_offs(load); - char *soffs = get_ia32_am_offs(store); - - /* are both entities set and equal? */ - if (is_equal && (lent || sent)) - is_equal = lent && sent && (lent == sent); - - /* are address mode idents set and equal? */ - if (is_equal && (lid || sid)) - is_equal = lid && sid && (lid == sid); - - /* are offsets set and equal */ - if (is_equal && (loffs || soffs)) - is_equal = loffs && soffs && strcmp(loffs, soffs) == 0; - - /* are the load and the store of the same mode? */ - is_equal = is_equal ? get_ia32_ls_mode(load) == get_ia32_ls_mode(store) : 0; - - return is_equal; + if(get_irn_n(load, 0) != addr_b) + return 0; + if(get_irn_n(load, 1) != addr_i) + return 0; + + if(get_ia32_frame_ent(load) != get_ia32_frame_ent(store)) + return 0; + + if(get_ia32_am_sc(load) != get_ia32_am_sc(store)) + return 0; + if(is_ia32_am_sc_sign(load) != is_ia32_am_sc_sign(store)) + return 0; + if(get_ia32_am_offs_int(load) != get_ia32_am_offs_int(store)) + return 0; + if(get_ia32_ls_mode(load) != get_ia32_ls_mode(store)) + return 0; + + return 1; } +typedef enum _ia32_take_lea_attr { + IA32_LEA_ATTR_NONE = 0, + IA32_LEA_ATTR_BASE = (1 << 0), + IA32_LEA_ATTR_INDEX = (1 << 1), + IA32_LEA_ATTR_OFFS = (1 << 2), + IA32_LEA_ATTR_SCALE = (1 << 3), + IA32_LEA_ATTR_AMSC = (1 << 4), + IA32_LEA_ATTR_FENT = (1 << 5) +} ia32_take_lea_attr; + +/** + * Decides if we have to keep the LEA operand or if we can assimilate it. + */ +static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea, + int have_am_sc, ia32_code_gen_t *cg) +{ + ir_entity *irn_ent = get_ia32_frame_ent(irn); + ir_entity *lea_ent = get_ia32_frame_ent(lea); + int ret_val = 0; + int is_noreg_base = be_is_NoReg(cg, base); + int is_noreg_index = be_is_NoReg(cg, index); + ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea); + + /* If the Add and the LEA both have a different frame entity set: keep */ + if (irn_ent && lea_ent && (irn_ent != lea_ent)) + return IA32_LEA_ATTR_NONE; + else if (! irn_ent && lea_ent) + ret_val |= IA32_LEA_ATTR_FENT; + + /* If the Add and the LEA both have already an address mode symconst: keep */ + if (have_am_sc && get_ia32_am_sc(lea)) + return IA32_LEA_ATTR_NONE; + else if (get_ia32_am_sc(lea)) + ret_val |= IA32_LEA_ATTR_AMSC; + + /* Check the different base-index combinations */ + + if (! is_noreg_base && ! is_noreg_index) { + /* Assimilate if base is the lea and the LEA is just a Base + Offset calculation */ + if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) { + if (am_flav & ia32_O) + ret_val |= IA32_LEA_ATTR_OFFS; + + ret_val |= IA32_LEA_ATTR_BASE; + } + else + return IA32_LEA_ATTR_NONE; + } + else if (! 
is_noreg_base && is_noreg_index) {
+		/* Base is set but index not */
+		if (base == lea) {
+			/* Base points to LEA: assimilate everything */
+			if (am_flav & ia32_O)
+				ret_val |= IA32_LEA_ATTR_OFFS;
+			if (am_flav & ia32_S)
+				ret_val |= IA32_LEA_ATTR_SCALE;
+			if (am_flav & ia32_I)
+				ret_val |= IA32_LEA_ATTR_INDEX;
+
+			ret_val |= IA32_LEA_ATTR_BASE;
+		}
+		else if (am_flav & ia32_B ? 0 : 1) {
+			/* Base is not the LEA but the LEA is an index only calculation: assimilate */
+			if (am_flav & ia32_O)
+				ret_val |= IA32_LEA_ATTR_OFFS;
+			if (am_flav & ia32_S)
+				ret_val |= IA32_LEA_ATTR_SCALE;
+
+			ret_val |= IA32_LEA_ATTR_INDEX;
+		}
+		else
+			return IA32_LEA_ATTR_NONE;
+	}
+	else if (is_noreg_base && ! is_noreg_index) {
+		/* Index is set but not base */
+		if (index == lea) {
+			/* Index points to LEA: assimilate everything */
+			if (am_flav & ia32_O)
+				ret_val |= IA32_LEA_ATTR_OFFS;
+			if (am_flav & ia32_S)
+				ret_val |= IA32_LEA_ATTR_SCALE;
+			if (am_flav & ia32_B)
+				ret_val |= IA32_LEA_ATTR_BASE;
+
+			ret_val |= IA32_LEA_ATTR_INDEX;
+		}
+		else if (am_flav & ia32_I ? 0 : 1) {
+			/* Index is not the LEA but the LEA is a base only calculation: assimilate */
+			if (am_flav & ia32_O)
+				ret_val |= IA32_LEA_ATTR_OFFS;
+			if (am_flav & ia32_S)
+				ret_val |= IA32_LEA_ATTR_SCALE;
+
+			ret_val |= IA32_LEA_ATTR_BASE;
+		}
+		else
+			return IA32_LEA_ATTR_NONE;
+	}
+	else {
+		assert(0 && "base or index must have been set");
+	}
+
+	return ret_val;
+}
+
+/**
+ * Adds res before irn into schedule if irn was scheduled.
+ * @param irn  The schedule point
+ * @param res  The node to be scheduled
+ */
+static INLINE void try_add_to_sched(ir_node *irn, ir_node *res) {
+	if (sched_is_scheduled(irn))
+		sched_add_before(irn, res);
+}
+
+/**
+ * Removes node from schedule if it is not used anymore. If irn is a mode_T node
+ * all its Projs are removed as well.
+ * @param irn The irn to be removed from schedule + */ +static INLINE void try_remove_from_sched(ir_node *node) { + int i, arity; + + if(get_irn_mode(node) == mode_T) { + const ir_edge_t *edge, *next; + foreach_out_edge_safe(node, edge, next) { + ir_node *proj = get_edge_src_irn(edge); + try_remove_from_sched(proj); + } + } + + if(get_irn_n_edges(node) != 0) + return; + + if (sched_is_scheduled(node)) { + sched_remove(node); + } + + arity = get_irn_arity(node); + for(i = 0; i < arity; ++i) { + set_irn_n(node, i, new_Bad()); + } +} /** * Folds Add or Sub to LEA if possible */ -static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { +static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn) { ir_graph *irg = get_irn_irg(irn); - dbg_info *dbg = get_irn_dbg_info(irn); + dbg_info *dbg_info = get_irn_dbg_info(irn); ir_node *block = get_nodes_block(irn); ir_node *res = irn; - char *offs = NULL; - const char *offs_cnst = NULL; - char *offs_lea = NULL; + ir_node *shift = NULL; + ir_node *lea_o = NULL; + ir_node *lea = NULL; + long offs = 0; + long offs_cnst = 0; + long offs_lea = 0; int scale = 0; int isadd = 0; int dolea = 0; int have_am_sc = 0; int am_sc_sign = 0; - ident *am_sc = NULL; + ir_entity *am_sc = NULL; + ir_entity *lea_ent = NULL; + ir_node *noreg = ia32_new_NoReg_gp(cg); ir_node *left, *right, *temp; ir_node *base, *index; + int consumed_left_shift; ia32_am_flavour_t am_flav; - DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;) if (is_ia32_Add(irn)) isadd = 1; @@ -695,71 +853,78 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { base = left; index = noreg; - offs = NULL; + offs = 0; scale = 0; am_flav = 0; /* check for operation with immediate */ if (is_ia32_ImmConst(irn)) { - DBG((mod, LEVEL_1, "\tfound op with imm const")); + tarval *tv = get_ia32_Immop_tarval(irn); + + DBG((dbg, LEVEL_1, "\tfound op with imm const")); - offs_cnst = get_ia32_cnst(irn); + offs_cnst = get_tarval_long(tv); dolea = 1; } - else if (is_ia32_ImmSymConst(irn)) { - DBG((mod, LEVEL_1, "\tfound op with imm symconst")); + else if (isadd && is_ia32_ImmSymConst(irn)) { + DBG((dbg, LEVEL_1, "\tfound op with imm symconst")); have_am_sc = 1; dolea = 1; - am_sc = get_ia32_id_cnst(irn); + am_sc = get_ia32_Immop_symconst(irn); am_sc_sign = is_ia32_am_sc_sign(irn); } /* determine the operand which needs to be checked */ - if (be_is_NoReg(cg, right)) { - temp = left; - } - else { - temp = right; - } + temp = be_is_NoReg(cg, right) ? left : right; /* check if right operand is AMConst (LEA with ia32_am_O) */ /* but we can only eat it up if there is no other symconst */ /* because the linker won't accept two symconsts */ if (! 
have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) { - DBG((mod, LEVEL_1, "\tgot op with LEA am_O")); + DBG((dbg, LEVEL_1, "\tgot op with LEA am_O")); - offs_lea = get_ia32_am_offs(temp); + offs_lea = get_ia32_am_offs_int(temp); am_sc = get_ia32_am_sc(temp); am_sc_sign = is_ia32_am_sc_sign(temp); have_am_sc = 1; dolea = 1; + lea_o = temp; + + if (temp == base) + base = noreg; + else if (temp == right) + right = noreg; } if (isadd) { /* default for add -> make right operand to index */ - index = right; - dolea = 1; + index = right; + dolea = 1; + consumed_left_shift = -1; - DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index)); + DBG((dbg, LEVEL_1, "\tgot LEA candidate with index %+F\n", index)); /* determine the operand which needs to be checked */ temp = left; if (is_ia32_Lea(left)) { temp = right; + consumed_left_shift = 0; } /* check for SHL 1,2,3 */ if (pred_is_specific_node(temp, is_ia32_Shl)) { - temp = get_Proj_pred(temp); - if (get_ia32_Immop_tarval(temp)) { - scale = get_tarval_long(get_ia32_Immop_tarval(temp)); + if (is_ia32_ImmConst(temp)) { + long shiftval = get_tarval_long(get_ia32_Immop_tarval(temp)); - if (scale <= 3) { - index = get_irn_n(temp, 2); + if (shiftval <= 3) { + index = get_irn_n(temp, 2); + consumed_left_shift = consumed_left_shift < 0 ? 1 : 0; + shift = temp; + scale = shiftval; - DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index)); + DBG((dbg, LEVEL_1, "\tgot scaled index %+F\n", index)); } } } @@ -770,71 +935,66 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { if (left == right) { base = noreg; } - else if (! is_ia32_Lea(left) && (index != right)) { - /* index != right -> we found a good Shl */ - /* left != LEA -> this Shl was the left operand */ - /* -> base is right operand */ - base = right; + else if (consumed_left_shift == 1) { + /* -> base is right operand */ + base = (right == lea_o) ? noreg : right; } } } /* Try to assimilate a LEA as left operand */ if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) { - am_flav = get_ia32_am_flavour(left); - - /* If we have an Add with a real right operand (not NoReg) and */ - /* the LEA contains already an index calculation then we create */ - /* a new LEA. */ - /* If the LEA contains already a frame_entity then we also */ - /* create a new one otherwise we would loose it. */ - if ((isadd && !be_is_NoReg(cg, index) && (am_flav & ia32_am_I)) || /* no new LEA if index already set */ - get_ia32_frame_ent(left) || /* no new LEA if stack access */ - (have_am_sc && get_ia32_am_sc(left))) /* no new LEA if AM symconst already present */ - { - DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n")); + /* check if we can assimilate the LEA */ + int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg); + + if (take_attr == IA32_LEA_ATTR_NONE) { + DBG((dbg, LEVEL_1, "\tleave old LEA, creating new one\n")); } else { - DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n")); - offs = get_ia32_am_offs(left); - am_sc = have_am_sc ? am_sc : get_ia32_am_sc(left); - have_am_sc = am_sc ? 1 : 0; - am_sc_sign = is_ia32_am_sc_sign(left); - base = get_irn_n(left, 0); - index = get_irn_n(left, 1); - scale = get_ia32_am_scale(left); + DBG((dbg, LEVEL_1, "\tgot LEA as left operand ... 
assimilating\n")); + lea = left; /* for statistics */ + + if (take_attr & IA32_LEA_ATTR_OFFS) + offs = get_ia32_am_offs_int(left); + + if (take_attr & IA32_LEA_ATTR_AMSC) { + am_sc = get_ia32_am_sc(left); + have_am_sc = 1; + am_sc_sign = is_ia32_am_sc_sign(left); + } + + if (take_attr & IA32_LEA_ATTR_SCALE) + scale = get_ia32_am_scale(left); + + if (take_attr & IA32_LEA_ATTR_BASE) + base = get_irn_n(left, 0); + + if (take_attr & IA32_LEA_ATTR_INDEX) + index = get_irn_n(left, 1); + + if (take_attr & IA32_LEA_ATTR_FENT) + lea_ent = get_ia32_frame_ent(left); } } /* ok, we can create a new LEA */ if (dolea) { - res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is); + res = new_rd_ia32_Lea(dbg_info, irg, block, base, index); /* add the old offset of a previous LEA */ - if (offs) { - add_ia32_am_offs(res, offs); - } + add_ia32_am_offs_int(res, offs); /* add the new offset */ if (isadd) { - if (offs_cnst) { - add_ia32_am_offs(res, offs_cnst); - } - if (offs_lea) { - add_ia32_am_offs(res, offs_lea); - } - } - else { + add_ia32_am_offs_int(res, offs_cnst); + add_ia32_am_offs_int(res, offs_lea); + } else { /* either lea_O-cnst, -cnst or -lea_O */ - if (offs_cnst) { - if (offs_lea) { - add_ia32_am_offs(res, offs_lea); - } - - sub_ia32_am_offs(res, offs_cnst); - } - else { - sub_ia32_am_offs(res, offs_lea); + if (offs_cnst != 0) { + add_ia32_am_offs_int(res, offs_lea); + add_ia32_am_offs_int(res, -offs_cnst); + } else { + add_ia32_am_offs_int(res, offs_lea); } } @@ -847,17 +1007,21 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { /* copy the frame entity (could be set in case of Add */ /* which was a FrameAddr) */ - set_ia32_frame_ent(res, get_ia32_frame_ent(irn)); - - if (is_ia32_use_frame(irn)) + if (lea_ent != NULL) { + set_ia32_frame_ent(res, lea_ent); set_ia32_use_frame(res); + } else { + set_ia32_frame_ent(res, get_ia32_frame_ent(irn)); + if(is_ia32_use_frame(irn)) + set_ia32_use_frame(res); + } /* set scale */ set_ia32_am_scale(res, scale); am_flav = ia32_am_N; /* determine new am flavour */ - if (offs || offs_cnst || offs_lea) { + if (offs || offs_cnst || offs_lea || have_am_sc) { am_flav |= ia32_O; } if (! 
be_is_NoReg(cg, base)) { @@ -873,47 +1037,124 @@ static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) { set_ia32_op_type(res, ia32_AddrModeS); - DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res))); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn)); - /* get the result Proj of the Add/Sub */ - irn = get_res_proj(irn); + DBG((dbg, LEVEL_1, "\tLEA [%+F + %+F * %d + %d]\n", base, index, scale, get_ia32_am_offs_int(res))); assert(irn && "Couldn't find result proj"); + /* get the result Proj of the Add/Sub */ + try_add_to_sched(irn, res); + /* exchange the old op with the new LEA */ + try_remove_from_sched(irn); exchange(irn, res); + + /* we will exchange it, report here before the Proj is created */ + if (shift && lea && lea_o) { + try_remove_from_sched(shift); + try_remove_from_sched(lea); + try_remove_from_sched(lea_o); + DBG_OPT_LEA4(irn, lea_o, lea, shift, res); + } else if (shift && lea) { + try_remove_from_sched(shift); + try_remove_from_sched(lea); + DBG_OPT_LEA3(irn, lea, shift, res); + } else if (shift && lea_o) { + try_remove_from_sched(shift); + try_remove_from_sched(lea_o); + DBG_OPT_LEA3(irn, lea_o, shift, res); + } else if (lea && lea_o) { + try_remove_from_sched(lea); + try_remove_from_sched(lea_o); + DBG_OPT_LEA3(irn, lea_o, lea, res); + } else if (shift) { + try_remove_from_sched(shift); + DBG_OPT_LEA2(irn, shift, res); + } else if (lea) { + try_remove_from_sched(lea); + DBG_OPT_LEA2(irn, lea, res); + } else if (lea_o) { + try_remove_from_sched(lea_o); + DBG_OPT_LEA2(irn, lea_o, res); + } else { + DBG_OPT_LEA1(irn, res); + } } return res; } + /** - * Optimizes a pattern around irn to address mode if possible. + * Merges a Load/Store node with a LEA. + * @param irn The Load/Store node + * @param lea The LEA */ -void ia32_optimize_am(ir_node *irn, void *env) { - ia32_code_gen_t *cg = env; - ir_node *res = irn; - dbg_info *dbg; - ir_mode *mode; - ir_node *block, *noreg_gp, *noreg_fp; - ir_node *left, *right, *temp; - ir_node *store, *load, *mem_proj; - ir_node *succ, *addr_b, *addr_i; - int check_am_src = 0; - DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;) +static void merge_loadstore_lea(ir_node *irn, ir_node *lea) { + ir_entity *irn_ent = get_ia32_frame_ent(irn); + ir_entity *lea_ent = get_ia32_frame_ent(lea); - if (! is_ia32_irn(irn)) + /* If the irn and the LEA both have a different frame entity set: do not merge */ + if (irn_ent != NULL && lea_ent != NULL && (irn_ent != lea_ent)) return; + else if (irn_ent == NULL && lea_ent != NULL) { + set_ia32_frame_ent(irn, lea_ent); + set_ia32_use_frame(irn); + } + + /* get the AM attributes from the LEA */ + add_ia32_am_offs_int(irn, get_ia32_am_offs_int(lea)); + set_ia32_am_scale(irn, get_ia32_am_scale(lea)); + set_ia32_am_flavour(irn, get_ia32_am_flavour(lea)); + + set_ia32_am_sc(irn, get_ia32_am_sc(lea)); + if (is_ia32_am_sc_sign(lea)) + set_ia32_am_sc_sign(irn); + + set_ia32_op_type(irn, is_ia32_Ld(irn) ? 
ia32_AddrModeS : ia32_AddrModeD); + + /* set base and index */ + set_irn_n(irn, 0, get_irn_n(lea, 0)); + set_irn_n(irn, 1, get_irn_n(lea, 1)); + + try_remove_from_sched(lea); + + /* clear remat flag */ + set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); + + if (is_ia32_Ld(irn)) + DBG_OPT_LOAD_LEA(lea, irn); + else + DBG_OPT_STORE_LEA(lea, irn); - dbg = get_irn_dbg_info(irn); - mode = get_irn_mode(irn); - block = get_nodes_block(irn); - noreg_gp = ia32_new_NoReg_gp(cg); - noreg_fp = ia32_new_NoReg_fp(cg); +} - DBG((mod, LEVEL_1, "checking for AM\n")); +/** + * Sets new_right index of irn to right and new_left index to left. + * Also exchange left and right + */ +static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right, int new_left, int new_right) { + ir_node *temp; - /* 1st part: check for address calculations and transform the into Lea */ + set_irn_n(irn, new_right, *right); + set_irn_n(irn, new_left, *left); + + temp = *left; + *left = *right; + *right = temp; + + /* this is only needed for Compares, but currently ALL nodes + * have this attribute :-) */ + set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn))); +} + +/** + * Performs address calculation optimization (create LEAs if possible) + */ +static void optimize_lea(ia32_code_gen_t *cg, ir_node *irn) { + if (! is_ia32_irn(irn)) + return; /* Following cases can occur: */ /* - Sub (l, imm) -> LEA [base - offset] */ @@ -923,27 +1164,132 @@ void ia32_optimize_am(ir_node *irn, void *env) { /* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset] */ /* - Add (l, r) -> LEA [base + index * scale] */ /* with scale > 1 iff l/r == shl (1,2,3) */ - if (is_ia32_Sub(irn) || is_ia32_Add(irn)) { - left = get_irn_n(irn, 2); - right = get_irn_n(irn, 3); + ir_node *res; + + if(!is_addr_candidate(irn)) + return; + + DBG((dbg, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn)); + res = fold_addr(cg, irn); - /* Do not try to create a LEA if one of the operands is a Load. */ - /* check is irn is a candidate for address calculation */ - if (is_candidate(block, irn, 1)) { - DBG((mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn)); - res = fold_addr(cg, irn, noreg_gp); + if (res != irn) + DB((dbg, LEVEL_1, "transformed into %+F\n", res)); + else + DB((dbg, LEVEL_1, "not transformed\n")); + } else if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) { + /* - Load -> LEA into Load } TODO: If the LEA is used by more than one Load/Store */ + /* - Store -> LEA into Store } it might be better to keep the LEA */ + ir_node *left = get_irn_n(irn, 0); - if (res == irn) - DB((mod, LEVEL_1, "transformed into %+F\n", res)); - else - DB((mod, LEVEL_1, "not transformed\n")); + if (is_ia32_Lea(left)) { + const ir_edge_t *edge, *ne; + ir_node *src; + + /* merge all Loads/Stores connected to this LEA with the LEA */ + foreach_out_edge_safe(left, edge, ne) { + src = get_edge_src_irn(edge); + + if (src && (get_edge_src_pos(edge) == 0) && (is_ia32_Ld(src) || is_ia32_St(src) || is_ia32_Store8Bit(src))) { + DBG((dbg, LEVEL_1, "\nmerging %+F into %+F\n", left, irn)); + if (! 
is_ia32_got_lea(src)) + merge_loadstore_lea(src, left); + set_ia32_got_lea(src); + } + } } } +} + +static void optimize_conv_store(ia32_code_gen_t *cg, ir_node *node) +{ + ir_node *pred; + + if(!is_ia32_Store(node) && !is_ia32_Store8Bit(node)) + return; - /* 2nd part: fold following patterns: */ - /* - Load -> LEA into Load } TODO: If the LEA is used by more than one Load/Store */ - /* - Store -> LEA into Store } it might be better to keep the LEA */ + pred = get_irn_n(node, 2); + if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred)) + return; + + if(get_ia32_ls_mode(pred) != get_ia32_ls_mode(node)) + return; + + /* unnecessary conv, the store already does the conversion */ + set_irn_n(node, 2, get_irn_n(pred, 2)); + if(get_irn_n_edges(pred) == 0) { + be_kill_node(pred); + } +} + +static void optimize_load_conv(ia32_code_gen_t *cg, ir_node *node) +{ + ir_node *pred, *predpred; + + if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node)) + return; + + pred = get_irn_n(node, 2); + if(!is_Proj(pred)) + return; + + predpred = get_Proj_pred(pred); + if(!is_ia32_Load(predpred)) + return; + + /* unnecessary conv, the load already did the conversion */ + exchange(node, pred); +} + +static void optimize_node(ir_node *node, void *env) +{ + ia32_code_gen_t *cg = env; + + optimize_load_conv(cg, node); + optimize_conv_store(cg, node); + optimize_lea(cg, node); +} + +/** + * Checks for address mode patterns and performs the + * necessary transformations. + * This function is called by a walker. + */ +static void optimize_am(ir_node *irn, void *env) { + ia32_am_opt_env_t *am_opt_env = env; + ia32_code_gen_t *cg = am_opt_env->cg; + ir_graph *irg = get_irn_irg(irn); + heights_t *h = am_opt_env->h; + ir_node *block, *left, *right; + ir_node *store, *load, *mem_proj; + ir_node *addr_b, *addr_i; + int need_exchange_on_fail = 0; + ia32_am_type_t am_support; + ia32_am_cand_t cand; + ia32_am_cand_t orig_cand; + int dest_possible; + int source_possible; + + static const arch_register_req_t dest_out_reg_req_0 = { + arch_register_req_type_none, + NULL, /* regclass */ + NULL, /* limit bitset */ + -1, /* same pos */ + -1 /* different pos */ + }; + static const arch_register_req_t *dest_am_out_reqs[] = { + &dest_out_reg_req_0 + }; + + if (!is_ia32_irn(irn) || is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) + return; + if (is_ia32_Lea(irn)) + return; + + am_support = get_ia32_am_support(irn); + block = get_nodes_block(irn); + + /* fold following patterns: */ /* - op -> Load into AMop with am_Source */ /* conditions: */ /* - op is am_Source capable AND */ @@ -956,253 +1302,299 @@ void ia32_optimize_am(ir_node *irn, void *env) { /* - the Load is only used by this op AND */ /* - the Load and Store are in the same block AND */ /* - nobody else uses the result of the op */ + if (get_ia32_am_support(irn) == ia32_am_None) + return; - if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) { - /* 1st: check for Load/Store -> LEA */ - if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) { - left = get_irn_n(irn, 0); + cand = is_am_candidate(cg, h, block, irn); + if (cand == IA32_AM_CAND_NONE) + return; - if (is_ia32_Lea(left)) { - DBG((mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn)); + orig_cand = cand; + DBG((dbg, LEVEL_1, "\tfound address mode candidate %+F (candleft %d candright %d)... 
\n", irn, + cand & IA32_AM_CAND_LEFT, cand & IA32_AM_CAND_RIGHT)); - /* get the AM attributes from the LEA */ - add_ia32_am_offs(irn, get_ia32_am_offs(left)); - set_ia32_am_scale(irn, get_ia32_am_scale(left)); - set_ia32_am_flavour(irn, get_ia32_am_flavour(left)); + left = get_irn_n(irn, 2); + if (get_irn_arity(irn) == 4) { + /* it's an "unary" operation */ + right = left; + assert(cand == IA32_AM_CAND_BOTH); + } else { + right = get_irn_n(irn, 3); + } + + dest_possible = am_support & ia32_am_Dest ? 1 : 0; + source_possible = am_support & ia32_am_Source ? 1 : 0; - set_ia32_am_sc(irn, get_ia32_am_sc(left)); - if (is_ia32_am_sc_sign(left)) - set_ia32_am_sc_sign(irn); + DBG((dbg, LEVEL_2, "\tdest_possible %d source_possible %d ... \n", dest_possible, source_possible)); - set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD); + if (dest_possible) { + addr_b = NULL; + addr_i = NULL; + store = NULL; - /* set base and index */ - set_irn_n(irn, 0, get_irn_n(left, 0)); - set_irn_n(irn, 1, get_irn_n(left, 1)); + /* we should only have 1 user which is a store */ + if (ia32_get_irn_n_edges(irn) == 1) { + ir_node *succ = get_edge_src_irn(get_irn_out_edge_first(irn)); - /* clear remat flag */ - set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); + if (is_ia32_xStore(succ) || is_ia32_Store(succ)) { + store = succ; + addr_b = get_irn_n(store, 0); + addr_i = get_irn_n(store, 1); } } - /* check if the node is an address mode candidate */ - else if (is_candidate(block, irn, 0)) { - DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn)); - - left = get_irn_n(irn, 2); - if (get_irn_arity(irn) == 4) { - /* it's an "unary" operation */ - right = left; - } - else { - right = get_irn_n(irn, 3); + + if (store == NULL) { + DBG((dbg, LEVEL_2, "\tno store found, not using dest_mode\n")); + dest_possible = 0; + } + } + + if (dest_possible) { + /* normalize nodes, we need the interesting load on the left side */ + if (cand & IA32_AM_CAND_RIGHT) { + load = get_Proj_pred(right); + if (load_store_addr_is_equal(load, store, addr_b, addr_i) + && node_is_ia32_comm(irn)) { + DBG((dbg, LEVEL_2, "\texchanging left/right\n")); + exchange_left_right(irn, &left, &right, 3, 2); + need_exchange_on_fail ^= 1; + if (cand == IA32_AM_CAND_RIGHT) + cand = IA32_AM_CAND_LEFT; } + } + } - /* normalize commutative ops */ - if (node_is_ia32_comm(irn)) { - /* Assure that right operand is always a Load if there is one */ - /* because non-commutative ops can only use Dest AM if the right */ - /* operand is a load, so we only need to check right operand. */ - if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) - { - set_irn_n(irn, 2, right); - set_irn_n(irn, 3, left); - - temp = left; - left = right; - right = temp; - } + if (dest_possible) { + if(cand & IA32_AM_CAND_LEFT && is_Proj(left)) { + load = get_Proj_pred(left); + +#ifndef AGGRESSIVE_AM + /* we have to be the only user of the load */ + if (get_irn_n_edges(left) > 1) { + DBG((dbg, LEVEL_2, "\tmatching load has too may users, not using dest_mode\n")); + dest_possible = 0; } +#endif + } else { + DBG((dbg, LEVEL_2, "\tno matching load found, not using dest_mode")); + dest_possible = 0; + } + } - /* check for Store -> op -> Load */ - - /* Store -> op -> Load optimization is only possible if supported by op */ - /* and if right operand is a Load */ - if ((get_ia32_am_support(irn) & ia32_am_Dest) && - pred_is_specific_nodeblock(block, right, is_ia32_Ld)) - { - - /* An address mode capable op always has a result Proj. 
*/ - /* If this Proj is used by more than one other node, we don't need to */ - /* check further, otherwise we check for Store and remember the address, */ - /* the Store points to. */ - - succ = get_res_proj(irn); - assert(succ && "Couldn't find result proj"); - - addr_b = NULL; - addr_i = NULL; - store = NULL; - - /* now check for users and Store */ - if (ia32_get_irn_n_edges(succ) == 1) { - succ = get_edge_src_irn(get_irn_out_edge_first(succ)); - - if (is_ia32_fStore(succ) || is_ia32_Store(succ)) { - store = succ; - addr_b = get_irn_n(store, 0); - - /* Could be that the Store is connected to the address */ - /* calculating LEA while the Load is already transformed. */ - if (is_ia32_Lea(addr_b)) { - succ = addr_b; - addr_b = get_irn_n(succ, 0); - addr_i = get_irn_n(succ, 1); - } - else { - addr_i = noreg_gp; - } - } - } + if (dest_possible) { + /* the store has to use the loads memory or the same memory + * as the load */ + ir_node *loadmem = get_irn_n(load, 2); + ir_node *storemem = get_irn_n(store, 3); + assert(get_irn_mode(loadmem) == mode_M); + assert(get_irn_mode(storemem) == mode_M); + /* TODO there could be a sync between store and load... */ + if(storemem != loadmem && (!is_Proj(storemem) || get_Proj_pred(storemem) != load)) { + DBG((dbg, LEVEL_2, "\tload/store using different memories, not using dest_mode")); + dest_possible = 0; + } + } - if (store) { - /* we found a Store as single user: Now check for Load */ - - /* Extra check for commutative ops with two Loads */ - /* -> put the interesting Load right */ - if (node_is_ia32_comm(irn) && - pred_is_specific_nodeblock(block, left, is_ia32_Ld)) - { - if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) && - (addr_i == get_irn_n(get_Proj_pred(left), 1))) - { - /* We exchange left and right, so it's easier to kill */ - /* the correct Load later and to handle unary operations. 
*/ - set_irn_n(irn, 2, right); - set_irn_n(irn, 3, left); - - temp = left; - left = right; - right = temp; - } - } - - /* skip the Proj for easier access */ - load = get_Proj_pred(right); - - /* Compare Load and Store address */ - if (load_store_addr_is_equal(load, store, addr_b, addr_i)) { - /* Right Load is from same address, so we can */ - /* disconnect the Load and Store here */ - - /* set new base, index and attributes */ - set_irn_n(irn, 0, addr_b); - set_irn_n(irn, 1, addr_i); - add_ia32_am_offs(irn, get_ia32_am_offs(load)); - set_ia32_am_scale(irn, get_ia32_am_scale(load)); - set_ia32_am_flavour(irn, get_ia32_am_flavour(load)); - set_ia32_op_type(irn, ia32_AddrModeD); - set_ia32_frame_ent(irn, get_ia32_frame_ent(load)); - set_ia32_ls_mode(irn, get_ia32_ls_mode(load)); - - set_ia32_am_sc(irn, get_ia32_am_sc(load)); - if (is_ia32_am_sc_sign(load)) - set_ia32_am_sc_sign(irn); - - if (is_ia32_use_frame(load)) - set_ia32_use_frame(irn); - - /* connect to Load memory and disconnect Load */ - if (get_irn_arity(irn) == 5) { - /* binary AMop */ - set_irn_n(irn, 4, get_irn_n(load, 2)); - set_irn_n(irn, 3, noreg_gp); - } - else { - /* unary AMop */ - set_irn_n(irn, 3, get_irn_n(load, 2)); - set_irn_n(irn, 2, noreg_gp); - } - - /* connect the memory Proj of the Store to the op */ - mem_proj = get_mem_proj(store); - set_Proj_pred(mem_proj, irn); - set_Proj_proj(mem_proj, 1); - - /* clear remat flag */ - set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); - - DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store)); - } - } /* if (store) */ - else if (get_ia32_am_support(irn) & ia32_am_Source) { - /* There was no store, check if we still can optimize for source address mode */ - check_am_src = 1; - } - } /* if (support AM Dest) */ - else if (get_ia32_am_support(irn) & ia32_am_Source) { - /* op doesn't support am AM Dest -> check for AM Source */ - check_am_src = 1; + if (dest_possible) { + /* Compare Load and Store address */ + if (!load_store_addr_is_equal(load, store, addr_b, addr_i)) { + DBG((dbg, LEVEL_2, "\taddresses not equal, not using dest_mode")); + dest_possible = 0; + } + } + + if (dest_possible) { + /* all conditions fullfilled, do the transformation */ + assert(cand & IA32_AM_CAND_LEFT); + + /* set new base, index and attributes */ + set_irn_n(irn, 0, addr_b); + set_irn_n(irn, 1, addr_i); + add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load)); + set_ia32_am_scale(irn, get_ia32_am_scale(load)); + set_ia32_am_flavour(irn, get_ia32_am_flavour(load)); + set_ia32_op_type(irn, ia32_AddrModeD); + set_ia32_frame_ent(irn, get_ia32_frame_ent(load)); + set_ia32_ls_mode(irn, get_ia32_ls_mode(load)); + + set_ia32_am_sc(irn, get_ia32_am_sc(load)); + if (is_ia32_am_sc_sign(load)) + set_ia32_am_sc_sign(irn); + + /* connect to Load memory and disconnect Load */ + if (get_irn_arity(irn) == 5) { + /* binary AMop */ + set_irn_n(irn, 4, get_irn_n(load, 2)); + set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2)); + } else { + /* unary AMop */ + set_irn_n(irn, 3, get_irn_n(load, 2)); + set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2)); + } + + /* change node mode and out register requirements */ + set_irn_mode(irn, mode_M); + set_ia32_out_req_all(irn, dest_am_out_reqs); + + /* connect the memory Proj of the Store to the op */ + edges_reroute(store, irn, irg); + + /* clear remat flag */ + set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); + + try_remove_from_sched(store); + try_remove_from_sched(load); + DBG_OPT_AM_D(load, 
store, irn); + + DB((dbg, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store)); + need_exchange_on_fail = 0; + source_possible = 0; + } + + if (source_possible) { + /* normalize ops, we need the load on the right */ + if(cand == IA32_AM_CAND_LEFT) { + if(node_is_ia32_comm(irn)) { + exchange_left_right(irn, &left, &right, 3, 2); + need_exchange_on_fail ^= 1; + cand = IA32_AM_CAND_RIGHT; + } else { + source_possible = 0; } + } + } - /* normalize commutative ops */ - if (node_is_ia32_comm(irn)) { - /* Assure that left operand is always a Load if there is one */ - /* because non-commutative ops can only use Source AM if the */ - /* left operand is a Load, so we only need to check the left */ - /* operand afterwards. */ - if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) { - set_irn_n(irn, 2, right); - set_irn_n(irn, 3, left); - - temp = left; - left = right; - right = temp; - } + if (source_possible) { + /* all conditions fullfilled, do transform */ + assert(cand & IA32_AM_CAND_RIGHT); + load = get_Proj_pred(right); + + if(get_irn_n_edges(load) > 1) { + source_possible = 0; + } + } + + if (source_possible) { + addr_b = get_irn_n(load, 0); + addr_i = get_irn_n(load, 1); + + /* set new base, index and attributes */ + set_irn_n(irn, 0, addr_b); + set_irn_n(irn, 1, addr_i); + add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load)); + set_ia32_am_scale(irn, get_ia32_am_scale(load)); + set_ia32_am_flavour(irn, get_ia32_am_flavour(load)); + set_ia32_op_type(irn, ia32_AddrModeS); + set_ia32_frame_ent(irn, get_ia32_frame_ent(load)); + set_ia32_ls_mode(irn, get_ia32_ls_mode(load)); + + set_ia32_am_sc(irn, get_ia32_am_sc(load)); + if (is_ia32_am_sc_sign(load)) + set_ia32_am_sc_sign(irn); + + /* clear remat flag */ + set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); + + if (is_ia32_use_frame(load)) { + if(get_ia32_frame_ent(load) == NULL) { + set_ia32_need_stackent(irn); } + set_ia32_use_frame(irn); + } - /* optimize op -> Load iff Load is only used by this op */ - /* and left operand is a Load which only used by this irn */ - if (check_am_src && - pred_is_specific_nodeblock(block, left, is_ia32_Ld) && - (ia32_get_irn_n_edges(left) == 1)) - { - left = get_Proj_pred(left); - - addr_b = get_irn_n(left, 0); - addr_i = get_irn_n(left, 1); - - /* set new base, index and attributes */ - set_irn_n(irn, 0, addr_b); - set_irn_n(irn, 1, addr_i); - add_ia32_am_offs(irn, get_ia32_am_offs(left)); - set_ia32_am_scale(irn, get_ia32_am_scale(left)); - set_ia32_am_flavour(irn, get_ia32_am_flavour(left)); - set_ia32_op_type(irn, ia32_AddrModeS); - set_ia32_frame_ent(irn, get_ia32_frame_ent(left)); - set_ia32_ls_mode(irn, get_ia32_ls_mode(left)); - - set_ia32_am_sc(irn, get_ia32_am_sc(left)); - if (is_ia32_am_sc_sign(left)) - set_ia32_am_sc_sign(irn); - - /* clear remat flag */ - set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable); - - if (is_ia32_use_frame(left)) - set_ia32_use_frame(irn); - - /* connect to Load memory */ - if (get_irn_arity(irn) == 5) { - /* binary AMop */ - set_irn_n(irn, 4, get_irn_n(left, 2)); - } - else { - /* unary AMop */ - set_irn_n(irn, 3, get_irn_n(left, 2)); - } + /* connect to Load memory and disconnect Load */ + if (get_irn_arity(irn) == 5) { + /* binary AMop */ + set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3)); + set_irn_n(irn, 4, get_irn_n(load, 2)); + } else { + assert(get_irn_arity(irn) == 4); + /* unary AMop */ + set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2)); + set_irn_n(irn, 3, get_irn_n(load, 2)); + 
} - /* disconnect from Load */ - set_irn_n(irn, 2, noreg_gp); + DBG_OPT_AM_S(load, irn); - /* If Load has a memory Proj, connect it to the op */ - mem_proj = get_mem_proj(left); - if (mem_proj) { - set_Proj_pred(mem_proj, irn); - set_Proj_proj(mem_proj, 1); - } + /* If Load has a memory Proj, connect it to the op */ + mem_proj = ia32_get_proj_for_mode(load, mode_M); + if (mem_proj != NULL) { + ir_node *res_proj; + ir_mode *mode = get_irn_mode(irn); + + res_proj = new_rd_Proj(get_irn_dbg_info(irn), irg, + get_nodes_block(irn), new_Unknown(mode_T), + mode, 0); + set_irn_mode(irn, mode_T); + edges_reroute(irn, res_proj, irg); + set_Proj_pred(res_proj, irn); - DB((mod, LEVEL_1, "merged with %+F into source AM\n", left)); + set_Proj_pred(mem_proj, irn); + set_Proj_proj(mem_proj, 1); + + if(sched_is_scheduled(irn)) { + sched_add_after(irn, res_proj); + sched_add_after(irn, mem_proj); } } + + if(get_irn_n_edges(load) == 0) { + try_remove_from_sched(load); + } + need_exchange_on_fail = 0; + + DB((dbg, LEVEL_1, "merged with %+F into source AM\n", load)); } + + /* was exchanged but optimize failed: exchange back */ + if (need_exchange_on_fail) { + exchange_left_right(irn, &left, &right, 3, 2); + } +} + +/** + * Performs conv and address mode optimization. + */ +void ia32_optimize_graph(ia32_code_gen_t *cg) { + /* if we are supposed to do AM or LEA optimization: recalculate edges */ + if (! (cg->opt & (IA32_OPT_DOAM | IA32_OPT_LEA))) { + /* no optimizations at all */ + return; + } + + /* beware: we cannot optimize LEA and AM in one run because */ + /* LEA optimization adds new nodes to the irg which */ + /* invalidates the phase data */ + + if (cg->opt & IA32_OPT_LEA) { + irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg); + } + + if (cg->dump) + be_dump(cg->irg, "-lea", dump_ir_block_graph_sched); + + /* hack for now, so these don't get created during optimize, because then + * they will be unknown to the heights module + */ + ia32_new_NoReg_gp(cg); + ia32_new_NoReg_fp(cg); + ia32_new_NoReg_vfp(cg); + + if (cg->opt & IA32_OPT_DOAM) { + /* we need height information for am optimization */ + heights_t *h = heights_new(cg->irg); + ia32_am_opt_env_t env; + + env.cg = cg; + env.h = h; + + irg_walk_blkwise_graph(cg->irg, NULL, optimize_am, &env); + + heights_free(h); + } +} + +void ia32_init_optimize(void) +{ + FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize"); }
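
A minimal standalone sketch of the slot bookkeeping in ia32_create_Pushs above,
operating on plain integers instead of firm nodes. All names in it
(emit_pushes, occupied, store_offs) are invented for this illustration and are
not libfirm API; only the offset/4 bucketing, the abort conditions and the
highest-slot-first walk mirror the patched code.

#include <stdio.h>

#define MAXPUSH_OPTIMIZE 48  /* same bound as in ia32_create_Pushs */

/* Models the two walks of ia32_create_Pushs: bucket the stores by
 * offset/4, then emit one push per occupied slot, highest slot first.
 * Returns the stack adjustment that remains for the IncSP. */
static int emit_pushes(int incsp_offset, const int *store_offs, int n_stores)
{
	int occupied[MAXPUSH_OPTIMIZE] = { 0 };
	int i;

	/* first walk: collect the stores into slots */
	for (i = 0; i < n_stores; ++i) {
		int slot;
		if (store_offs[i] % 4 != 0)      /* storing at half-slots is bad */
			break;
		slot = store_offs[i] / 4;
		if (slot >= MAXPUSH_OPTIMIZE)    /* beyond the optimization window */
			continue;
		if (occupied[slot])              /* same slot written twice: stop */
			break;
		occupied[slot] = 1;
	}

	/* second walk: highest slot first, each push shrinks the IncSP by 4 */
	i = incsp_offset / 4 - 1;
	if (i >= MAXPUSH_OPTIMIZE)
		i = MAXPUSH_OPTIMIZE - 1;
	for ( ; i >= 0; --i) {
		if (!occupied[i])
			break;
		printf("push <value stored at esp+%d>\n", i * 4);
		incsp_offset -= 4;
	}
	return incsp_offset;
}

int main(void)
{
	/* mirrors: sub esp, 12; mov [esp+8], c; mov [esp+4], b; mov [esp], a */
	const int store_offs[] = { 8, 4, 0 };
	int rest = emit_pushes(12, store_offs, 3);

	/* three pushes are printed; rest == 0, so the IncSP itself can go away,
	 * just as the offset == 0 case in ia32_create_Pushs removes the node */
	printf("remaining IncSP offset: %d\n", rest);
	return 0;
}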