 * File name:   ir/be/ia32/ia32_optimize.c
 * Purpose:     Implements several optimizations for IA32
 * Author:      Christian Wuerdig
 * Copyright:   (c) 2006 Universität Karlsruhe
 * Licence:     This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
#include "firm_types.h"
#include "../benode_t.h"
#include "../besched_t.h"
#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
typedef enum {
	IA32_AM_CAND_NONE  = 0,
	IA32_AM_CAND_LEFT  = 1,
	IA32_AM_CAND_RIGHT = 2,
	IA32_AM_CAND_BOTH  = 3
} ia32_am_cand_t;
#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)

typedef int is_op_func_t(const ir_node *n);
typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem);
/**
 * Checks if a node represents the NOREG value.
 */
static int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
	be_abi_irg_t          *babi     = cg->birg->abi;
	const arch_register_t *fp_noreg = USE_SSE2(cg) ?
		&ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG];

	return (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn) ||
	       (be_abi_get_callee_save_irn(babi, fp_noreg) == irn);
}
/*************************************************
 *   _____                _              _
 *  / ____|              | |            | |
 * | |     ___  _ __  ___| |_ __ _ _ __ | |_ ___
 * | |    / _ \| '_ \/ __| __/ _` | '_ \| __/ __|
 * | |___| (_) | | | \__ \ || (_| | | | | |_\__ \
 *  \_____\___/|_| |_|___/\__\__,_|_| |_|\__|___/
 *
 *************************************************/
/**
 * Creates a unique ident by adding a number to a tag.
 *
 * @param tag the tag string, must contain a %u where the number is inserted
 */
static ident *unique_id(const char *tag)
{
	static unsigned id = 0;
	char str[256];

	snprintf(str, sizeof(str), tag, ++id);
	return new_id_from_str(str);
}
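/* Usage sketch: successive calls of unique_id(".LC%u") yield the idents
 * ".LC1", ".LC2", ..., giving every constant-pool entity created below
 * its own distinct local label. */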
/**
 * Transforms a SymConst.
 *
 * @param env the transform environment (carries block, SymConst node, mode, ...)
 * @return the created ia32 Const node
 */
static ir_node *gen_SymConst(ia32_transform_env_t *env) {
	dbg_info *dbg   = env->dbg;
	ir_mode  *mode  = env->mode;
	ir_graph *irg   = env->irg;
	ir_node  *block = env->block;
	ir_node  *cnst;

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			cnst = new_rd_ia32_xConst(dbg, irg, block, get_irg_no_mem(irg), mode);
		else
			cnst = new_rd_ia32_vfConst(dbg, irg, block, get_irg_no_mem(irg), mode);
	}
	else
		cnst = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), mode);

	set_ia32_Const_attr(cnst, env->irn);
	return cnst;
}
/**
 * Get a primitive type for a mode.
 */
static ir_type *get_prim_type(pmap *types, ir_mode *mode)
{
	pmap_entry *e = pmap_find(types, mode);
	ir_type *res;

	if (! e) {
		char buf[64];
		snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
		res = new_type_primitive(new_id_from_str(buf), mode);
		pmap_insert(types, mode, res);
	}
	else
		res = e->value;
	return res;
}
/**
 * Get an entity that is initialized with a tarval.
 */
static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
{
	tarval *tv = get_Const_tarval(cnst);
	pmap_entry *e = pmap_find(cg->isa->tv_ent, tv);
	entity *res;
	ir_graph *rem;

	if (! e) {
		ir_mode *mode = get_irn_mode(cnst);
		ir_type *tp   = get_Const_type(cnst);
		if (tp == firm_unknown_type)
			tp = get_prim_type(cg->isa->types, mode);

		res = new_entity(get_glob_type(), unique_id(".LC%u"), tp);

		set_entity_ld_ident(res, get_entity_ident(res));
		set_entity_visibility(res, visibility_local);
		set_entity_variability(res, variability_constant);
		set_entity_allocation(res, allocation_static);

		/* we create a new entity here: its initialization must reside in the
		   const code irg */
		rem = current_ir_graph;
		current_ir_graph = get_const_code_irg();
		set_atomic_ent_value(res, new_Const_type(tv, tp));
		current_ir_graph = rem;

		pmap_insert(cg->isa->tv_ent, tv, res);
	}
	else
		res = e->value;
	return res;
}
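/* Note the effect of the tv_ent cache above: two Consts with the same
 * tarval (say, two occurrences of 2.5) share one ".LC" entity instead
 * of producing two copies in the constant pool. */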
/**
 * Transforms a Const.
 *
 * @param env the transform environment (carries block, Const node, mode, ...)
 * @return the created ia32 Const node
 */
static ir_node *gen_Const(ia32_transform_env_t *env) {
	ir_node *cnst, *load;
	symconst_symbol sym;
	ir_graph *irg   = env->irg;
	ir_node  *block = env->block;
	ir_node  *node  = env->irn;
	dbg_info *dbg   = env->dbg;
	ir_mode  *mode  = env->mode;

	if (mode_is_float(mode)) {
		if (! USE_SSE2(env->cg)) {
			cnst_classify_t clss = classify_Const(node);

			if (clss == CNST_NULL)
				return new_rd_ia32_vfldz(dbg, irg, block, mode);
			else if (clss == CNST_ONE)
				return new_rd_ia32_vfld1(dbg, irg, block, mode);
		}
		sym.entity_p = get_entity_for_tv(env->cg, node);

		cnst = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
		load = new_r_Load(irg, block, get_irg_no_mem(irg), cnst, mode);
		load = new_r_Proj(irg, block, load, mode, pn_Load_res);

		env->irn = cnst;
		cnst     = gen_SymConst(env);
		set_Load_ptr(get_Proj_pred(load), cnst);
		cnst     = load;
	}
	else {
		cnst = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), get_irn_mode(node));
		set_ia32_Const_attr(cnst, node);
	}
	return cnst;
}
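/* To summarize the lowering: x87 0.0/1.0 become vfldz/vfld1, any other
 * float constant is materialized as a Load from its static ".LC" entity,
 * and integer constants become immediate ia32_Const nodes. */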
/**
 * Transforms (all) Consts into ia32_Consts and places them in the
 * block where they are used (or in the cfg-pred Block in case of Phis).
 * Additionally all reference nodes are changed into mode_Is nodes.
 * NOTE: irn must be a firm constant!
 */
static void ia32_transform_const(ir_node *irn, void *env) {
	ia32_code_gen_t      *cg   = env;
	ir_node              *cnst = NULL;
	ia32_transform_env_t tenv;

	tenv.cg   = cg;
	tenv.irg  = cg->irg;
	tenv.irn  = irn;
	tenv.mode = get_irn_mode(irn);
	tenv.dbg  = get_irn_dbg_info(irn);
	DEBUG_ONLY(tenv.mod = cg->mod;)

	/* place const either in the smallest dominator of all its users or the original block */
	if (cg->opt & IA32_OPT_PLACECNST)
		tenv.block = node_users_smallest_common_dominator(irn, 1);
	else
		tenv.block = get_nodes_block(irn);

	switch (get_irn_opcode(irn)) {
		case iro_Const:
			cnst = gen_Const(&tenv);
			break;
		case iro_SymConst:
			cnst = gen_SymConst(&tenv);
			break;
		default:
			assert(0 && "Wrong usage of ia32_transform_const!");
	}

	assert(cnst && "Could not create ia32 Const");

	/* set the new ia32 const */
	exchange(irn, cnst);
}
/**
 * Transform all firm consts and ensure we visit each const only once.
 */
static void ia32_place_consts_walker(ir_node *irn, void *env) {
	ia32_code_gen_t *cg = env;

	if (! is_Const(irn) && ! is_SymConst(irn))
		return;

	ia32_transform_const(irn, cg);
}
/**
 * Replace reference modes with mode_Iu and preserve store value modes.
 */
static void ia32_set_modes(ir_node *irn, void *env) {
	/* transform all reference nodes into mode_Iu nodes */
	if (mode_is_reference(get_irn_mode(irn))) {
		set_irn_mode(irn, mode_Iu);
	}
}
/**
 * Walks over the graph, transforms all firm consts into ia32 consts
 * and places them into the "best" block.
 * @param cg The ia32 code generator object
 */
static void ia32_transform_all_firm_consts(ia32_code_gen_t *cg) {
	irg_walk_graph(cg->irg, NULL, ia32_place_consts_walker, cg);
}
/* Place all consts and change pointer arithmetic into unsigned integer arithmetic. */
void ia32_pre_transform_phase(ia32_code_gen_t *cg) {
	/*
	   We need to transform the consts twice:
	   - the psi condition tree transformer needs existing constants to be ia32 constants
	   - the psi condition tree transformer inserts new firm constants which need to be transformed
	*/
	ia32_transform_all_firm_consts(cg);
	irg_walk_graph(cg->irg, ia32_set_modes, ia32_transform_psi_cond_tree, cg);
	ia32_transform_all_firm_consts(cg);
}
/********************************************************************************************************
 *  _____                _           _        ____        _   _           _          _   _
 * |  __ \              | |         | |      / __ \      | | (_)         (_)        | | (_)
 * | |__) |__  ___ _ __ | |__   ___ | | ___ | |  | |_ __ | |_ _ _ __ ___  _ ______ _| |_ _  ___  _ __
 * |  ___/ _ \/ _ \ '_ \| '_ \ / _ \| |/ _ \| |  | | '_ \| __| | '_ ` _ \| |_  / _` | __| |/ _ \| '_ \
 * | |  |  __/  __/ |_) | | | | (_) | |  __/| |__| | |_) | |_| | | | | | | |/ / (_| | |_| | (_) | | | |
 * |_|   \___|\___| .__/|_| |_|\___/|_|\___| \____/| .__/ \__|_|_| |_| |_|_/___\__,_|\__|_|\___/|_| |_|
 *                | |                              | |
 *                |_|                              |_|
 ********************************************************************************************************/

/**
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
 */
static int ia32_cnst_compare(ir_node *n1, ir_node *n2) {
	return get_ia32_id_cnst(n1) == get_ia32_id_cnst(n2);
}
/**
 * Checks for potential CJmp/CJmpAM optimization candidates.
 */
static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
	ir_node *cand = NULL;
	ir_node *prev = sched_prev(irn);

	if (is_Block(prev)) {
		if (get_Block_n_cfgpreds(prev) == 1)
			prev = get_Block_cfgpred(prev, 0);
		else
			return NULL;
	}

	/* The predecessor must be a ProjX. */
	if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
		prev = get_Proj_pred(prev);
		if (is_op_func(prev))
			cand = prev;
	}
	return cand;
}
static int is_TestJmp_cand(const ir_node *irn) {
	return is_ia32_TestJmp(irn) || is_ia32_And(irn);
}

/**
 * Checks if two consecutive arguments of cand match
 * the two arguments of irn (TestJmp).
 */
static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
	ir_node *in1 = get_irn_n(irn, 0);
	ir_node *in2 = get_irn_n(irn, 1);
	int i, n = get_irn_arity(cand);

	for (i = 0; i < n - 1; i++) {
		if (get_irn_n(cand, i) == in1 &&
		    get_irn_n(cand, i + 1) == in2)
			break;
	}
	if (i >= n - 1)
		return 0;  /* the two arguments do not appear in cand */

	return ia32_cnst_compare(cand, irn);
}
/**
 * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And).
 */
static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
	ir_node *cand = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
	int      replace;

	/* we found a possible candidate */
	replace = cand ? is_TestJmp_replacement(cand, irn) : 0;

	if (replace) {
		DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
		if (is_ia32_And(cand))
			set_irn_op(irn, op_ia32_CJmpAM);
		else
			set_irn_op(irn, op_ia32_CJmp);
		DB((cg->mod, LEVEL_1, "%+F\n", irn));
	}
}
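/* Roughly, at the assembly level this turns
 *   and eax, ebx ; test eax, ebx ; jne label
 * into "and eax, ebx; jne label": the and already set the flags,
 * so the separate test is redundant. */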
static int is_CondJmp_cand(const ir_node *irn) {
	return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
}

/**
 * Checks if the arguments of cand are the same as those of irn.
 */
static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
	int i, n = get_irn_arity(cand);

	for (i = 0; i < n; i++) {
		if (get_irn_n(cand, i) != get_irn_n(irn, i)) {
			return 0;
		}
	}
	return ia32_cnst_compare(cand, irn);
}
/**
 * Tries to replace a CondJmp by a CJmpAM.
 */
static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
	ir_node *cand = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
	int      replace;

	/* we found a possible candidate */
	replace = cand ? is_CondJmp_replacement(cand, irn) : 0;

	if (replace) {
		DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
		set_irn_op(irn, op_ia32_CJmpAM);
		DB((cg->mod, LEVEL_1, "%+F\n", irn));
	}
}
/**
 * Creates a Push from Store(IncSP(gp_reg_size)).
 */
static void ia32_create_Push(ir_node *irn, ia32_code_gen_t *cg) {
	ir_node  *sp  = get_irn_n(irn, 0);
	ir_graph *irg = cg->irg;
	ir_node  *val, *next, *push, *bl, *proj_M, *proj_res, *old_proj_M, *mem;
	const ir_edge_t *edge;
	heights_t *h;

	/* do not create a push if the store already has an offset assigned or the base is not an IncSP */
	if (get_ia32_am_offs(irn) || ! be_is_IncSP(sp))
		return;

	/* do not create a push if the index is not NOREG */
	if (arch_get_irn_register(cg->arch_env, get_irn_n(irn, 1)) !=
	    &ia32_gp_regs[REG_GP_NOREG])
		return;

	/* do not create a push for floating point */
	val = get_irn_n(irn, 2);
	if (mode_is_float(get_irn_mode(val)))
		return;

	/* do not create a push if the IncSP does not expand the stack or the expand size differs from the register size */
	if (be_get_IncSP_offset(sp) != get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode))
		return;

	/* do not create a push if there is a path (inside the block) from the push value to the IncSP */
	h = heights_new(cg->irg);
	if (get_nodes_block(val) == get_nodes_block(sp) &&
	    heights_reachable_in_block(h, val, sp)) {
		heights_free(h);
		return;
	}
	heights_free(h);

	/* ok, translate into a Push */
	edge       = get_irn_out_edge_first(irn);
	old_proj_M = get_edge_src_irn(edge);
	bl         = get_nodes_block(irn);

	next = sched_next(irn);

	/*
	 * if the IncSP points to NoMem -> just use the memory input from the store
	 * if the IncSP points to somewhere else -> sync the memory of IncSP and Store
	 */
	mem      = get_irn_n(irn, 3);
	push     = new_rd_ia32_Push(NULL, irg, bl, be_get_IncSP_pred(sp), val, mem);
	proj_res = new_r_Proj(irg, bl, push, get_irn_mode(sp), pn_ia32_Push_stack);
	proj_M   = new_r_Proj(irg, bl, push, mode_M, pn_ia32_Push_M);

	/* copy a possible constant from the store */
	set_ia32_id_cnst(push, get_ia32_id_cnst(irn));
	set_ia32_immop_type(push, get_ia32_immop_type(irn));

	/* the push must have SP out register */
	arch_set_irn_register(cg->arch_env, push, arch_get_irn_register(cg->arch_env, sp));

	exchange(old_proj_M, proj_M);
	exchange(sp, proj_res);
	sched_add_before(next, push);
	sched_add_after(push, proj_res);
}
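/* Sketch of the rewrite performed above:
 *   sp' = IncSP(sp, 4)  ;  Store(sp', val)   ==>   sp', M = Push(sp, val)
 * i.e. "sub esp, 4; mov [esp], val" collapses into a single "push val". */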
/**
 * Creates a Pop from IncSP(Load(sp)).
 */
static void ia32_create_Pop(ir_node *irn, ia32_code_gen_t *cg) {
/**
 * Tries to optimize two consecutive IncSP nodes.
 */
static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
	ir_node *prev     = be_get_IncSP_pred(irn);
	int      real_uses = get_irn_n_edges(prev);

	if (be_is_IncSP(prev) && real_uses == 1) {
		/* first IncSP has only one IncSP user, kill the first one */
		int prev_offs = be_get_IncSP_offset(prev);
		int curr_offs = be_get_IncSP_offset(irn);

		be_set_IncSP_offset(irn, prev_offs + curr_offs);

		/* Omit the optimized IncSP */
		be_set_IncSP_pred(irn, be_get_IncSP_pred(prev));
	}
}
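/* Example: IncSP(IncSP(sp, 8), 4) is folded into one IncSP(sp, 12);
 * the bypassed inner IncSP is left without users and dies. */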
/**
 * Performs Peephole Optimizations.
 */
void ia32_peephole_optimization(ir_node *irn, void *env) {
	ia32_code_gen_t *cg = env;

	/* AMD CPUs want explicit compare before conditional jump */
	if (! ARCH_AMD(cg->opt_arch)) {
		if (is_ia32_TestJmp(irn))
			ia32_optimize_TestJmp(irn, cg);
		else if (is_ia32_CondJmp(irn))
			ia32_optimize_CondJmp(irn, cg);
	}
	/* seems to be buggy when using Pushes */
	else if (be_is_IncSP(irn))
		ia32_optimize_IncSP(irn, cg);
	else if (is_ia32_Store(irn))
		ia32_create_Push(irn, cg);
}
/******************************************************************
 *                _     _                   __  __           _
 *       /\      | |   | |                 |  \/  |         | |
 *      /  \   __| | __| |_ __ ___ ___ ___ | \  / | ___   __| | ___
 *     / /\ \ / _` |/ _` | '__/ _ \/ __/ __| |\/| |/ _ \ / _` |/ _ \
 *    / ____ \ (_| | (_| | |  |  __/\__ \__ \ |  | | (_) | (_| |  __/
 *   /_/    \_\__,_|\__,_|_|   \___||___/___/_|  |_|\___/ \__,_|\___|
 *
 ******************************************************************/
static int node_is_ia32_comm(const ir_node *irn) {
	return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}

static int ia32_get_irn_n_edges(const ir_node *irn) {
	const ir_edge_t *edge;
	int cnt = 0;

	foreach_out_edge(irn, edge) {
		cnt++;
	}
	return cnt;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
	if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
		return 1;
	}
	return 0;
}

/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
                                      int (*is_op_func)(const ir_node *n))
{
	if (is_Proj(pred)) {
		pred = get_Proj_pred(pred);
		if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
			return 1;
		}
	}
	return 0;
}
/**
 * Checks if irn is a candidate for address calculation.
 *
 * - none of the operands may be a Load within the same block OR
 * - all Loads must have more than one user OR
 * - the irn has a frame entity (it's a former FrameAddr)
 *
 * @param block The block the Loads must/must not be in
 * @param irn   The irn to check
 * @return 1 if irn is a candidate, 0 otherwise
 */
static int is_addr_candidate(const ir_node *block, const ir_node *irn) {
	ir_node *in, *left, *right;
	int      n, is_cand = 1;

	left  = get_irn_n(irn, 2);
	right = get_irn_n(irn, 3);

	in = left;
	if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
		n       = ia32_get_irn_n_edges(in);
		is_cand = (n == 1) ? 0 : is_cand;  /* load with only one user: don't create LEA */
	}

	in = right;
	if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
		n       = ia32_get_irn_n_edges(in);
		is_cand = (n == 1) ? 0 : is_cand;  /* load with only one user: don't create LEA */
	}

	is_cand = get_ia32_frame_ent(irn) ? 1 : is_cand;

	return is_cand;
}
/**
 * Checks if irn is a candidate for address mode.
 *
 * address mode (AM):
 * - at least one operand has to be a Load within the same block AND
 * - the load must not have other users than the irn AND
 * - the irn must not have a frame entity set
 *
 * @param cg    The ia32 code generator
 * @param h     The height information of the irg
 * @param block The block the Loads must/must not be in
 * @param irn   The irn to check
 * @return 0 if irn is no candidate, 1 if left load can be used, 2 if right one, 3 for both
 */
static ia32_am_cand_t is_am_candidate(ia32_code_gen_t *cg, heights_t *h, const ir_node *block, ir_node *irn) {
	ir_node *in, *load, *other, *left, *right;
	int      n, is_cand = 0, cand;

	if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn) || is_ia32_vfild(irn) || is_ia32_vfist(irn) ||
	    is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn))
		return IA32_AM_CAND_NONE;

	left  = get_irn_n(irn, 2);
	right = get_irn_n(irn, 3);

	in = left;
	if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
		n       = ia32_get_irn_n_edges(in);
		is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */

		load  = get_Proj_pred(in);
		other = right;

		/* 8bit Loads are not supported, they cannot be used with every register */
		if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
			is_cand = 0;

		/* If there is a data dependency of other irn from load: cannot use AM */
		if (is_cand && get_nodes_block(other) == block) {
			other   = skip_Proj(other);
			is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
			/* this could happen in loops */
			is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
		}
	}

	cand    = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE;
	in      = right;
	is_cand = 0;

	if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
		n       = ia32_get_irn_n_edges(in);
		is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */

		load  = get_Proj_pred(in);
		other = left;

		/* 8bit Loads are not supported, they cannot be used with every register */
		if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
			is_cand = 0;

		/* If there is a data dependency of other irn from load: cannot use load */
		if (is_cand && get_nodes_block(other) == block) {
			other   = skip_Proj(other);
			is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
			/* this could happen in loops */
			is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
		}
	}

	cand = is_cand ? (cand | IA32_AM_CAND_RIGHT) : cand;

	/* check some special cases */
	if (USE_SSE2(cg) && is_ia32_Conv_I2FP(irn)) {
		/* SSE Conv I -> FP cvtsi2s(s|d) can only load 32 bit values */
		if (get_mode_size_bits(get_ia32_tgt_mode(irn)) != 32)
			cand = IA32_AM_CAND_NONE;
	}
	else if (is_ia32_Conv_I2I(irn)) {
		/* we cannot load an N bit value and implicitly convert it into an M bit value if N > M */
		if (get_mode_size_bits(get_ia32_src_mode(irn)) > get_mode_size_bits(get_ia32_tgt_mode(irn)))
			cand = IA32_AM_CAND_NONE;
	}

	/* if the irn has a frame entity: we do not use address mode */
	return get_ia32_frame_ent(irn) ? IA32_AM_CAND_NONE : cand;
}
/**
 * Compares the base and index addresses and the load/store entities
 * and returns 1 if they are equal.
 */
static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
                                    const ir_node *addr_b, const ir_node *addr_i)
{
	int     is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1));
	entity *lent     = get_ia32_frame_ent(load);
	entity *sent     = get_ia32_frame_ent(store);
	ident  *lid      = get_ia32_am_sc(load);
	ident  *sid      = get_ia32_am_sc(store);
	char   *loffs    = get_ia32_am_offs(load);
	char   *soffs    = get_ia32_am_offs(store);

	/* are both entities set and equal? */
	if (is_equal && (lent || sent))
		is_equal = lent && sent && (lent == sent);

	/* are address mode idents set and equal? */
	if (is_equal && (lid || sid))
		is_equal = lid && sid && (lid == sid);

	/* are offsets set and equal? */
	if (is_equal && (loffs || soffs))
		is_equal = loffs && soffs && strcmp(loffs, soffs) == 0;

	/* are the load and the store of the same mode? */
	is_equal = is_equal ? get_ia32_ls_mode(load) == get_ia32_ls_mode(store) : 0;

	return is_equal;
}
typedef enum _ia32_take_lea_attr {
	IA32_LEA_ATTR_NONE  = 0,
	IA32_LEA_ATTR_BASE  = (1 << 0),
	IA32_LEA_ATTR_INDEX = (1 << 1),
	IA32_LEA_ATTR_OFFS  = (1 << 2),
	IA32_LEA_ATTR_SCALE = (1 << 3),
	IA32_LEA_ATTR_AMSC  = (1 << 4),
	IA32_LEA_ATTR_FENT  = (1 << 5)
} ia32_take_lea_attr;
/**
 * Decides if we have to keep the LEA operand or if we can assimilate it.
 */
static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea,
                      int have_am_sc, ia32_code_gen_t *cg)
{
	entity *irn_ent = get_ia32_frame_ent(irn);
	entity *lea_ent = get_ia32_frame_ent(lea);
	int     ret_val = 0;
	int     is_noreg_base  = be_is_NoReg(cg, base);
	int     is_noreg_index = be_is_NoReg(cg, index);
	ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea);

	/* If the Add and the LEA both have a different frame entity set: keep */
	if (irn_ent && lea_ent && (irn_ent != lea_ent))
		return IA32_LEA_ATTR_NONE;
	else if (! irn_ent && lea_ent)
		ret_val |= IA32_LEA_ATTR_FENT;

	/* If the Add and the LEA both already have an address mode symconst: keep */
	if (have_am_sc && get_ia32_am_sc(lea))
		return IA32_LEA_ATTR_NONE;
	else if (get_ia32_am_sc(lea))
		ret_val |= IA32_LEA_ATTR_AMSC;

	/* Check the different base-index combinations */

	if (! is_noreg_base && ! is_noreg_index) {
		/* Assimilate if base is the LEA and the LEA is just a Base + Offset calculation */
		if ((base == lea) && ! (am_flav & ia32_I)) {
			if (am_flav & ia32_O)
				ret_val |= IA32_LEA_ATTR_OFFS;

			ret_val |= IA32_LEA_ATTR_BASE;
		}
		else
			return IA32_LEA_ATTR_NONE;
	}
	else if (! is_noreg_base && is_noreg_index) {
		/* Base is set but index is not */
		if (base == lea) {
			/* Base points to LEA: assimilate everything */
			if (am_flav & ia32_O)
				ret_val |= IA32_LEA_ATTR_OFFS;
			if (am_flav & ia32_S)
				ret_val |= IA32_LEA_ATTR_SCALE;
			if (am_flav & ia32_I)
				ret_val |= IA32_LEA_ATTR_INDEX;

			ret_val |= IA32_LEA_ATTR_BASE;
		}
		else if (! (am_flav & ia32_B)) {
			/* Base is not the LEA but the LEA is an index only calculation: assimilate */
			if (am_flav & ia32_O)
				ret_val |= IA32_LEA_ATTR_OFFS;
			if (am_flav & ia32_S)
				ret_val |= IA32_LEA_ATTR_SCALE;

			ret_val |= IA32_LEA_ATTR_INDEX;
		}
		else
			return IA32_LEA_ATTR_NONE;
	}
	else if (is_noreg_base && ! is_noreg_index) {
		/* Index is set but base is not */
		if (index == lea) {
			/* Index points to LEA: assimilate everything */
			if (am_flav & ia32_O)
				ret_val |= IA32_LEA_ATTR_OFFS;
			if (am_flav & ia32_S)
				ret_val |= IA32_LEA_ATTR_SCALE;
			if (am_flav & ia32_B)
				ret_val |= IA32_LEA_ATTR_BASE;

			ret_val |= IA32_LEA_ATTR_INDEX;
		}
		else if (! (am_flav & ia32_I)) {
			/* Index is not the LEA but the LEA is a base only calculation: assimilate */
			if (am_flav & ia32_O)
				ret_val |= IA32_LEA_ATTR_OFFS;
			if (am_flav & ia32_S)
				ret_val |= IA32_LEA_ATTR_SCALE;

			ret_val |= IA32_LEA_ATTR_BASE;
		}
		else
			return IA32_LEA_ATTR_NONE;
	}
	else
		assert(0 && "base or index must have been set");

	return ret_val;
}
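/* Example: for irn = Add(lea, x) with lea = LEA[ebx + 4] (base + offset
 * only), the result IA32_LEA_ATTR_BASE | IA32_LEA_ATTR_OFFS tells the
 * caller to build LEA[ebx + x + 4] and to drop the inner LEA. */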
/**
 * Adds res before irn into schedule if irn was scheduled.
 * @param irn The schedule point
 * @param res The node to be scheduled
 */
static INLINE void try_add_to_sched(ir_node *irn, ir_node *res) {
	if (sched_is_scheduled(irn))
		sched_add_before(irn, res);
}
/**
 * Removes irn from schedule if it was scheduled. If irn is a mode_T node
 * all its Projs are removed as well.
 * @param irn The irn to be removed from schedule
 */
static INLINE void try_remove_from_sched(ir_node *irn) {
	if (sched_is_scheduled(irn)) {
		if (get_irn_mode(irn) == mode_T) {
			const ir_edge_t *edge;
			foreach_out_edge(irn, edge) {
				ir_node *proj = get_edge_src_irn(edge);
				if (sched_is_scheduled(proj))
					sched_remove(proj);
			}
		}
		sched_remove(irn);
	}
}
/**
 * Folds an Add or Sub to a LEA if possible.
 */
static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) {
	ir_graph   *irg        = get_irn_irg(irn);
	dbg_info   *dbg        = get_irn_dbg_info(irn);
	ir_node    *block      = get_nodes_block(irn);
	ir_node    *res        = irn;
	ir_node    *shift      = NULL;
	ir_node    *lea_o      = NULL;
	ir_node    *lea        = NULL;
	char       *offs       = NULL;
	const char *offs_cnst  = NULL;
	char       *offs_lea   = NULL;
	int         scale      = 0;
	int         isadd      = 0;
	int         dolea      = 0;
	int         have_am_sc = 0;
	int         am_sc_sign = 0;
	ident      *am_sc      = NULL;
	entity     *lea_ent    = NULL;
	ir_node    *left, *right, *temp;
	ir_node    *base, *index;
	int         consumed_left_shift;
	ia32_am_flavour_t am_flav;
	DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)
	if (is_ia32_Add(irn))
		isadd = 1;
	left  = get_irn_n(irn, 2);
	right = get_irn_n(irn, 3);

	/* "normalize" the arguments in case of an Add with two operands */
	if (isadd && ! be_is_NoReg(cg, right)) {
		/* put a LEA with flavour ia32_am_O as the right operand */
		if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
			set_irn_n(irn, 2, right);
			set_irn_n(irn, 3, left);
			temp  = left;
			left  = right;
			right = temp;
		}
		/* put a LEA with flavour != ia32_am_O as the left operand */
		if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
			set_irn_n(irn, 2, right);
			set_irn_n(irn, 3, left);
			temp  = left;
			left  = right;
			right = temp;
		}
		/* put a SHL as the left operand iff left is NOT a LEA */
		if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
			set_irn_n(irn, 2, right);
			set_irn_n(irn, 3, left);
			temp  = left;
			left  = right;
			right = temp;
		}
	}
	/* check for an operation with immediate */
	if (is_ia32_ImmConst(irn)) {
		DBG((mod, LEVEL_1, "\tfound op with imm const"));
		offs_cnst = get_ia32_cnst(irn);
		dolea     = 1;
	}
	else if (is_ia32_ImmSymConst(irn)) {
		DBG((mod, LEVEL_1, "\tfound op with imm symconst"));
		have_am_sc = 1;
		dolea      = 1;
		am_sc      = get_ia32_id_cnst(irn);
		am_sc_sign = is_ia32_am_sc_sign(irn);
	}

	/* determine the operand which needs to be checked */
	temp = be_is_NoReg(cg, right) ? left : right;

	/* check if right operand is AMConst (LEA with ia32_am_O)  */
	/* but we can only eat it up if there is no other symconst */
	/* because the linker won't accept two symconsts           */
	if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
		DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));
		offs_lea   = get_ia32_am_offs(temp);
		am_sc      = get_ia32_am_sc(temp);
		am_sc_sign = is_ia32_am_sc_sign(temp);
		have_am_sc = 1;
		dolea      = 1;
		lea_o      = temp;

		if (temp == left)
			left = noreg;
		else if (temp == right)
			right = noreg;
	}

	if (isadd) {
		/* default for add -> make the right operand the index */
		index = right;
		dolea = 1;
		consumed_left_shift = -1;

		DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));
		/* determine the operand which needs to be checked */
		temp = left;
		if (is_ia32_Lea(left)) {
			temp = right;
			consumed_left_shift = 0;
		}
		/* check for SHL 1,2,3 */
		if (pred_is_specific_node(temp, is_ia32_Shl)) {
			temp  = get_Proj_pred(temp);
			shift = temp;

			if (get_ia32_Immop_tarval(temp)) {
				scale = get_tarval_long(get_ia32_Immop_tarval(temp));

				index = get_irn_n(temp, 2);
				consumed_left_shift = consumed_left_shift < 0 ? 1 : 0;

				DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
			}
		}
	}
	else {
		/* default for sub -> only a base, no index */
		index = noreg;
	}

	base = left;
	if (! be_is_NoReg(cg, index)) {
		/* if we have an index, but left == right -> no base */
		if (left == right)
			base = noreg;
		else if (consumed_left_shift == 1) {
			/* -> base is the right operand */
			base = (right == lea_o) ? noreg : right;
		}
	}
	/* Try to assimilate a LEA as left operand */
	if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
		/* check if we can assimilate the LEA */
		int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg);

		if (take_attr == IA32_LEA_ATTR_NONE) {
			DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
		}
		else {
			DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
			lea = left; /* for statistics */

			if (take_attr & IA32_LEA_ATTR_OFFS)
				offs = get_ia32_am_offs(left);

			if (take_attr & IA32_LEA_ATTR_AMSC) {
				am_sc      = get_ia32_am_sc(left);
				have_am_sc = 1;
				am_sc_sign = is_ia32_am_sc_sign(left);
			}

			if (take_attr & IA32_LEA_ATTR_SCALE)
				scale = get_ia32_am_scale(left);

			if (take_attr & IA32_LEA_ATTR_BASE)
				base = get_irn_n(left, 0);

			if (take_attr & IA32_LEA_ATTR_INDEX)
				index = get_irn_n(left, 1);

			if (take_attr & IA32_LEA_ATTR_FENT)
				lea_ent = get_ia32_frame_ent(left);
		}
	}
	/* ok, we can create a new LEA */
	if (dolea) {
		res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is);

		/* add the old offset of a previous LEA */
		if (offs)
			add_ia32_am_offs(res, offs);

		/* add the new offset */
		if (isadd) {
			if (offs_cnst)
				add_ia32_am_offs(res, offs_cnst);
			if (offs_lea)
				add_ia32_am_offs(res, offs_lea);
		}
		else {
			/* either lea_O-cnst, -cnst or -lea_O */
			if (offs_cnst) {
				if (offs_lea)
					add_ia32_am_offs(res, offs_lea);
				sub_ia32_am_offs(res, offs_cnst);
			}
			else
				sub_ia32_am_offs(res, offs_lea);
		}

		/* set the address mode symconst */
		if (have_am_sc) {
			set_ia32_am_sc(res, am_sc);
			if (am_sc_sign)
				set_ia32_am_sc_sign(res);
		}

		/* copy the frame entity (could be set in case of an Add
		   which was a FrameAddr) */
		if (lea_ent)
			set_ia32_frame_ent(res, lea_ent);
		else
			set_ia32_frame_ent(res, get_ia32_frame_ent(irn));

		if (get_ia32_frame_ent(res))
			set_ia32_use_frame(res);

		set_ia32_am_scale(res, scale);

		am_flav = ia32_am_N;
		/* determine new am flavour */
		if (offs || offs_cnst || offs_lea || have_am_sc) {
			am_flav |= ia32_O;
		}
		if (! be_is_NoReg(cg, base)) {
			am_flav |= ia32_B;
		}
		if (! be_is_NoReg(cg, index)) {
			am_flav |= ia32_I;
		}
		if (scale > 0)
			am_flav |= ia32_S;
		set_ia32_am_flavour(res, am_flav);

		set_ia32_op_type(res, ia32_AddrModeS);

		SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));

		DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));
		/* we will exchange it, report here before the Proj is created */
		if (shift && lea && lea_o) {
			try_remove_from_sched(shift);
			try_remove_from_sched(lea);
			try_remove_from_sched(lea_o);
			DBG_OPT_LEA4(irn, lea_o, lea, shift, res);
		}
		else if (shift && lea) {
			try_remove_from_sched(shift);
			try_remove_from_sched(lea);
			DBG_OPT_LEA3(irn, lea, shift, res);
		}
		else if (shift && lea_o) {
			try_remove_from_sched(shift);
			try_remove_from_sched(lea_o);
			DBG_OPT_LEA3(irn, lea_o, shift, res);
		}
		else if (lea && lea_o) {
			try_remove_from_sched(lea);
			try_remove_from_sched(lea_o);
			DBG_OPT_LEA3(irn, lea_o, lea, res);
		}
		else if (shift) {
			try_remove_from_sched(shift);
			DBG_OPT_LEA2(irn, shift, res);
		}
		else if (lea) {
			try_remove_from_sched(lea);
			DBG_OPT_LEA2(irn, lea, res);
		}
		else if (lea_o) {
			try_remove_from_sched(lea_o);
			DBG_OPT_LEA2(irn, lea_o, res);
		}
		else
			DBG_OPT_LEA1(irn, res);

		/* get the result Proj of the Add/Sub */
		try_add_to_sched(irn, res);
		try_remove_from_sched(irn);
		irn = ia32_get_res_proj(irn);

		assert(irn && "Couldn't find result proj");

		/* exchange the old op with the new LEA */
		exchange(irn, res);
	}

	return res;
}
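/* Example of the net effect: Add(Shl(x, 2), Add(y, 8)) can collapse
 * into LEA[y + x*4 + 8], folding shift, add and immediate into one
 * address computation. */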
/**
 * Merges a Load/Store node with a LEA.
 *
 * @param irn The Load/Store node
 * @param lea The LEA
 */
static void merge_loadstore_lea(ir_node *irn, ir_node *lea) {
	entity *irn_ent = get_ia32_frame_ent(irn);
	entity *lea_ent = get_ia32_frame_ent(lea);

	/* If the irn and the LEA both have a different frame entity set: do not merge */
	if (irn_ent && lea_ent && (irn_ent != lea_ent))
		return;
	else if (! irn_ent && lea_ent) {
		set_ia32_frame_ent(irn, lea_ent);
		set_ia32_use_frame(irn);
	}

	/* get the AM attributes from the LEA */
	add_ia32_am_offs(irn, get_ia32_am_offs(lea));
	set_ia32_am_scale(irn, get_ia32_am_scale(lea));
	set_ia32_am_flavour(irn, get_ia32_am_flavour(lea));

	set_ia32_am_sc(irn, get_ia32_am_sc(lea));
	if (is_ia32_am_sc_sign(lea))
		set_ia32_am_sc_sign(irn);

	set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);

	/* set base and index */
	set_irn_n(irn, 0, get_irn_n(lea, 0));
	set_irn_n(irn, 1, get_irn_n(lea, 1));

	try_remove_from_sched(lea);

	/* clear remat flag */
	set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

	if (is_ia32_Ld(irn))
		DBG_OPT_LOAD_LEA(lea, irn);
	else
		DBG_OPT_STORE_LEA(lea, irn);
}
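/* Example: a Load whose address operand is LEA[ebx + esi*4 + 8] becomes
 * a single "mov eax, [ebx + esi*4 + 8]"; the LEA node disappears. */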
/**
 * Sets new_right index of irn to right and new_left index to left.
 * Also exchanges left and right.
 */
static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right, int new_left, int new_right) {
	ir_node *temp;

	set_irn_n(irn, new_right, *right);
	set_irn_n(irn, new_left, *left);

	temp   = *left;
	*left  = *right;
	*right = temp;

	/* this is only needed for Compares, but currently ALL nodes
	 * have this attribute :-) */
	set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
}
/**
 * Performs address calculation optimization (creates LEAs if possible).
 */
static void optimize_lea(ir_node *irn, void *env) {
	ia32_code_gen_t *cg = env;
	ir_node *block, *noreg_gp, *left, *right;

	if (! is_ia32_irn(irn))
		return;

	/* Following cases can occur:                                  */
	/* - Sub (l, imm) -> LEA [base - offset]                       */
	/* - Sub (l, r == LEA with ia32_am_O) -> LEA [base - offset]   */
	/* - Add (l, imm) -> LEA [base + offset]                       */
	/* - Add (l, r == LEA with ia32_am_O) -> LEA [base + offset]   */
	/* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset]   */
	/* - Add (l, r) -> LEA [base + index * scale]                  */
	/*   with scale > 1 iff l/r == shl (1,2,3)                     */

	if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
		ir_node *res;

		left     = get_irn_n(irn, 2);
		right    = get_irn_n(irn, 3);
		block    = get_nodes_block(irn);
		noreg_gp = ia32_new_NoReg_gp(cg);

		/* Do not try to create a LEA if one of the operands is a Load. */
		/* check if irn is a candidate for address calculation          */
		if (is_addr_candidate(block, irn)) {
			DBG((cg->mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
			res = fold_addr(cg, irn, noreg_gp);

			if (res != irn)
				DB((cg->mod, LEVEL_1, "transformed into %+F\n", res));
			else
				DB((cg->mod, LEVEL_1, "not transformed\n"));
		}
	}
	else if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
		/* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
		/* - Store -> LEA into Store }       it might be better to keep the LEA             */
		left = get_irn_n(irn, 0);

		if (is_ia32_Lea(left)) {
			const ir_edge_t *edge, *ne;
			ir_node *src;

			/* merge all Loads/Stores connected to this LEA with the LEA */
			foreach_out_edge_safe(left, edge, ne) {
				src = get_edge_src_irn(edge);

				if (src && (get_edge_src_pos(edge) == 0) && (is_ia32_Ld(src) || is_ia32_St(src) || is_ia32_Store8Bit(src))) {
					DBG((cg->mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
					if (! is_ia32_got_lea(src))
						merge_loadstore_lea(src, left);
					set_ia32_got_lea(src);
				}
			}
		}
	}
}
/**
 * Checks for address mode patterns and performs the
 * necessary transformations.
 * This function is called by a walker.
 */
static void optimize_am(ir_node *irn, void *env) {
	ia32_am_opt_env_t *am_opt_env = env;
	ia32_code_gen_t   *cg         = am_opt_env->cg;
	heights_t         *h          = am_opt_env->h;
	ir_node *block, *noreg_gp, *noreg_fp;
	ir_node *left, *right;
	ir_node *store, *load, *mem_proj;
	ir_node *succ, *addr_b, *addr_i;
	int check_am_src          = 0;
	int need_exchange_on_fail = 0;
	DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)

	if (! is_ia32_irn(irn) || is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn))
		return;

	block    = get_nodes_block(irn);
	noreg_gp = ia32_new_NoReg_gp(cg);
	noreg_fp = ia32_new_NoReg_fp(cg);

	DBG((mod, LEVEL_1, "checking for AM\n"));
	/* fold following patterns:                                       */
	/* - op -> Load into AMop with am_Source                          */
	/*   conditions:                                                  */
	/*     - op is am_Source capable AND                              */
	/*     - the Load is only used by this op AND                     */
	/*     - the Load is in the same block                            */
	/* - Store -> op -> Load into AMop with am_Dest                   */
	/*   conditions:                                                  */
	/*     - op is am_Dest capable AND                                */
	/*     - the Store uses the same address as the Load AND          */
	/*     - the Load is only used by this op AND                     */
	/*     - the Load and Store are in the same block AND             */
	/*     - nobody else uses the result of the op                    */
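	/* At the assembly level, dest AM roughly turns                   */
	/*   mov eax, [p]; add eax, ebx; mov [p], eax                     */
	/* into a single "add [p], ebx", while source AM turns            */
	/*   mov eax, [p]; add ebx, eax   into   "add ebx, [p]".          */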
	if ((get_ia32_am_support(irn) != ia32_am_None) && ! is_ia32_Lea(irn)) {
		ia32_am_cand_t cand      = is_am_candidate(cg, h, block, irn);
		ia32_am_cand_t orig_cand = cand;

		/* cand == IA32_AM_CAND_LEFT: load is left; cand == IA32_AM_CAND_RIGHT: load is right */

		if (cand == IA32_AM_CAND_NONE)
			return;

		DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));

		left = get_irn_n(irn, 2);
		if (get_irn_arity(irn) == 4) {
			/* it's a unary operation */
			right = left;
		}
		else {
			right = get_irn_n(irn, 3);
		}

		/* normalize commutative ops */
		if (node_is_ia32_comm(irn) && (cand == IA32_AM_CAND_RIGHT)) {

			/* Ensure that the left operand is always a Load if there is one */
			/* because non-commutative ops can only use Dest AM if the left  */
			/* operand is a Load, so we only need to check the left operand. */

			exchange_left_right(irn, &left, &right, 3, 2);
			need_exchange_on_fail = 1;

			/* now: load is left */
			cand = IA32_AM_CAND_LEFT;
		}
		/* check for Store -> op -> Load */

		/* Store -> op -> Load optimization is only possible if supported by op */
		/* and if the left operand is a Load                                    */
		if ((get_ia32_am_support(irn) & ia32_am_Dest) && (cand & IA32_AM_CAND_LEFT))
		{
			/* An address mode capable op always has a result Proj.                  */
			/* If this Proj is used by more than one other node, we don't need to    */
			/* check further, otherwise we check for Store and remember the address, */
			/* which the Store points to.                                            */

			succ = ia32_get_res_proj(irn);
			assert(succ && "Couldn't find result proj");

			addr_b = NULL;
			addr_i = NULL;
			store  = NULL;

			/* now check for users and Store */
			if (ia32_get_irn_n_edges(succ) == 1) {
				succ = get_edge_src_irn(get_irn_out_edge_first(succ));

				if (is_ia32_xStore(succ) || is_ia32_Store(succ)) {
					store  = succ;
					addr_b = get_irn_n(store, 0);
					addr_i = get_irn_n(store, 1);
				}
			}

			if (store) {
				/* we found a Store as single user: now check for Load */

				/* skip the Proj for easier access */
				load = is_Proj(right) ? (is_ia32_Load(get_Proj_pred(right)) ? get_Proj_pred(right) : NULL) : NULL;

				/* Extra check for commutative ops with two Loads */
				/* -> put the interesting Load left               */
				if (load && node_is_ia32_comm(irn) && (cand == IA32_AM_CAND_BOTH)) {
					if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
						/* We exchange left and right, so it's easier to kill     */
						/* the correct Load later and to handle unary operations. */
						exchange_left_right(irn, &left, &right, 3, 2);
						need_exchange_on_fail ^= 1;
					}
				}

				/* skip the Proj for easier access */
				load = get_Proj_pred(left);

				/* Compare Load and Store address */
				if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
					/* Left Load is from same address, so we can */
					/* disconnect the Load and Store here        */
					/* set new base, index and attributes */
					set_irn_n(irn, 0, addr_b);
					set_irn_n(irn, 1, addr_i);
					add_ia32_am_offs(irn, get_ia32_am_offs(load));
					set_ia32_am_scale(irn, get_ia32_am_scale(load));
					set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
					set_ia32_op_type(irn, ia32_AddrModeD);
					set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
					set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

					set_ia32_am_sc(irn, get_ia32_am_sc(load));
					if (is_ia32_am_sc_sign(load))
						set_ia32_am_sc_sign(irn);

					if (is_ia32_use_frame(load))
						set_ia32_use_frame(irn);

					/* connect to Load memory and disconnect Load */
					if (get_irn_arity(irn) == 5) {
						/* binary AMop */
						set_irn_n(irn, 4, get_irn_n(load, 2));
						set_irn_n(irn, 2, noreg_gp);
					}
					else {
						/* unary AMop */
						set_irn_n(irn, 3, get_irn_n(load, 2));
						set_irn_n(irn, 2, noreg_gp);
					}

					/* connect the memory Proj of the Store to the op */
					mem_proj = ia32_get_proj_for_mode(store, mode_M);
					set_Proj_pred(mem_proj, irn);
					set_Proj_proj(mem_proj, 1);

					/* clear remat flag */
					set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

					try_remove_from_sched(load);
					try_remove_from_sched(store);
					DBG_OPT_AM_D(load, store, irn);

					DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));

					need_exchange_on_fail = 0;
				}
			}
			else if (get_ia32_am_support(irn) & ia32_am_Source) {
				/* There was no store, check if we can still optimize for source address mode */
				check_am_src = 1;
			}
		} /* if (support AM Dest) */
		else if (get_ia32_am_support(irn) & ia32_am_Source) {
			/* op doesn't support AM Dest -> check for AM Source */
			check_am_src = 1;
		}

		/* was exchanged but optimization failed: exchange back */
		if (need_exchange_on_fail) {
			exchange_left_right(irn, &left, &right, 3, 2);
			cand = orig_cand;
		}

		need_exchange_on_fail = 0;
		/* normalize commutative ops */
		if (check_am_src && node_is_ia32_comm(irn) && (cand == IA32_AM_CAND_LEFT)) {

			/* Ensure that the right operand is always a Load if there is one */
			/* because non-commutative ops can only use Source AM if the      */
			/* right operand is a Load, so we only need to check the right    */
			/* operand afterwards.                                            */

			exchange_left_right(irn, &left, &right, 3, 2);
			need_exchange_on_fail = 1;

			/* now: load is right */
			cand = IA32_AM_CAND_RIGHT;
		}

		/* optimize op -> Load iff Load is only used by this op       */
		/* and right operand is a Load which is only used by this irn */
		if (check_am_src                &&
		    (cand & IA32_AM_CAND_RIGHT) &&
		    (get_irn_arity(irn) == 5)   &&
		    (ia32_get_irn_n_edges(right) == 1))
		{
			right = get_Proj_pred(right);

			addr_b = get_irn_n(right, 0);
			addr_i = get_irn_n(right, 1);
			/* set new base, index and attributes */
			set_irn_n(irn, 0, addr_b);
			set_irn_n(irn, 1, addr_i);
			add_ia32_am_offs(irn, get_ia32_am_offs(right));
			set_ia32_am_scale(irn, get_ia32_am_scale(right));
			set_ia32_am_flavour(irn, get_ia32_am_flavour(right));
			set_ia32_op_type(irn, ia32_AddrModeS);
			set_ia32_frame_ent(irn, get_ia32_frame_ent(right));
			set_ia32_ls_mode(irn, get_ia32_ls_mode(right));

			set_ia32_am_sc(irn, get_ia32_am_sc(right));
			if (is_ia32_am_sc_sign(right))
				set_ia32_am_sc_sign(irn);

			/* clear remat flag */
			set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

			if (is_ia32_use_frame(right))
				set_ia32_use_frame(irn);

			/* connect to Load memory */
			set_irn_n(irn, 4, get_irn_n(right, 2));

			/* this is only needed for Compares, but currently ALL nodes
			 * have this attribute :-) */
			set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));

			/* disconnect from Load */
			set_irn_n(irn, 3, noreg_gp);

			DBG_OPT_AM_S(right, irn);

			/* If Load has a memory Proj, connect it to the op */
			mem_proj = ia32_get_proj_for_mode(right, mode_M);
			if (mem_proj) {
				set_Proj_pred(mem_proj, irn);
				set_Proj_proj(mem_proj, 1);
			}

			try_remove_from_sched(right);

			DB((mod, LEVEL_1, "merged with %+F into source AM\n", right));
		}
		else {
			/* was exchanged but optimization failed: exchange back */
			if (need_exchange_on_fail)
				exchange_left_right(irn, &left, &right, 3, 2);
		}
	}
}
/**
 * Performs address mode optimization.
 */
void ia32_optimize_addressmode(ia32_code_gen_t *cg) {
	/* if we are supposed to do AM or LEA optimization: recalculate edges */
	if (cg->opt & (IA32_OPT_DOAM | IA32_OPT_LEA)) {
		edges_deactivate(cg->irg);
		edges_activate(cg->irg);
	}
	else {
		/* no optimizations at all */
		return;
	}

	/* beware: we cannot optimize LEA and AM in one run because */
	/*         LEA optimization adds new nodes to the irg which */
	/*         invalidates the phase data                       */

	if (cg->opt & IA32_OPT_LEA) {
		irg_walk_blkwise_graph(cg->irg, NULL, optimize_lea, cg);
	}

	if (cg->dump)
		be_dump(cg->irg, "-lea", dump_ir_block_graph_sched);

	if (cg->opt & IA32_OPT_DOAM) {
		/* we need height information for am optimization */
		heights_t *h = heights_new(cg->irg);
		ia32_am_opt_env_t env;

		env.cg = cg;
		env.h  = h;

		irg_walk_blkwise_graph(cg->irg, NULL, optimize_am, &env);

		heights_free(h);
	}
}