8 #include "firm_types.h"
15 #include "../benode_t.h"
16 #include "../besched_t.h"
18 #include "ia32_new_nodes.h"
19 #include "bearch_ia32_t.h"
20 #include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
21 #include "ia32_transform.h"
24 #define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)
26 typedef int is_op_func_t(const ir_node *n);
29 * checks if a node represents the NOREG value
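* (this is either the GP NoReg or, for float modes, the SSE2 xmm / x87 vfp NoReg, depending on the code generator settings)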
31 static int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
32 be_abi_irg_t *babi = cg->birg->abi;
33 const arch_register_t *fp_noreg = USE_SSE2(cg) ?
34 &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG];
36 return (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn) ||
37 (be_abi_get_callee_save_irn(babi, fp_noreg) == irn);
42 /*************************************************
45 * | | ___ _ __ ___| |_ __ _ _ __ | |_ ___
46 * | | / _ \| '_ \/ __| __/ _` | '_ \| __/ __|
47 * | |___| (_) | | | \__ \ || (_| | | | | |_\__ \
48 * \_____\___/|_| |_|___/\__\__,_|_| |_|\__|___/
50 *************************************************/
53 * creates a unique ident by adding a number to a tag
55 * @param tag the tag string; must contain a %d where the unique number is inserted
58 static ident *unique_id(const char *tag)
60 static unsigned id = 0;
63 snprintf(str, sizeof(str), tag, ++id);
64 return new_id_from_str(str);
70 * Transforms a SymConst.
72 * @param env the transform environment holding the debug info, block, the ir SymConst node and its mode
76 * @return the created ia32 Const node
78 static ir_node *gen_SymConst(ia32_transform_env_t *env) {
80 dbg_info *dbg = env->dbg;
81 ir_mode *mode = env->mode;
82 ir_graph *irg = env->irg;
83 ir_node *block = env->block;
85 if (mode_is_float(mode)) {
86 if (USE_SSE2(env->cg))
87 cnst = new_rd_ia32_fConst(dbg, irg, block, mode);
89 cnst = new_rd_ia32_vfConst(dbg, irg, block, mode);
92 cnst = new_rd_ia32_Const(dbg, irg, block, mode);
94 set_ia32_Const_attr(cnst, env->irn);
99 * Get a primitive type for a mode.
101 static ir_type *get_prim_type(pmap *types, ir_mode *mode)
103 pmap_entry *e = pmap_find(types, mode);
108 snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
109 res = new_type_primitive(new_id_from_str(buf), mode);
110 pmap_insert(types, mode, res);
118 * Get an entity that is initialized with a tarval
120 static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
122 tarval *tv = get_Const_tarval(cnst);
123 pmap_entry *e = pmap_find(cg->isa->tv_ent, tv);
128 ir_mode *mode = get_irn_mode(cnst);
129 ir_type *tp = get_Const_type(cnst);
130 if (tp == firm_unknown_type)
131 tp = get_prim_type(cg->isa->types, mode);
133 res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp);
135 set_entity_ld_ident(res, get_entity_ident(res));
136 set_entity_visibility(res, visibility_local);
137 set_entity_variability(res, variability_constant);
138 set_entity_allocation(res, allocation_static);
140 /* we create a new entity here: its initialization must reside in the const code irg */
142 rem = current_ir_graph;
143 current_ir_graph = get_const_code_irg();
144 set_atomic_ent_value(res, new_Const_type(tv, tp));
145 current_ir_graph = rem;
147 pmap_insert(cg->isa->tv_ent, tv, res);
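/* the entity is cached per tarval, so equal float constants end up sharing a single static, read-only memory slot */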
155 * Transforms a Const.
157 * @param env the transform environment holding the debug info, block, the ir Const node and its mode
161 * @return the created ia32 Const node
163 static ir_node *gen_Const(ia32_transform_env_t *env) {
166 ir_graph *irg = env->irg;
167 ir_node *block = env->block;
168 ir_node *node = env->irn;
169 dbg_info *dbg = env->dbg;
170 ir_mode *mode = env->mode;
172 if (mode_is_float(mode)) {
173 if (! USE_SSE2(env->cg)) {
174 cnst_classify_t clss = classify_Const(node);
176 if (clss == CNST_NULL)
177 return new_rd_ia32_vfldz(dbg, irg, block, mode);
178 else if (clss == CNST_ONE)
179 return new_rd_ia32_vfld1(dbg, irg, block, mode);
181 sym.entity_p = get_entity_for_tv(env->cg, node);
183 cnst = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
185 cnst = gen_SymConst(env);
188 cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node));
189 set_ia32_Const_attr(cnst, node);
197 * Transforms (all) Consts into ia32_Const nodes and places them in the
198 * block where they are used (or in the cfg-pred Block in case of Phis).
199 * Additionally all reference nodes are changed into mode_Is nodes.
201 void ia32_place_consts_set_modes(ir_node *irn, void *env) {
202 ia32_code_gen_t *cg = env;
203 ia32_transform_env_t tenv;
205 ir_node *pred, *cnst;
212 mode = get_irn_mode(irn);
214 /* transform all reference nodes into mode_Is nodes */
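/* (a reference is a plain 32-bit integer on ia32, so switching it to mode_Is loses no information) */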
215 if (mode_is_reference(mode)) {
mode = mode_Is;
217 set_irn_mode(irn, mode);
220 tenv.block = get_nodes_block(irn);
223 DEBUG_ONLY(tenv.mod = cg->mod;)
225 /* Loop over all predecessors and check for Sym/Const nodes */
226 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
227 pred = get_irn_n(irn, i);
229 opc = get_irn_opcode(pred);
231 tenv.mode = get_irn_mode(pred);
232 tenv.dbg = get_irn_dbg_info(pred);
234 /* If it's a Phi, then we need to create the */
235 /* new Const in its predecessor block */
237 tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i);
240 /* put the const into the block where the original const was */
241 if (! cg->opt.placecnst) {
242 tenv.block = get_nodes_block(pred);
247 cnst = gen_Const(&tenv);
250 cnst = gen_SymConst(&tenv);
256 /* if we found a const, then set it */
258 set_irn_n(irn, i, cnst);
265 /********************************************************************************************************
266 * _____ _ _ ____ _ _ _ _ _
267 * | __ \ | | | | / __ \ | | (_) (_) | | (_)
268 * | |__) |__ ___ _ __ | |__ ___ | | ___ | | | |_ __ | |_ _ _ __ ___ _ ______ _| |_ _ ___ _ __
269 * | ___/ _ \/ _ \ '_ \| '_ \ / _ \| |/ _ \ | | | | '_ \| __| | '_ ` _ \| |_ / _` | __| |/ _ \| '_ \
270 * | | | __/ __/ |_) | | | | (_) | | __/ | |__| | |_) | |_| | | | | | | |/ / (_| | |_| | (_) | | | |
271 * |_| \___|\___| .__/|_| |_|\___/|_|\___| \____/| .__/ \__|_|_| |_| |_|_/___\__,_|\__|_|\___/|_| |_|
274 ********************************************************************************************************/
277 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
280 static int ia32_cnst_compare(ir_node *n1, ir_node *n2) {
281 return get_ia32_id_cnst(n1) == get_ia32_id_cnst(n2);
285 * Checks for potential CJmp/CJmpAM optimization candidates.
287 static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
288 ir_node *cand = NULL;
289 ir_node *prev = sched_prev(irn);
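/* look at the scheduling predecessor; if irn is the first node of its block, cross over into the single control flow predecessor block to reach the jump candidate */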
291 if (is_Block(prev)) {
292 if (get_Block_n_cfgpreds(prev) == 1)
293 prev = get_Block_cfgpred(prev, 0);
298 /* The predecessor must be a ProjX. */
299 if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
300 prev = get_Proj_pred(prev);
302 if (is_op_func(prev))
309 static int is_TestJmp_cand(const ir_node *irn) {
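/* a preceding TestJmp with the same arguments allows a plain CJmp; a preceding And works as well, since it sets the flags exactly like the corresponding test, and yields a CJmpAM (see ia32_optimize_TestJmp) */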
310 return is_ia32_TestJmp(irn) || is_ia32_And(irn);
314 * Checks if two consecutive arguments of cand match
315 * the two arguments of irn (TestJmp).
317 static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
318 ir_node *in1 = get_irn_n(irn, 0);
319 ir_node *in2 = get_irn_n(irn, 1);
320 int i, n = get_irn_arity(cand);
323 for (i = 0; i < n - 1; i++) {
324 if (get_irn_n(cand, i) == in1 &&
325 get_irn_n(cand, i + 1) == in2)
333 return ia32_cnst_compare(cand, irn);
339 * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And)
341 static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
342 ir_node *cand = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
345 /* we found a possible candidate */
346 replace = cand ? is_TestJmp_replacement(cand, irn) : 0;
349 DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
351 if (is_ia32_And(cand))
352 set_irn_op(irn, op_ia32_CJmpAM);
354 set_irn_op(irn, op_ia32_CJmp);
356 DB((cg->mod, LEVEL_1, "%+F\n", irn));
360 static int is_CondJmp_cand(const ir_node *irn) {
361 return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
365 * Checks if the arguments of cand are the same as those of irn.
367 static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
368 int i, n = get_irn_arity(cand);
371 for (i = 0; i < n; i++) {
372 if (get_irn_n(cand, i) == get_irn_n(irn, i)) {
379 return ia32_cnst_compare(cand, irn);
385 * Tries to replace a CondJmp by a CJmpAM
387 static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
388 ir_node *cand = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
391 /* we found a possible candidate */
392 replace = cand ? is_CondJmp_replacement(cand, irn) : 0;
395 DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));
397 set_irn_op(irn, op_ia32_CJmp);
399 DB((cg->mod, LEVEL_1, "%+F\n", irn));
404 * Tries to combine two consecutive IncSP nodes into one.
406 static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
407 ir_node *prev = be_get_IncSP_pred(irn);
408 int real_uses = get_irn_n_edges(prev);
410 if (be_is_IncSP(prev) && real_uses == 1) {
411 /* first IncSP has only one IncSP user, kill the first one */
412 unsigned prev_offs = be_get_IncSP_offset(prev);
413 be_stack_dir_t prev_dir = be_get_IncSP_direction(prev);
414 unsigned curr_offs = be_get_IncSP_offset(irn);
415 be_stack_dir_t curr_dir = be_get_IncSP_direction(irn);
417 int new_ofs = prev_offs * (prev_dir == be_stack_dir_expand ? -1 : +1) +
418 curr_offs * (curr_dir == be_stack_dir_expand ? -1 : +1);
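/* be_stack_dir_expand adjustments count as negative (the stack grows downwards), so both offsets can simply be summed; the sign of new_ofs then decides whether the merged IncSP expands or shrinks (handled below) */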
422 curr_dir = be_stack_dir_expand;
425 curr_dir = be_stack_dir_shrink;
426 be_set_IncSP_offset(prev, 0);
427 be_set_IncSP_offset(irn, (unsigned)new_ofs);
428 be_set_IncSP_direction(irn, curr_dir);
433 * Performs Peephole Optimizations.
435 void ia32_peephole_optimization(ir_node *irn, void *env) {
436 ia32_code_gen_t *cg = env;
438 if (is_ia32_TestJmp(irn))
439 ia32_optimize_TestJmp(irn, cg);
440 else if (is_ia32_CondJmp(irn))
441 ia32_optimize_CondJmp(irn, cg);
442 else if (be_is_IncSP(irn))
443 ia32_optimize_IncSP(irn, cg);
448 /******************************************************************
450 * /\ | | | | | \/ | | |
451 * / \ __| | __| |_ __ ___ ___ ___| \ / | ___ __| | ___
452 * / /\ \ / _` |/ _` | '__/ _ \/ __/ __| |\/| |/ _ \ / _` |/ _ \
453 * / ____ \ (_| | (_| | | | __/\__ \__ \ | | | (_) | (_| | __/
454 * /_/ \_\__,_|\__,_|_| \___||___/___/_| |_|\___/ \__,_|\___|
456 ******************************************************************/
458 static int node_is_ia32_comm(const ir_node *irn) {
459 return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
462 static int ia32_get_irn_n_edges(const ir_node *irn) {
463 const ir_edge_t *edge;
466 foreach_out_edge(irn, edge) {
474 * Returns the first mode_M Proj connected to irn.
476 static ir_node *get_mem_proj(const ir_node *irn) {
477 const ir_edge_t *edge;
480 assert(get_irn_mode(irn) == mode_T && "expected mode_T node");
482 foreach_out_edge(irn, edge) {
483 src = get_edge_src_irn(edge);
485 assert(is_Proj(src) && "Proj expected");
487 if (get_irn_mode(src) == mode_M)
495 * Returns the first Proj with mode != mode_M connected to irn.
497 static ir_node *get_res_proj(const ir_node *irn) {
498 const ir_edge_t *edge;
501 assert(get_irn_mode(irn) == mode_T && "expected mode_T node");
503 foreach_out_edge(irn, edge) {
504 src = get_edge_src_irn(edge);
506 assert(is_Proj(src) && "Proj expected");
508 if (get_irn_mode(src) != mode_M)
516 * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
518 * @param pred The node to be checked
519 * @param is_op_func The check-function
520 * @return 1 if conditions are fulfilled, 0 otherwise
522 static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
523 if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
531 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
532 * and if the predecessor is in block bl.
534 * @param bl The block
535 * @param pred The node to be checked
536 * @param is_op_func The check-function
537 * @return 1 if conditions are fulfilled, 0 otherwise
539 static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
540 int (*is_op_func)(const ir_node *n))
543 pred = get_Proj_pred(pred);
544 if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
555 * Checks if irn is a candidate for address calculation or address mode.
557 * address calculation (AC):
558 - none of the operands may be a Load within the same block OR
559 * - all Loads must have more than one user OR
560 * - the irn has a frame entity (it's a former FrameAddr)
* address mode (AM):
563 - at least one operand has to be a Load within the same block AND
564 * - the load must not have other users than the irn AND
565 * - the irn must not have a frame entity set
567 @param block The block the Loads must (or must not) be in
568 * @param irn The irn to check
569 @param check_addr 1 to check for address calculation (AC), 0 to check for address mode (AM)
570 @return 1 if irn is a candidate for AC or AM, 0 otherwise
572 static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr) {
574 int n, is_cand = check_addr;
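/* a Load feeding an operand and having exactly one user disqualifies the node for address calculation (the Load has to be executed anyway) but makes it a candidate for address mode */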
576 in = get_irn_n(irn, 2);
578 if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
579 n = ia32_get_irn_n_edges(in);
580 is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
583 in = get_irn_n(irn, 3);
585 if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
586 n = ia32_get_irn_n_edges(in);
587 is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
590 is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : is_cand;
596 * Compares the base and index addresses as well as the Load/Store frame
597 * entities, am symconsts, offsets and modes, and returns 1 if they all match.
599 static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
600 const ir_node *addr_b, const ir_node *addr_i)
602 int is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1));
603 entity *lent = get_ia32_frame_ent(load);
604 entity *sent = get_ia32_frame_ent(store);
605 ident *lid = get_ia32_am_sc(load);
606 ident *sid = get_ia32_am_sc(store);
607 char *loffs = get_ia32_am_offs(load);
608 char *soffs = get_ia32_am_offs(store);
610 /* are both entities set and equal? */
611 if (is_equal && (lent || sent))
612 is_equal = lent && sent && (lent == sent);
614 /* are address mode idents set and equal? */
615 if (is_equal && (lid || sid))
616 is_equal = lid && sid && (lid == sid);
618 /* are offsets set and equal? */
619 if (is_equal && (loffs || soffs))
620 is_equal = loffs && soffs && strcmp(loffs, soffs) == 0;
622 /* are the load and the store of the same mode? */
623 is_equal = is_equal ? get_ia32_ls_mode(load) == get_ia32_ls_mode(store) : 0;
631 * Folds Add or Sub to LEA if possible
633 static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) {
634 ir_graph *irg = get_irn_irg(irn);
635 dbg_info *dbg = get_irn_dbg_info(irn);
636 ir_node *block = get_nodes_block(irn);
639 const char *offs_cnst = NULL;
640 char *offs_lea = NULL;
647 ir_node *left, *right, *temp;
648 ir_node *base, *index;
649 ia32_am_flavour_t am_flav;
650 DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)
652 if (is_ia32_Add(irn))
655 left = get_irn_n(irn, 2);
656 right = get_irn_n(irn, 3);
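/* in this backend, inputs 0 and 1 of a binop carry the address mode base and index, inputs 2 and 3 the actual left and right operands */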
658 /* "normalize" arguments in case of add with two operands */
659 if (isadd && ! be_is_NoReg(cg, right)) {
660 /* put LEA == ia32_am_O as right operand */
661 if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
662 set_irn_n(irn, 2, right);
663 set_irn_n(irn, 3, left);
669 /* put LEA != ia32_am_O as left operand */
670 if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
671 set_irn_n(irn, 2, right);
672 set_irn_n(irn, 3, left);
678 /* put SHL as left operand iff left is NOT a LEA */
679 if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
680 set_irn_n(irn, 2, right);
681 set_irn_n(irn, 3, left);
694 /* check for operation with immediate */
695 if (is_ia32_ImmConst(irn)) {
696 DBG((mod, LEVEL_1, "\tfound op with imm const"));
698 offs_cnst = get_ia32_cnst(irn);
701 else if (is_ia32_ImmSymConst(irn)) {
702 DBG((mod, LEVEL_1, "\tfound op with imm symconst"));
706 am_sc = get_ia32_id_cnst(irn);
707 am_sc_sign = is_ia32_am_sc_sign(irn);
710 /* determine the operand which needs to be checked */
711 if (be_is_NoReg(cg, right)) {
718 /* check if right operand is AMConst (LEA with ia32_am_O) */
719 /* but we can only eat it up if there is no other symconst */
720 /* because the linker won't accept two symconsts */
721 if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
722 DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));
724 offs_lea = get_ia32_am_offs(temp);
725 am_sc = get_ia32_am_sc(temp);
726 am_sc_sign = is_ia32_am_sc_sign(temp);
732 /* default for add -> make the right operand the index */
736 DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));
738 /* determine the operand which needs to be checked */
740 if (is_ia32_Lea(left)) {
744 /* check for SHL 1,2,3 */
745 if (pred_is_specific_node(temp, is_ia32_Shl)) {
746 temp = get_Proj_pred(temp);
748 if (get_ia32_Immop_tarval(temp)) {
749 scale = get_tarval_long(get_ia32_Immop_tarval(temp));
752 index = get_irn_n(temp, 2);
754 DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
760 if (! be_is_NoReg(cg, index)) {
761 /* if we have index, but left == right -> no base */
765 else if (! is_ia32_Lea(left) && (index != right)) {
766 /* index != right -> we found a good Shl */
767 /* left != LEA -> this Shl was the left operand */
768 /* -> base is right operand */
774 /* Try to assimilate a LEA as left operand */
775 if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
776 ir_node *assim_lea_idx, *assim_lea_base;
778 am_flav = get_ia32_am_flavour(left);
779 assim_lea_base = get_irn_n(left, 0);
780 assim_lea_idx = get_irn_n(left, 1);
783 /* If we have an Add with a real right operand (not NoReg) and */
784 /* the LEA already contains an index calculation, then we create a new LEA. */
786 /* If the LEA already contains a frame_entity then we also */
787 /* create a new one, otherwise we would lose it. */
788 if ((isadd && ! be_is_NoReg(cg, index) && (am_flav & ia32_I)) || /* no new LEA if index already set */
789 get_ia32_frame_ent(left) || /* no new LEA if stack access */
790 (have_am_sc && get_ia32_am_sc(left)) || /* no new LEA if AM symconst already present */
791 /* at least one of the LEA operands must be NOREG */
792 (!be_is_NoReg(cg, assim_lea_base) && !be_is_NoReg(cg, assim_lea_idx)))
794 DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
797 DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
798 offs = get_ia32_am_offs(left);
799 am_sc = have_am_sc ? am_sc : get_ia32_am_sc(left);
800 have_am_sc = am_sc ? 1 : 0;
801 am_sc_sign = is_ia32_am_sc_sign(left);
802 scale = get_ia32_am_scale(left);
804 if (be_is_NoReg(cg, assim_lea_base) && ! be_is_NoReg(cg, assim_lea_idx)) {
805 /* assimilate index */
806 assert(be_is_NoReg(cg, index) && ! be_is_NoReg(cg, base) && "operand mismatch for LEA assimilation");
807 index = assim_lea_idx;
809 else if (! be_is_NoReg(cg, assim_lea_base) && be_is_NoReg(cg, assim_lea_idx)) {
810 /* assimilate base */
811 assert(! be_is_NoReg(cg, index) && (base == left) && "operand mismatch for LEA assimilation");
812 base = assim_lea_base;
817 /* ok, we can create a new LEA */
819 res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is);
821 /* add the old offset of a previous LEA */
823 add_ia32_am_offs(res, offs);
826 /* add the new offset */
829 add_ia32_am_offs(res, offs_cnst);
832 add_ia32_am_offs(res, offs_lea);
836 /* either lea_O-cnst, -cnst or -lea_O */
839 add_ia32_am_offs(res, offs_lea);
842 sub_ia32_am_offs(res, offs_cnst);
845 sub_ia32_am_offs(res, offs_lea);
849 /* set the address mode symconst */
851 set_ia32_am_sc(res, am_sc);
853 set_ia32_am_sc_sign(res);
856 /* copy the frame entity (could be set in case of Add */
857 /* which was a FrameAddr) */
858 set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
860 if (is_ia32_use_frame(irn))
861 set_ia32_use_frame(res);
864 set_ia32_am_scale(res, scale);
867 /* determine new am flavour */
868 if (offs || offs_cnst || offs_lea) {
871 if (! be_is_NoReg(cg, base)) {
874 if (! be_is_NoReg(cg, index)) {
880 set_ia32_am_flavour(res, am_flav);
882 set_ia32_op_type(res, ia32_AddrModeS);
884 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
886 DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));
888 /* get the result Proj of the Add/Sub */
889 irn = get_res_proj(irn);
891 assert(irn && "Couldn't find result proj");
893 /* exchange the old op with the new LEA */
901 * Optimizes a pattern around irn to address mode if possible.
903 void ia32_optimize_am(ir_node *irn, void *env) {
904 ia32_code_gen_t *cg = env;
908 ir_node *block, *noreg_gp, *noreg_fp;
909 ir_node *left, *right, *temp;
910 ir_node *store, *load, *mem_proj;
911 ir_node *succ, *addr_b, *addr_i;
912 int check_am_src = 0;
913 DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)
915 if (! is_ia32_irn(irn))
918 dbg = get_irn_dbg_info(irn);
919 mode = get_irn_mode(irn);
920 block = get_nodes_block(irn);
921 noreg_gp = ia32_new_NoReg_gp(cg);
922 noreg_fp = ia32_new_NoReg_fp(cg);
924 DBG((mod, LEVEL_1, "checking for AM\n"));
926 /* 1st part: check for address calculations and transform them into LEAs */
928 /* Following cases can occur: */
929 /* - Sub (l, imm) -> LEA [base - offset] */
930 /* - Sub (l, r == LEA with ia32_am_O) -> LEA [base - offset] */
931 /* - Add (l, imm) -> LEA [base + offset] */
932 /* - Add (l, r == LEA with ia32_am_O) -> LEA [base + offset] */
933 /* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset] */
934 /* - Add (l, r) -> LEA [base + index * scale] */
935 /* with scale > 1 iff l/r == shl (1,2,3) */
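/* e.g. Add(x, Shl(y, 2)) can become LEA [x + y * 4]: shift amounts 1, 2 and 3 map to the scales 2, 4 and 8 supported by the ia32 addressing mode */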
937 if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
938 left = get_irn_n(irn, 2);
939 right = get_irn_n(irn, 3);
941 /* Do not try to create a LEA if one of the operands is a Load. */
942 /* check if irn is a candidate for address calculation */
943 if (is_candidate(block, irn, 1)) {
944 DBG((mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
945 res = fold_addr(cg, irn, noreg_gp);
948 DB((mod, LEVEL_1, "transformed into %+F\n", res));
950 DB((mod, LEVEL_1, "not transformed\n"));
954 /* 2nd part: fold following patterns: */
955 /* - Load -> LEA into Load } TODO: If the LEA is used by more than one Load/Store */
956 /* - Store -> LEA into Store } it might be better to keep the LEA */
957 /* - op -> Load into AMop with am_Source */
959 /* - op is am_Source capable AND */
960 /* - the Load is only used by this op AND */
961 /* - the Load is in the same block */
962 /* - Store -> op -> Load into AMop with am_Dest */
964 /* - op is am_Dest capable AND */
965 /* - the Store uses the same address as the Load AND */
966 /* - the Load is only used by this op AND */
967 /* - the Load and Store are in the same block AND */
968 /* - nobody else uses the result of the op */
970 if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
971 /* 1st: check for Load/Store -> LEA */
972 if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
973 left = get_irn_n(irn, 0);
975 if (is_ia32_Lea(left)) {
976 DBG((mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
978 /* get the AM attributes from the LEA */
979 add_ia32_am_offs(irn, get_ia32_am_offs(left));
980 set_ia32_am_scale(irn, get_ia32_am_scale(left));
981 set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
983 set_ia32_am_sc(irn, get_ia32_am_sc(left));
984 if (is_ia32_am_sc_sign(left))
985 set_ia32_am_sc_sign(irn);
987 set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);
989 /* set base and index */
990 set_irn_n(irn, 0, get_irn_n(left, 0));
991 set_irn_n(irn, 1, get_irn_n(left, 1));
993 /* clear remat flag */
994 set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
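/* a node that now performs a memory access can no longer be freely rematerialized, so the flag is dropped */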
997 /* check if the node is an address mode candidate */
998 else if (is_candidate(block, irn, 0)) {
999 DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));
1001 left = get_irn_n(irn, 2);
1002 if (get_irn_arity(irn) == 4) {
1003 /* it's a unary operation */
1007 right = get_irn_n(irn, 3);
1010 /* normalize commutative ops */
1011 if (node_is_ia32_comm(irn)) {
1012 /* Assure that right operand is always a Load if there is one */
1013 /* because non-commutative ops can only use Dest AM if the right */
1014 /* operand is a load, so we only need to check right operand. */
1015 if (pred_is_specific_nodeblock(block, left, is_ia32_Ld))
1017 set_irn_n(irn, 2, right);
1018 set_irn_n(irn, 3, left);
1026 /* check for Store -> op -> Load */
1028 /* Store -> op -> Load optimization is only possible if supported by op */
1029 /* and if right operand is a Load */
1030 if ((get_ia32_am_support(irn) & ia32_am_Dest) &&
1031 pred_is_specific_nodeblock(block, right, is_ia32_Ld))
1034 /* An address mode capable op always has a result Proj. */
1035 /* If this Proj is used by more than one other node, we don't need to */
1036 /* check further, otherwise we check for Store and remember the address, */
1037 /* the Store points to. */
1039 succ = get_res_proj(irn);
1040 assert(succ && "Couldn't find result proj");
1046 /* now check for users and Store */
1047 if (ia32_get_irn_n_edges(succ) == 1) {
1048 succ = get_edge_src_irn(get_irn_out_edge_first(succ));
1050 if (is_ia32_fStore(succ) || is_ia32_Store(succ)) {
1052 addr_b = get_irn_n(store, 0);
1054 /* Could be that the Store is connected to the address */
1055 /* calculating LEA while the Load is already transformed. */
1056 if (is_ia32_Lea(addr_b)) {
1058 addr_b = get_irn_n(succ, 0);
1059 addr_i = get_irn_n(succ, 1);
1068 /* we found a Store as single user: Now check for Load */
1070 /* Extra check for commutative ops with two Loads */
1071 /* -> put the interesting Load right */
1072 if (node_is_ia32_comm(irn) &&
1073 pred_is_specific_nodeblock(block, left, is_ia32_Ld))
1075 if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
1076 (addr_i == get_irn_n(get_Proj_pred(left), 1)))
1078 /* We exchange left and right, so it's easier to kill */
1079 /* the correct Load later and to handle unary operations. */
1080 set_irn_n(irn, 2, right);
1081 set_irn_n(irn, 3, left);
1089 /* skip the Proj for easier access */
1090 load = get_Proj_pred(right);
1092 /* Compare Load and Store address */
1093 if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
1094 /* Right Load is from same address, so we can */
1095 /* disconnect the Load and Store here */
1097 /* set new base, index and attributes */
1098 set_irn_n(irn, 0, addr_b);
1099 set_irn_n(irn, 1, addr_i);
1100 add_ia32_am_offs(irn, get_ia32_am_offs(load));
1101 set_ia32_am_scale(irn, get_ia32_am_scale(load));
1102 set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
1103 set_ia32_op_type(irn, ia32_AddrModeD);
1104 set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
1105 set_ia32_ls_mode(irn, get_ia32_ls_mode(load));
1107 set_ia32_am_sc(irn, get_ia32_am_sc(load));
1108 if (is_ia32_am_sc_sign(load))
1109 set_ia32_am_sc_sign(irn);
1111 if (is_ia32_use_frame(load))
1112 set_ia32_use_frame(irn);
1114 /* connect to Load memory and disconnect Load */
1115 if (get_irn_arity(irn) == 5) {
1117 set_irn_n(irn, 4, get_irn_n(load, 2));
1118 set_irn_n(irn, 3, noreg_gp);
1122 set_irn_n(irn, 3, get_irn_n(load, 2));
1123 set_irn_n(irn, 2, noreg_gp);
1126 /* connect the memory Proj of the Store to the op */
1127 mem_proj = get_mem_proj(store);
1128 set_Proj_pred(mem_proj, irn);
1129 set_Proj_proj(mem_proj, 1);
1131 /* clear remat flag */
1132 set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
1134 DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
1137 else if (get_ia32_am_support(irn) & ia32_am_Source) {
1138 /* There was no store, check if we still can optimize for source address mode */
1141 } /* if (support AM Dest) */
1142 else if (get_ia32_am_support(irn) & ia32_am_Source) {
1143 /* op doesn't support AM Dest -> check for AM Source */
1147 /* normalize commutative ops */
1148 if (node_is_ia32_comm(irn)) {
1149 /* Assure that left operand is always a Load if there is one */
1150 /* because non-commutative ops can only use Source AM if the */
1151 /* left operand is a Load, so we only need to check the left */
1152 /* operand afterwards. */
1153 if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
1154 set_irn_n(irn, 2, right);
1155 set_irn_n(irn, 3, left);
1163 /* optimize op -> Load iff Load is only used by this op */
1164 /* and left operand is a Load which is only used by this irn */
1166 pred_is_specific_nodeblock(block, left, is_ia32_Ld) &&
1167 (ia32_get_irn_n_edges(left) == 1))
1169 left = get_Proj_pred(left);
1171 addr_b = get_irn_n(left, 0);
1172 addr_i = get_irn_n(left, 1);
1174 /* set new base, index and attributes */
1175 set_irn_n(irn, 0, addr_b);
1176 set_irn_n(irn, 1, addr_i);
1177 add_ia32_am_offs(irn, get_ia32_am_offs(left));
1178 set_ia32_am_scale(irn, get_ia32_am_scale(left));
1179 set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
1180 set_ia32_op_type(irn, ia32_AddrModeS);
1181 set_ia32_frame_ent(irn, get_ia32_frame_ent(left));
1182 set_ia32_ls_mode(irn, get_ia32_ls_mode(left));
1184 set_ia32_am_sc(irn, get_ia32_am_sc(left));
1185 if (is_ia32_am_sc_sign(left))
1186 set_ia32_am_sc_sign(irn);
1188 /* clear remat flag */
1189 set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
1191 if (is_ia32_use_frame(left))
1192 set_ia32_use_frame(irn);
1194 /* connect to Load memory */
1195 if (get_irn_arity(irn) == 5) {
1197 set_irn_n(irn, 4, get_irn_n(left, 2));
1201 set_irn_n(irn, 3, get_irn_n(left, 2));
1204 /* disconnect from Load */
1205 set_irn_n(irn, 2, noreg_gp);
1207 /* If Load has a memory Proj, connect it to the op */
1208 mem_proj = get_mem_proj(left);
1210 set_Proj_pred(mem_proj, irn);
1211 set_Proj_proj(mem_proj, 1);
1214 DB((mod, LEVEL_1, "merged with %+F into source AM\n", left));