#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"

#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"     /* the generated interface (register type and class definitions) */
#include "ia32_transform.h"

#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)

typedef int is_op_func_t(const ir_node *n);
/**
 * Checks if a node represents the NOREG value.
 */
static int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
    be_abi_irg_t          *babi     = cg->birg->abi;
    const arch_register_t *fp_noreg = USE_SSE2(cg) ?
        &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG];

    return (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn) ||
           (be_abi_get_callee_save_irn(babi, fp_noreg) == irn);
}
/*************************************************
 *
 *                  Constants
 *
 *************************************************/
/**
 * Creates a unique ident by adding a number to a tag.
 *
 * @param tag   the tag string, must contain a %d if a number
 *              should be added
 */
static ident *unique_id(const char *tag)
{
    static unsigned id = 0;
    char str[256];

    snprintf(str, sizeof(str), tag, ++id);
    return new_id_from_str(str);
}
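/* Illustrative usage (not from the original source): the tag acts as a
 * printf-style format for the running counter, so successive calls yield
 * distinct idents:
 *
 *     unique_id("ia32FloatCnst_%u")   -> "ia32FloatCnst_1"
 *     unique_id("ia32FloatCnst_%u")   -> "ia32FloatCnst_2"
 */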
/**
 * Transforms a SymConst.
 *
 * @param env   the transformation environment: block, dbg info, mode
 *              and the SymConst node to transform
 * @return the created ia32 Const node
 */
static ir_node *gen_SymConst(ia32_transform_env_t *env) {
    ir_node  *cnst;
    dbg_info *dbg   = env->dbg;
    ir_mode  *mode  = env->mode;
    ir_graph *irg   = env->irg;
    ir_node  *block = env->block;

    if (mode_is_float(mode)) {
        if (USE_SSE2(env->cg))
            cnst = new_rd_ia32_fConst(dbg, irg, block, mode);
        else
            cnst = new_rd_ia32_vfConst(dbg, irg, block, mode);
    }
    else
        cnst = new_rd_ia32_Const(dbg, irg, block, mode);

    set_ia32_Const_attr(cnst, env->irn);
    return cnst;
}
/**
 * Get a primitive type for a mode.
 */
static ir_type *get_prim_type(pmap *types, ir_mode *mode)
{
    pmap_entry *e = pmap_find(types, mode);
    ir_type    *res;

    if (! e) {
        char buf[64];
        snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
        res = new_type_primitive(new_id_from_str(buf), mode);
        pmap_insert(types, mode, res);
    }
    else
        res = e->value;

    return res;
}
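/* Example reading (assumed behaviour, name derived from the format string
 * above): the first lookup of mode_Is creates a primitive type named
 * "prim_type_Is" and caches it in the pmap; every later lookup of mode_Is
 * returns the same cached type. */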
/**
 * Get an entity that is initialized with a tarval.
 */
static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
{
    tarval     *tv = get_Const_tarval(cnst);
    pmap_entry *e  = pmap_find(cg->tv_ent, tv);
    entity     *res;
    ir_graph   *rem;

    if (! e) {
        ir_mode *mode = get_irn_mode(cnst);
        ir_type *tp   = get_Const_type(cnst);
        if (tp == firm_unknown_type)
            tp = get_prim_type(cg->types, mode);

        res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp);

        set_entity_ld_ident(res, get_entity_ident(res));
        set_entity_visibility(res, visibility_local);
        set_entity_variability(res, variability_constant);
        set_entity_allocation(res, allocation_static);

        /* we create a new entity here: its initialization must reside in the
           const code irg */
        rem              = current_ir_graph;
        current_ir_graph = get_const_code_irg();
        set_atomic_ent_value(res, new_Const_type(tv, tp));
        current_ir_graph = rem;

        pmap_insert(cg->tv_ent, tv, res);
    }
    else
        res = e->value;

    return res;
}
/**
 * Transforms a Const.
 *
 * @param env   the transformation environment: block, dbg info, mode
 *              and the Const node to transform
 * @return the created ia32 Const node
 */
static ir_node *gen_Const(ia32_transform_env_t *env) {
    ir_node         *cnst;
    symconst_symbol  sym;
    ir_graph        *irg   = env->irg;
    ir_node         *block = env->block;
    ir_node         *node  = env->irn;
    dbg_info        *dbg   = env->dbg;
    ir_mode         *mode  = env->mode;

    if (mode_is_float(mode)) {
        if (! USE_SSE2(env->cg)) {
            cnst_classify_t clss = classify_Const(node);

            if (clss == CNST_NULL)
                return new_rd_ia32_vfldz(dbg, irg, block, mode);
            else if (clss == CNST_ONE)
                return new_rd_ia32_vfld1(dbg, irg, block, mode);
        }

        sym.entity_p = get_entity_for_tv(env->cg, node);

        cnst     = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
        env->irn = cnst;
        cnst     = gen_SymConst(env);
    }
    else {
        cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node));
        set_ia32_Const_attr(cnst, node);
    }

    return cnst;
}
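/* Sketch of the resulting selection (assuming the x87 path, i.e. no SSE2):
 * a float Const 0.0 becomes vfldz and 1.0 becomes vfld1; any other float
 * constant is materialized as a static entity and referenced through a
 * SymConst, while integer Consts map directly to ia32_Const. */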
/**
 * Transforms (all) Const's into ia32_Const and places them in the
 * block where they are used (or in the cfg-pred Block in case of Phi's).
 * Additionally all reference nodes are changed into mode_Is nodes.
 */
void ia32_place_consts_set_modes(ir_node *irn, void *env) {
    ia32_code_gen_t      *cg = env;
    ia32_transform_env_t  tenv;
    ir_mode              *mode;
    ir_node              *pred, *cnst;
    int                   i;
    opcode                opc;

    if (is_Block(irn))
        return;

    mode = get_irn_mode(irn);

    /* transform all reference nodes into mode_Is nodes */
    if (mode_is_reference(mode)) {
        mode = mode_Is;
        set_irn_mode(irn, mode);
    }

    tenv.block = get_nodes_block(irn);
    tenv.cg    = cg;
    tenv.irg   = cg->irg;
    DEBUG_ONLY(tenv.mod = cg->mod;)

    /* Loop over all predecessors and check for Sym/Const nodes */
    for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
        cnst = NULL;
        pred = get_irn_n(irn, i);
        opc  = get_irn_opcode(pred);

        tenv.irn  = pred;
        tenv.mode = get_irn_mode(pred);
        tenv.dbg  = get_irn_dbg_info(pred);

        /* If it's a Phi, then we need to create the */
        /* new Const in its predecessor block        */
        if (is_Phi(irn)) {
            tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i);
        }

        /* put the const into the block where the original const was */
        if (! cg->opt.placecnst) {
            tenv.block = get_nodes_block(pred);
        }

        switch (opc) {
            case iro_Const:
                cnst = gen_Const(&tenv);
                break;
            case iro_SymConst:
                cnst = gen_SymConst(&tenv);
                break;
            default:
                break;
        }

        /* if we found a const, then set it */
        if (cnst) {
            set_irn_n(irn, i, cnst);
        }
    }
}
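/* Worked example (hypothetical graph): for a Phi in block B whose second
 * operand is a Const, the new ia32_Const is created in the cfg-pred block
 * of B's second input rather than in B itself, so the constant is computed
 * on the matching incoming control flow path. */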
/********************************************************************************************************
 *
 *                                    Peephole Optimizations
 *
 ********************************************************************************************************/
/**
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
 */

static int ia32_cnst_compare(ir_node *n1, ir_node *n2) {
    return get_ia32_id_cnst(n1) == get_ia32_id_cnst(n2);
}
/**
 * Checks for potential CJmp/CJmpAM optimization candidates.
 */
static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
    ir_node *cand = NULL;
    ir_node *prev = sched_prev(irn);

    if (is_Block(prev)) {
        if (get_Block_n_cfgpreds(prev) == 1)
            prev = get_Block_cfgpred(prev, 0);
    }

    /* The predecessor must be a ProjX. */
    if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
        prev = get_Proj_pred(prev);

        if (is_op_func(prev))
            cand = prev;
    }

    return cand;
}
static int is_TestJmp_cand(const ir_node *irn) {
    return is_ia32_TestJmp(irn) || is_ia32_And(irn);
}

/**
 * Checks if two consecutive arguments of cand match
 * the two arguments of irn (TestJmp).
 */
static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
    ir_node *in1 = get_irn_n(irn, 0);
    ir_node *in2 = get_irn_n(irn, 1);
    int      i, n = get_irn_arity(cand);
    int      same_args = 0;

    for (i = 0; i < n - 1; i++) {
        if (get_irn_n(cand, i)     == in1 &&
            get_irn_n(cand, i + 1) == in2)
        {
            same_args = 1;
            break;
        }
    }

    if (same_args)
        return ia32_cnst_compare(cand, irn);

    return 0;
}
/**
 * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And).
 */
static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand    = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
    int      replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_TestJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));

        if (is_ia32_And(cand))
            set_irn_op(irn, op_ia32_CJmpAM);
        else
            set_irn_op(irn, op_ia32_CJmp);

        DB((cg->mod, LEVEL_1, "%+F\n", irn));
    }
}
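/* Assumed effect at the instruction level: if the flags were already set by
 * an And on the same operands, the explicit test becomes redundant, e.g.
 *
 *     and  eax, ebx                and  eax, ebx
 *     test eax, ebx      =>
 *     jne  target                  jne  target      (CJmpAM)
 */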
static int is_CondJmp_cand(const ir_node *irn) {
    return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
}

/**
 * Checks if the arguments of cand are the same as those of irn.
 */
static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
    int i, n      = get_irn_arity(cand);
    int same_args = 0;

    for (i = 0; i < n; i++) {
        if (get_irn_n(cand, i) == get_irn_n(irn, i)) {
            same_args = 1;
        }
        else {
            same_args = 0;
            break;
        }
    }

    if (same_args)
        return ia32_cnst_compare(cand, irn);

    return 0;
}

/**
 * Tries to replace a CondJmp by a CJmp.
 */
static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand    = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
    int      replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_CondJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));

        set_irn_op(irn, op_ia32_CJmp);

        DB((cg->mod, LEVEL_1, "%+F\n", irn));
    }
}
/**
 * Tries to optimize two consecutive IncSP nodes.
 */
static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *prev      = be_get_IncSP_pred(irn);
    int      real_uses = get_irn_n_edges(prev);

    if (real_uses != 1) {
        /*
           This is a hack that should be removed if be_abi_fix_stack_nodes()
           is fixed. Currently it leaves some IncSP's outside the chain ...
           The previous IncSP is NOT our prev, but directly scheduled before ...
           Impossible in a bug-free implementation :-)
        */
        prev      = sched_prev(irn);
        real_uses = 1;
    }

    if (be_is_IncSP(prev) && real_uses == 1) {
        /* first IncSP has only one IncSP user, kill the first one */
        unsigned       prev_offs = be_get_IncSP_offset(prev);
        be_stack_dir_t prev_dir  = be_get_IncSP_direction(prev);
        unsigned       curr_offs = be_get_IncSP_offset(irn);
        be_stack_dir_t curr_dir  = be_get_IncSP_direction(irn);

        int new_ofs = prev_offs * (prev_dir == be_stack_dir_expand ? -1 : +1) +
                      curr_offs * (curr_dir == be_stack_dir_expand ? -1 : +1);

        if (new_ofs < 0) {
            new_ofs  = -new_ofs;
            curr_dir = be_stack_dir_expand;
        }
        else
            curr_dir = be_stack_dir_shrink;

        be_set_IncSP_offset(prev, 0);
        be_set_IncSP_offset(irn, (unsigned)new_ofs);
        be_set_IncSP_direction(irn, curr_dir);
    }
}
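/* Worked example of the folding arithmetic above: if prev expands the stack
 * by 8 (contribution -8) and irn shrinks it by 4 (contribution +4), then
 * new_ofs = -4, so irn becomes a single expand by 4 and prev is neutralized
 * by setting its offset to 0. */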
/**
 * Performs Peephole Optimizations.
 */
void ia32_peephole_optimization(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;

    if (is_ia32_TestJmp(irn))
        ia32_optimize_TestJmp(irn, cg);
    else if (is_ia32_CondJmp(irn))
        ia32_optimize_CondJmp(irn, cg);
    else if (be_is_IncSP(irn))
        ia32_optimize_IncSP(irn, cg);
}
/******************************************************************
 *
 *                        Address Mode
 *
 ******************************************************************/
static int node_is_ia32_comm(const ir_node *irn) {
    return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}

static int ia32_get_irn_n_edges(const ir_node *irn) {
    const ir_edge_t *edge;
    int cnt = 0;

    foreach_out_edge(irn, edge) {
        cnt++;
    }

    return cnt;
}
/**
 * Returns the first mode_M Proj connected to irn.
 */
static ir_node *get_mem_proj(const ir_node *irn) {
    const ir_edge_t *edge;
    ir_node         *src;

    assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

    foreach_out_edge(irn, edge) {
        src = get_edge_src_irn(edge);

        assert(is_Proj(src) && "Proj expected");

        if (get_irn_mode(src) == mode_M)
            return src;
    }

    return NULL;
}
/**
 * Returns the first Proj with mode != mode_M connected to irn.
 */
static ir_node *get_res_proj(const ir_node *irn) {
    const ir_edge_t *edge;
    ir_node         *src;

    assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

    foreach_out_edge(irn, edge) {
        src = get_edge_src_irn(edge);

        assert(is_Proj(src) && "Proj expected");

        if (get_irn_mode(src) != mode_M)
            return src;
    }

    return NULL;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
    if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
        return 1;
    }

    return 0;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
    int (*is_op_func)(const ir_node *n))
{
    if (is_Proj(pred)) {
        pred = get_Proj_pred(pred);
        if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
            return 1;
        }
    }

    return 0;
}
/**
 * Checks if irn is a candidate for address calculation or address mode.
 *
 * address calculation (AC):
 * - none of the operands may be a Load within the same block OR
 * - all Loads must have more than one user OR
 * - the irn has a frame entity (it's a former FrameAddr)
 *
 * address mode (AM):
 * - at least one operand has to be a Load within the same block AND
 * - the Load must not have users other than the irn AND
 * - the irn must not have a frame entity set
 *
 * @param block      The block the Loads must/must not be in
 * @param irn        The irn to check
 * @param check_addr 1 to check for address calculation, 0 to check for address mode
 * @return 1 if irn is a candidate for AC or AM, 0 otherwise
 */
static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr) {
    ir_node *in;
    int      n, is_cand = check_addr;

    in = get_irn_n(irn, 2);

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
        n       = ia32_get_irn_n_edges(in);
        is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
    }

    in = get_irn_n(irn, 3);

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
        n       = ia32_get_irn_n_edges(in);
        is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
    }

    is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : is_cand;

    return is_cand;
}
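/* Illustrative reading of the rules above: for Add(x, Proj(Load)) where the
 * Load lives in the same block and has exactly one user, the Add is rejected
 * as an address calculation (check_addr == 1 yields 0, so no LEA) but
 * accepted as an address mode candidate (check_addr == 0 yields 1), so the
 * Load can later be folded into the Add. */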
/**
 * Compares the base and index addresses and the Load/Store entities
 * and returns 1 if they are equal.
 */
static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
                                    const ir_node *addr_b, const ir_node *addr_i)
{
    int     is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1));
    entity *lent     = get_ia32_frame_ent(load);
    entity *sent     = get_ia32_frame_ent(store);
    ident  *lid      = get_ia32_am_sc(load);
    ident  *sid      = get_ia32_am_sc(store);
    char   *loffs    = get_ia32_am_offs(load);
    char   *soffs    = get_ia32_am_offs(store);

    /* are both entities set and equal? */
    if (is_equal && (lent || sent))
        is_equal = lent && sent && (lent == sent);

    /* are address mode idents set and equal? */
    if (is_equal && (lid || sid))
        is_equal = lid && sid && (lid == sid);

    /* are offsets set and equal? */
    if (is_equal && (loffs || soffs))
        is_equal = loffs && soffs && strcmp(loffs, soffs) == 0;

    /* are the load and the store of the same mode? */
    is_equal = is_equal ? get_ia32_ls_mode(load) == get_ia32_ls_mode(store) : 0;

    return is_equal;
}
/**
 * Folds Add or Sub to LEA if possible.
 */
static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) {
    ir_graph   *irg        = get_irn_irg(irn);
    dbg_info   *dbg        = get_irn_dbg_info(irn);
    ir_node    *block      = get_nodes_block(irn);
    ir_node    *res        = irn;
    char       *offs       = NULL;
    const char *offs_cnst  = NULL;
    char       *offs_lea   = NULL;
    int         scale      = 0;
    int         isadd      = 0;
    int         dolea      = 0;
    int         have_am_sc = 0;
    int         am_sc_sign = 0;
    ident      *am_sc      = NULL;
    ir_node    *left, *right, *temp;
    ir_node    *base, *index;
    ia32_am_flavour_t am_flav;
    DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)
    if (is_ia32_Add(irn))
        isadd = 1;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    /* "normalize" arguments in case of add with two operands */
    if (isadd && ! be_is_NoReg(cg, right)) {
        /* put LEA == ia32_am_O as right operand */
        if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put LEA != ia32_am_O as left operand */
        if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put SHL as left operand iff left is NOT a LEA */
        if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }
    }

    base  = left;
    index = noreg;
    /* check for operation with immediate */
    if (is_ia32_ImmConst(irn)) {
        DBG((mod, LEVEL_1, "\tfound op with imm const"));

        offs_cnst = get_ia32_cnst(irn);
        dolea     = 1;
    }
    else if (is_ia32_ImmSymConst(irn)) {
        DBG((mod, LEVEL_1, "\tfound op with imm symconst"));

        have_am_sc = 1;
        am_sc      = get_ia32_id_cnst(irn);
        am_sc_sign = is_ia32_am_sc_sign(irn);
        dolea      = 1;
    }

    /* determine the operand which needs to be checked */
    if (be_is_NoReg(cg, right)) {
        temp = left;
    }
    else {
        temp = right;
    }

    /* check if right operand is AMConst (LEA with ia32_am_O)  */
    /* but we can only eat it up if there is no other symconst */
    /* because the linker won't accept two symconsts           */
    if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
        DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));

        offs_lea   = get_ia32_am_offs(temp);
        am_sc      = get_ia32_am_sc(temp);
        am_sc_sign = is_ia32_am_sc_sign(temp);
        dolea      = 1;
    }
    if (isadd) {
        /* default for add -> make right operand to index */
        index = right;
        dolea = 1;

        DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));

        /* determine the operand which needs to be checked */
        temp = left;
        if (is_ia32_Lea(left)) {
            temp = right;
        }

        /* check for SHL 1,2,3 */
        if (pred_is_specific_node(temp, is_ia32_Shl)) {
            temp = get_Proj_pred(temp);

            if (get_ia32_Immop_tarval(temp)) {
                scale = get_tarval_long(get_ia32_Immop_tarval(temp));

                if (scale <= 3) {
                    index = get_irn_n(temp, 2);

                    DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
                }
            }
        }

        /* fix base */
        if (! be_is_NoReg(cg, index)) {
            /* if we have index, but left == right -> no base */
            if (left == right) {
                base = noreg;
            }
            else if (! is_ia32_Lea(left) && (index != right)) {
                /* index != right -> we found a good Shl           */
                /* left  != LEA   -> this Shl was the left operand */
                /* -> base is right operand                        */
                base = right;
            }
        }
    }
    /* Try to assimilate a LEA as left operand */
    if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
        ir_node *assim_lea_idx, *assim_lea_base;

        am_flav        = get_ia32_am_flavour(left);
        assim_lea_base = get_irn_n(left, 0);
        assim_lea_idx  = get_irn_n(left, 1);

        /* If we have an Add with a real right operand (not NoReg) and  */
        /* the LEA contains already an index calculation then we create */
        /* a new LEA.                                                   */
        /* If the LEA contains already a frame_entity then we also      */
        /* create a new one otherwise we would lose it.                 */
        if ((isadd && ! be_is_NoReg(cg, index) && (am_flav & ia32_I)) || /* no new LEA if index already set */
            get_ia32_frame_ent(left)                                  || /* no new LEA if stack access */
            (have_am_sc && get_ia32_am_sc(left))                      || /* no new LEA if AM symconst already present */
            /* at least one of the LEA operands must be NOREG */
            (!be_is_NoReg(cg, assim_lea_base) && !be_is_NoReg(cg, assim_lea_idx)))
        {
            DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
        }
        else {
            DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
            offs       = get_ia32_am_offs(left);
            am_sc      = have_am_sc ? am_sc : get_ia32_am_sc(left);
            have_am_sc = am_sc ? 1 : 0;
            am_sc_sign = is_ia32_am_sc_sign(left);
            scale      = get_ia32_am_scale(left);

            if (be_is_NoReg(cg, assim_lea_base) && ! be_is_NoReg(cg, assim_lea_idx)) {
                /* assimilate index */
                assert(be_is_NoReg(cg, index) && ! be_is_NoReg(cg, base) && "operand mismatch for LEA assimilation");
                index = assim_lea_idx;
            }
            else if (! be_is_NoReg(cg, assim_lea_base) && be_is_NoReg(cg, assim_lea_idx)) {
                /* assimilate base */
                assert(! be_is_NoReg(cg, index) && (base == left) && "operand mismatch for LEA assimilation");
                base = assim_lea_base;
            }
        }
    }
    /* ok, we can create a new LEA */
    if (dolea) {
        res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is);

        /* add the old offset of a previous LEA */
        if (offs) {
            add_ia32_am_offs(res, offs);
        }

        /* add the new offset */
        if (isadd) {
            if (offs_cnst) {
                add_ia32_am_offs(res, offs_cnst);
            }
            if (offs_lea) {
                add_ia32_am_offs(res, offs_lea);
            }
        }
        else {
            /* either lea_O-cnst, -cnst or -lea_O */
            if (offs_cnst) {
                if (offs_lea) {
                    add_ia32_am_offs(res, offs_lea);
                }
                sub_ia32_am_offs(res, offs_cnst);
            }
            else {
                sub_ia32_am_offs(res, offs_lea);
            }
        }

        /* set the address mode symconst */
        if (have_am_sc) {
            set_ia32_am_sc(res, am_sc);
            if (am_sc_sign)
                set_ia32_am_sc_sign(res);
        }

        /* copy the frame entity (could be set in case of Add */
        /* which was a FrameAddr)                             */
        set_ia32_frame_ent(res, get_ia32_frame_ent(irn));

        if (is_ia32_use_frame(irn))
            set_ia32_use_frame(res);

        set_ia32_am_scale(res, scale);

        /* determine new am flavour */
        am_flav = ia32_am_N;
        if (offs || offs_cnst || offs_lea) {
            am_flav |= ia32_O;
        }
        if (! be_is_NoReg(cg, base)) {
            am_flav |= ia32_B;
        }
        if (! be_is_NoReg(cg, index)) {
            am_flav |= ia32_I;
        }
        set_ia32_am_flavour(res, am_flav);

        set_ia32_op_type(res, ia32_AddrModeS);

        SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));

        DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));

        /* get the result Proj of the Add/Sub */
        irn = get_res_proj(irn);

        assert(irn && "Couldn't find result proj");

        /* exchange the old op with the new LEA */
        exchange(irn, res);
    }

    return res;
}
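/* Hedged example of the folding (values purely illustrative): for a tree
 * Add(x, Shl(y, 2)) whose Add additionally carries an ImmConst attribute
 * of 4, the Shl provides scale 2 and everything collapses into a single
 *
 *     lea res, [x + y*4 + 4]
 *
 * as long as base, index, scale and offset fit into one am flavour. */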
/**
 * Optimizes a pattern around irn to address mode if possible.
 */
void ia32_optimize_am(ir_node *irn, void *env) {
    ia32_code_gen_t *cg  = env;
    ir_node         *res = irn;
    dbg_info        *dbg;
    ir_mode         *mode;
    ir_node         *block, *noreg_gp, *noreg_fp;
    ir_node         *left, *right, *temp;
    ir_node         *store, *load, *mem_proj;
    ir_node         *succ, *addr_b, *addr_i;
    int              check_am_src = 0;
    DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)

    if (! is_ia32_irn(irn))
        return;

    dbg      = get_irn_dbg_info(irn);
    mode     = get_irn_mode(irn);
    block    = get_nodes_block(irn);
    noreg_gp = ia32_new_NoReg_gp(cg);
    noreg_fp = ia32_new_NoReg_fp(cg);
    DBG((mod, LEVEL_1, "checking for AM\n"));

    /* 1st part: check for address calculations and transform them into a LEA */

    /* Following cases can occur:                                        */
    /* - Sub (l, imm)                      -> LEA [base - offset]        */
    /* - Sub (l, r == LEA with ia32_am_O)  -> LEA [base - offset]        */
    /* - Add (l, imm)                      -> LEA [base + offset]        */
    /* - Add (l, r == LEA with ia32_am_O)  -> LEA [base + offset]        */
    /* - Add (l == LEA with ia32_am_O, r)  -> LEA [base + offset]        */
    /* - Add (l, r)                        -> LEA [base + index * scale] */
    /*      with scale > 1 iff l/r == shl (1,2,3)                        */

    if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
        left  = get_irn_n(irn, 2);
        right = get_irn_n(irn, 3);

        /* Do not try to create a LEA if one of the operands is a Load. */
        /* check if irn is a candidate for address calculation */
        if (is_candidate(block, irn, 1)) {
            DBG((mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
            res = fold_addr(cg, irn, noreg_gp);

            if (res != irn)
                DB((mod, LEVEL_1, "transformed into %+F\n", res));
            else
                DB((mod, LEVEL_1, "not transformed\n"));
        }
    }
    /* 2nd part: fold following patterns:                                          */
    /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one       */
    /* - Store -> LEA into Store }       Load/Store, it might be better to keep    */
    /*                                   the LEA                                   */
    /* - op -> Load into AMop with am_Source                                       */
    /*   conditions:                                                               */
    /*     - op is am_Source capable AND                                           */
    /*     - the Load is only used by this op AND                                  */
    /*     - the Load is in the same block                                         */
    /* - Store -> op -> Load into AMop with am_Dest                                */
    /*   conditions:                                                               */
    /*     - op is am_Dest capable AND                                             */
    /*     - the Store uses the same address as the Load AND                       */
    /*     - the Load is only used by this op AND                                  */
    /*     - the Load and Store are in the same block AND                          */
    /*     - nobody else uses the result of the op                                 */

    if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
        /* 1st: check for Load/Store -> LEA */
        if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
            left = get_irn_n(irn, 0);

            if (is_ia32_Lea(left)) {
                DBG((mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));

                /* get the AM attributes from the LEA */
                add_ia32_am_offs(irn, get_ia32_am_offs(left));
                set_ia32_am_scale(irn, get_ia32_am_scale(left));
                set_ia32_am_flavour(irn, get_ia32_am_flavour(left));

                set_ia32_am_sc(irn, get_ia32_am_sc(left));
                if (is_ia32_am_sc_sign(left))
                    set_ia32_am_sc_sign(irn);

                set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);

                /* set base and index */
                set_irn_n(irn, 0, get_irn_n(left, 0));
                set_irn_n(irn, 1, get_irn_n(left, 1));

                /* clear remat flag */
                set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
            }
        }
        /* check if the node is an address mode candidate */
        else if (is_candidate(block, irn, 0)) {
            DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));

            left = get_irn_n(irn, 2);
            if (get_irn_arity(irn) == 4) {
                /* it's an "unary" operation */
                right = left;
            }
            else {
                right = get_irn_n(irn, 3);
            }

            /* normalize commutative ops */
            if (node_is_ia32_comm(irn)) {
                /* Assure that the right operand is always a Load if there */
                /* is one, because ops can only use Dest AM if the right   */
                /* operand is a Load, so we only need to check the right   */
                /* operand.                                                */
                if (pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                {
                    set_irn_n(irn, 2, right);
                    set_irn_n(irn, 3, left);

                    temp  = left;
                    left  = right;
                    right = temp;
                }
            }
            /* check for Store -> op -> Load */

            /* Store -> op -> Load optimization is only possible if supported by op */
            /* and if the right operand is a Load                                   */
            if ((get_ia32_am_support(irn) & ia32_am_Dest) &&
                pred_is_specific_nodeblock(block, right, is_ia32_Ld))
            {
                /* An address mode capable op always has a result Proj.                  */
                /* If this Proj is used by more than one other node, we don't need to    */
                /* check further, otherwise we check for Store and remember the address, */
                /* the Store points to.                                                  */

                succ = get_res_proj(irn);
                assert(succ && "Couldn't find result proj");

                addr_b = NULL;
                addr_i = NULL;
                store  = NULL;

                /* now check for users and Store */
                if (ia32_get_irn_n_edges(succ) == 1) {
                    succ = get_edge_src_irn(get_irn_out_edge_first(succ));

                    if (is_ia32_fStore(succ) || is_ia32_Store(succ)) {
                        store  = succ;
                        addr_b = get_irn_n(store, 0);

                        /* Could be that the Store is connected to the address    */
                        /* calculating LEA while the Load is already transformed. */
                        if (is_ia32_Lea(addr_b)) {
                            succ   = addr_b;
                            addr_b = get_irn_n(succ, 0);
                            addr_i = get_irn_n(succ, 1);
                        }
                        else {
                            addr_i = get_irn_n(store, 1);
                        }
                    }
                }

                if (store) {
                    /* we found a Store as single user: Now check for Load */

                    /* Extra check for commutative ops with two Loads */
                    /* -> put the interesting Load right              */
                    if (node_is_ia32_comm(irn) &&
                        pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                    {
                        if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
                            (addr_i == get_irn_n(get_Proj_pred(left), 1)))
                        {
                            /* We exchange left and right, so it's easier to kill     */
                            /* the correct Load later and to handle unary operations. */
                            set_irn_n(irn, 2, right);
                            set_irn_n(irn, 3, left);
                            temp  = left;
                            left  = right;
                            right = temp;
                        }
                    }

                    /* skip the Proj for easier access */
                    load = get_Proj_pred(right);

                    /* Compare Load and Store address */
                    if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
                        /* The right Load is from the same address, so we can */
                        /* disconnect the Load and Store here                 */

                        /* set new base, index and attributes */
                        set_irn_n(irn, 0, addr_b);
                        set_irn_n(irn, 1, addr_i);
                        add_ia32_am_offs(irn, get_ia32_am_offs(load));
                        set_ia32_am_scale(irn, get_ia32_am_scale(load));
                        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
                        set_ia32_op_type(irn, ia32_AddrModeD);
                        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
                        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

                        set_ia32_am_sc(irn, get_ia32_am_sc(load));
                        if (is_ia32_am_sc_sign(load))
                            set_ia32_am_sc_sign(irn);

                        if (is_ia32_use_frame(load))
                            set_ia32_use_frame(irn);

                        /* connect to Load memory and disconnect Load */
                        if (get_irn_arity(irn) == 5) {
                            /* binary AMop */
                            set_irn_n(irn, 4, get_irn_n(load, 2));
                            set_irn_n(irn, 3, noreg_gp);
                        }
                        else {
                            /* unary AMop */
                            set_irn_n(irn, 3, get_irn_n(load, 2));
                            set_irn_n(irn, 2, noreg_gp);
                        }

                        /* connect the memory Proj of the Store to the op */
                        mem_proj = get_mem_proj(store);
                        set_Proj_pred(mem_proj, irn);
                        set_Proj_proj(mem_proj, 1);

                        /* clear remat flag */
                        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

                        DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
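                        /* Assumed assembly-level effect of the dest AM merge:
                         * Load, op and Store on the same address collapse into
                         * one read-modify-write instruction, e.g.
                         *
                         *     mov eax, [ebp-4]
                         *     add eax, ebx          =>    add [ebp-4], ebx
                         *     mov [ebp-4], eax
                         */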
                    }
                }
                else if (get_ia32_am_support(irn) & ia32_am_Source) {
                    /* There was no store, check if we still can optimize for source address mode */
                    check_am_src = 1;
                }
            } /* if (support AM Dest) */
            else if (get_ia32_am_support(irn) & ia32_am_Source) {
                /* op doesn't support AM Dest -> check for AM Source */
                check_am_src = 1;
            }

            /* normalize commutative ops */
            if (node_is_ia32_comm(irn)) {
                /* Assure that the left operand is always a Load if there is */
                /* one, because non-commutative ops can only use Source AM   */
                /* if the left operand is a Load, so we only need to check   */
                /* the left operand afterwards.                              */
                if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
                    set_irn_n(irn, 2, right);
                    set_irn_n(irn, 3, left);

                    temp  = left;
                    left  = right;
                    right = temp;
                }
            }
            /* optimize op -> Load iff the Load is only used by this op  */
            /* and the left operand is a Load only used by this irn      */
            if (check_am_src &&
                pred_is_specific_nodeblock(block, left, is_ia32_Ld) &&
                (ia32_get_irn_n_edges(left) == 1))
            {
                left = get_Proj_pred(left);

                addr_b = get_irn_n(left, 0);
                addr_i = get_irn_n(left, 1);

                /* set new base, index and attributes */
                set_irn_n(irn, 0, addr_b);
                set_irn_n(irn, 1, addr_i);
                add_ia32_am_offs(irn, get_ia32_am_offs(left));
                set_ia32_am_scale(irn, get_ia32_am_scale(left));
                set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
                set_ia32_op_type(irn, ia32_AddrModeS);
                set_ia32_frame_ent(irn, get_ia32_frame_ent(left));
                set_ia32_ls_mode(irn, get_ia32_ls_mode(left));

                set_ia32_am_sc(irn, get_ia32_am_sc(left));
                if (is_ia32_am_sc_sign(left))
                    set_ia32_am_sc_sign(irn);

                /* clear remat flag */
                set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

                if (is_ia32_use_frame(left))
                    set_ia32_use_frame(irn);

                /* connect to Load memory */
                if (get_irn_arity(irn) == 5) {
                    /* binary AMop */
                    set_irn_n(irn, 4, get_irn_n(left, 2));
                }
                else {
                    /* unary AMop */
                    set_irn_n(irn, 3, get_irn_n(left, 2));
                }

                /* disconnect from Load */
                set_irn_n(irn, 2, noreg_gp);

                /* If Load has a memory Proj, connect it to the op */
                mem_proj = get_mem_proj(left);
                if (mem_proj) {
                    set_Proj_pred(mem_proj, irn);
                    set_Proj_proj(mem_proj, 1);
                }

                DB((mod, LEVEL_1, "merged with %+F into source AM\n", left));
            }
        }
    }
}
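/* Assumed assembly-level effect of the source AM merge, mirroring the dest
 * AM case above: the single-user Load disappears into the op's memory
 * operand, e.g.
 *
 *     mov eax, [ebp-8]       =>    add ebx, [ebp-8]
 *     add ebx, eax
 */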