#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"

#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_transform.h"

#include "dbginfo_t.h"
/**
 * Merge the debug info due to a LEA creation.
 *
 * @param oldn  the node
 * @param n     the new constant holding the value
 */
#define DBG_OPT_LEA(oldn, n)                                   \
    do {                                                       \
        hook_merge_nodes(&n, 1, &oldn, 1, FS_BE_IA32_LEA);     \
        __dbg_info_merge_pair(n, oldn, dbg_backend);           \
    } while(0)
#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)

typedef int is_op_func_t(const ir_node *n);
/**
 * Checks if a node represents the NOREG value.
 */
static int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
    be_abi_irg_t *babi = cg->birg->abi;
    const arch_register_t *fp_noreg = USE_SSE2(cg) ?
        &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG];

    return (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn) ||
           (be_abi_get_callee_save_irn(babi, fp_noreg) == irn);
}
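
/* Note: NOREG is the backend's placeholder for an unused base or index
 * operand of an address; the folding code below uses this predicate to tell
 * real operands from such placeholders. */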
/*************************************************
 *   _____                _              _
 *  / ____|              | |            | |
 * | |     ___  _ __  ___| |_ __ _ _ __ | |_ ___
 * | |    / _ \| '_ \/ __| __/ _` | '_ \| __/ __|
 * | |___| (_) | | | \__ \ || (_| | | | | |_\__ \
 *  \_____\___/|_| |_|___/\__\__,_|_| |_|\__|___/
 *
 *************************************************/
/**
 * Creates a unique ident by adding a number to a tag.
 *
 * @param tag   the tag string, must contain a %d if a number
 *              should be added
 */
static ident *unique_id(const char *tag)
{
    static unsigned id = 0;
    char str[256];

    snprintf(str, sizeof(str), tag, ++id);
    return new_id_from_str(str);
}
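
/* Example (illustrative): successive calls of unique_id("ia32FloatCnst_%u")
 * return the idents "ia32FloatCnst_1", "ia32FloatCnst_2", ... */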
/**
 * Transforms a SymConst.
 *
 * @param env   the transform environment (block, mode, debug info and
 *              the SymConst node to transform)
 * @return the created ia32 Const node
 */
static ir_node *gen_SymConst(ia32_transform_env_t *env) {
    ir_node  *cnst;
    dbg_info *dbg   = env->dbg;
    ir_mode  *mode  = env->mode;
    ir_graph *irg   = env->irg;
    ir_node  *block = env->block;

    if (mode_is_float(mode)) {
        if (USE_SSE2(env->cg))
            cnst = new_rd_ia32_fConst(dbg, irg, block, mode);
        else
            cnst = new_rd_ia32_vfConst(dbg, irg, block, mode);
    }
    else
        cnst = new_rd_ia32_Const(dbg, irg, block, mode);

    set_ia32_Const_attr(cnst, env->irn);
    return cnst;
}
/**
 * Get a primitive type for a mode.
 */
static ir_type *get_prim_type(pmap *types, ir_mode *mode)
{
    pmap_entry *e = pmap_find(types, mode);
    ir_type    *res;

    if (! e) {
        char buf[64];
        snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
        res = new_type_primitive(new_id_from_str(buf), mode);
        pmap_insert(types, mode, res);
    }
    else
        res = e->value;

    return res;
}
/**
 * Get an entity that is initialized with a tarval.
 */
static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
{
    tarval     *tv = get_Const_tarval(cnst);
    pmap_entry *e  = pmap_find(cg->isa->tv_ent, tv);
    entity     *res;
    ir_graph   *rem;

    if (! e) {
        ir_mode *mode = get_irn_mode(cnst);
        ir_type *tp   = get_Const_type(cnst);
        if (tp == firm_unknown_type)
            tp = get_prim_type(cg->isa->types, mode);

        res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp);

        set_entity_ld_ident(res, get_entity_ident(res));
        set_entity_visibility(res, visibility_local);
        set_entity_variability(res, variability_constant);
        set_entity_allocation(res, allocation_static);

        /* we create a new entity here: its initialization must reside in
           the const code irg */
        rem              = current_ir_graph;
        current_ir_graph = get_const_code_irg();
        set_atomic_ent_value(res, new_Const_type(tv, tp));
        current_ir_graph = rem;

        pmap_insert(cg->isa->tv_ent, tv, res);
    }
    else
        res = e->value;

    return res;
}
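
/* Note: cg->isa->tv_ent acts as a cache, so all Consts with an identical
 * tarval share one static, constant-initialized entity in the global type. */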
/**
 * Transforms a Const.
 *
 * @param env   the transform environment (block, mode, debug info and
 *              the Const node to transform)
 * @return the created ia32 Const node
 */
static ir_node *gen_Const(ia32_transform_env_t *env) {
    ir_node         *cnst;
    symconst_symbol  sym;
    ir_graph        *irg   = env->irg;
    ir_node         *block = env->block;
    ir_node         *node  = env->irn;
    dbg_info        *dbg   = env->dbg;
    ir_mode         *mode  = env->mode;

    if (mode_is_float(mode)) {
        if (! USE_SSE2(env->cg)) {
            cnst_classify_t clss = classify_Const(node);

            if (clss == CNST_NULL)
                return new_rd_ia32_vfldz(dbg, irg, block, mode);
            else if (clss == CNST_ONE)
                return new_rd_ia32_vfld1(dbg, irg, block, mode);
        }

        sym.entity_p = get_entity_for_tv(env->cg, node);

        cnst     = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
        env->irn = cnst;
        cnst     = gen_SymConst(env);
    }
    else {
        cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node));
        set_ia32_Const_attr(cnst, node);
    }

    return cnst;
}
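
/* Illustrative mapping (x87 path): Const 0.0 -> vfldz, Const 1.0 -> vfld1;
 * any other float constant is materialized as a static entity and addressed
 * through a SymConst. Integer Consts map directly to ia32_Const. */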
/**
 * Transforms (all) Const's into ia32_Const and places them in the
 * block where they are used (or in the cfg-pred Block in case of Phi's).
 * Additionally all reference nodes are changed into mode_Is nodes.
 */
void ia32_place_consts_set_modes(ir_node *irn, void *env) {
    ia32_code_gen_t      *cg = env;
    ia32_transform_env_t  tenv;
    ir_mode              *mode;
    ir_node              *pred, *cnst;
    int                   i;
    opcode                opc;

    if (is_Block(irn))
        return;

    mode = get_irn_mode(irn);

    /* transform all reference nodes into mode_Is nodes */
    if (mode_is_reference(mode)) {
        mode = mode_Is;
        set_irn_mode(irn, mode);
    }

    tenv.block = get_nodes_block(irn);
    tenv.cg    = cg;
    tenv.irg   = cg->irg;
    DEBUG_ONLY(tenv.mod = cg->mod;)

    /* Loop over all predecessors and check for Sym/Const nodes */
    for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
        cnst = NULL;
        pred = get_irn_n(irn, i);
        opc  = get_irn_opcode(pred);

        tenv.irn  = pred;
        tenv.mode = get_irn_mode(pred);
        tenv.dbg  = get_irn_dbg_info(pred);

        /* If it's a Phi, then we need to create the */
        /* new Const in its predecessor block        */
        if (is_Phi(irn)) {
            tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i);
        }

        /* put the const into the block where the original const was */
        if (! cg->opt.placecnst) {
            tenv.block = get_nodes_block(pred);
        }

        switch (opc) {
            case iro_Const:
                cnst = gen_Const(&tenv);
                break;
            case iro_SymConst:
                cnst = gen_SymConst(&tenv);
                break;
            default:
                break;
        }

        /* if we found a const, then set it */
        if (cnst) {
            set_irn_n(irn, i, cnst);
        }
    }
}
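
/* Example (illustrative): for a Phi like  p = Phi(a, Const 7)  the new ia32
 * Const is created in the cfg predecessor block of the second input, since
 * Phi operands are evaluated on the incoming control flow edges. */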
/********************************************************************************************************
 *  _____                 _           _          ____        _   _           _          _   _
 * |  __ \               | |         | |        / __ \      | | (_)         (_)        | | (_)
 * | |__) |__  ___ _ __  | |__   ___ | | ___   | |  | |_ __ | |_ _ _ __ ___  _ ______ _| |_ _  ___  _ __
 * |  ___/ _ \/ _ \ '_ \ | '_ \ / _ \| |/ _ \  | |  | | '_ \| __| | '_ ` _ \| |_  / _` | __| |/ _ \| '_ \
 * | |  |  __/  __/ |_) || | | | (_) | |  __/  | |__| | |_) | |_| | | | | | | |/ / (_| | |_| | (_) | | | |
 * |_|   \___|\___| .__/ |_| |_|\___/|_|\___|   \____/| .__/ \__|_|_| |_| |_|_/___\__,_|\__|_|\___/|_| |_|
 *                | |                                 | |
 *                |_|                                 |_|
 ********************************************************************************************************/
/**
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
 */

/**
 * Checks if both ia32 nodes carry the same attached constant.
 */
static int ia32_cnst_compare(ir_node *n1, ir_node *n2) {
    return get_ia32_id_cnst(n1) == get_ia32_id_cnst(n2);
}
/**
 * Checks for potential CJmp/CJmpAM optimization candidates.
 */
static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
    ir_node *cand = NULL;
    ir_node *prev = sched_prev(irn);

    if (is_Block(prev)) {
        if (get_Block_n_cfgpreds(prev) == 1)
            prev = get_Block_cfgpred(prev, 0);
        else
            prev = NULL;
    }

    /* The predecessor must be a ProjX. */
    if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
        prev = get_Proj_pred(prev);

        if (is_op_func(prev))
            cand = prev;
    }

    return cand;
}
static int is_TestJmp_cand(const ir_node *irn) {
    return is_ia32_TestJmp(irn) || is_ia32_And(irn);
}
/**
 * Checks if two consecutive arguments of cand match
 * the two arguments of irn (TestJmp).
 */
static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
    ir_node *in1       = get_irn_n(irn, 0);
    ir_node *in2       = get_irn_n(irn, 1);
    int      i, n      = get_irn_arity(cand);
    int      same_args = 0;

    for (i = 0; i < n - 1; i++) {
        if (get_irn_n(cand, i)     == in1 &&
            get_irn_n(cand, i + 1) == in2)
        {
            same_args = 1;
            break;
        }
    }

    if (same_args)
        return ia32_cnst_compare(cand, irn);

    return 0;
}
/**
 * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And).
 */
static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
    int      replace;

    /* we found a possible candidate */
    replace = cand ? is_TestJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));

        if (is_ia32_And(cand))
            set_irn_op(irn, op_ia32_CJmpAM);
        else
            set_irn_op(irn, op_ia32_CJmp);

        DB((cg->mod, LEVEL_1, "%+F\n", irn));
    }
}
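
/* Illustrative pattern: if the schedule predecessor of a TestJmp(a, b) is an
 * And computing the same two operands (with the same attached constant), the
 * TestJmp can reuse the And's flags and is rewritten to a CJmpAM; for a
 * preceding TestJmp candidate it becomes a plain CJmp. */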
static int is_CondJmp_cand(const ir_node *irn) {
    return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
}
/**
 * Checks if the arguments of cand are the same as those of irn.
 */
static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
    int i, n      = get_irn_arity(cand);
    int same_args = 1;

    for (i = 0; i < n; i++) {
        if (get_irn_n(cand, i) != get_irn_n(irn, i)) {
            same_args = 0;
            break;
        }
    }

    if (same_args)
        return ia32_cnst_compare(cand, irn);

    return 0;
}
/**
 * Tries to replace a CondJmp by a CJmpAM.
 */
static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
    int      replace;

    /* we found a possible candidate */
    replace = cand ? is_CondJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));

        set_irn_op(irn, op_ia32_CJmp);

        DB((cg->mod, LEVEL_1, "%+F\n", irn));
    }
}
/**
 * Creates a Push from Store(IncSP(gp_reg_size)).
 */
static void ia32_create_Push(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *sp = get_irn_n(irn, 0);
    ir_node *val, *next, *push, *bl, *proj_M, *proj_res, *old_proj_M;
    const ir_edge_t *edge;

    if (get_ia32_am_offs(irn) || !be_is_IncSP(sp))
        return;

    /* the Store must go to the stack with NOREG as index */
    if (arch_get_irn_register(cg->arch_env, get_irn_n(irn, 1)) !=
            &ia32_gp_regs[REG_GP_NOREG])
        return;

    val = get_irn_n(irn, 2);
    if (mode_is_float(get_irn_mode(val)))
        return;

    /* the IncSP must expand the stack by exactly one gp register size */
    if (be_get_IncSP_direction(sp) != be_stack_dir_expand ||
            be_get_IncSP_offset(sp) != get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode))
        return;

    /* ok, translate into Push */
    edge       = get_irn_out_edge_first(irn);
    old_proj_M = get_edge_src_irn(edge);

    next = sched_next(irn);
    sched_remove(irn);
    sched_remove(sp);

    bl   = get_nodes_block(irn);
    push = new_rd_ia32_Push(NULL, current_ir_graph, bl,
        be_get_IncSP_pred(sp), val, be_get_IncSP_mem(sp), mode_T);
    proj_res = new_r_Proj(current_ir_graph, bl, push, get_irn_mode(sp), 0);
    proj_M   = new_r_Proj(current_ir_graph, bl, push, mode_M, 1);

    /* the push must have SP out register */
    arch_set_irn_register(cg->arch_env, push, arch_get_irn_register(cg->arch_env, sp));

    exchange(old_proj_M, proj_M);
    exchange(sp, proj_res);
    sched_add_before(next, push);
    sched_add_after(push, proj_res);
}
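
/* Rewritten pattern (sketch, assuming a 4 byte gp register size):
 *
 *   sp' = IncSP(sp, 4, expand)
 *   M'  = Store(sp', NOREG, val, M)
 * =>
 *   sp', M' = Push(sp, val, M)
 */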
/**
 * Creates a Pop from IncSP(Load(sp)).
 */
static void ia32_create_Pop(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *old_proj_M   = be_get_IncSP_mem(irn);
    ir_node *load         = skip_Proj(old_proj_M);
    ir_node *old_proj_res = NULL;
    ir_node *bl, *pop, *next, *proj_res, *proj_sp, *proj_M;
    const ir_edge_t *edge;
    const arch_register_t *reg, *sp;

    if (! is_ia32_Load(load) || get_ia32_am_offs(load))
        return;

    /* the Load must read from the stack with NOREG as index */
    if (arch_get_irn_register(cg->arch_env, get_irn_n(load, 1)) !=
            &ia32_gp_regs[REG_GP_NOREG])
        return;
    if (arch_get_irn_register(cg->arch_env, get_irn_n(load, 0)) != cg->isa->sp)
        return;

    /* ok, translate into pop */
    foreach_out_edge(load, edge) {
        ir_node *succ = get_edge_src_irn(edge);

        if (succ != old_proj_M) {
            old_proj_res = succ;
            break;
        }
    }
    if (! old_proj_res) {
        assert(0);
        return; /* should not happen */
    }

    bl = get_nodes_block(load);

    /* IncSP is typically scheduled after the load, so remove it first */
    sched_remove(irn);
    next = sched_next(old_proj_res);
    sched_remove(old_proj_res);
    sched_remove(load);

    reg = arch_get_irn_register(cg->arch_env, load);
    sp  = arch_get_irn_register(cg->arch_env, irn);

    pop      = new_rd_ia32_Pop(NULL, current_ir_graph, bl, get_irn_n(irn, 0), get_irn_n(load, 2), mode_T);
    proj_res = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(old_proj_res), 0);
    proj_sp  = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(irn), 1);
    proj_M   = new_r_Proj(current_ir_graph, bl, pop, mode_M, 2);

    exchange(old_proj_M, proj_M);
    exchange(old_proj_res, proj_res);
    exchange(irn, proj_sp);

    arch_set_irn_register(cg->arch_env, proj_res, reg);
    arch_set_irn_register(cg->arch_env, proj_sp, sp);

    sched_add_before(next, proj_sp);
    sched_add_before(proj_sp, proj_res);
    sched_add_before(proj_res, pop);
}
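
/* Rewritten pattern (sketch):
 *
 *   res, M' = Load(sp, NOREG, M)
 *   sp'     = IncSP(sp, ...)      ; consumes the Load's memory Proj
 * =>
 *   res, sp', M' = Pop(sp, M)
 */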
/**
 * Tries to optimize two consecutive IncSP nodes.
 */
static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *prev      = be_get_IncSP_pred(irn);
    int      real_uses = get_irn_n_edges(prev);

    if (be_is_IncSP(prev) && real_uses == 1) {
        /* first IncSP has only one IncSP user, kill the first one */
        unsigned       prev_offs = be_get_IncSP_offset(prev);
        be_stack_dir_t prev_dir  = be_get_IncSP_direction(prev);
        unsigned       curr_offs = be_get_IncSP_offset(irn);
        be_stack_dir_t curr_dir  = be_get_IncSP_direction(irn);

        int new_ofs = prev_offs * (prev_dir == be_stack_dir_expand ? -1 : +1) +
                      curr_offs * (curr_dir == be_stack_dir_expand ? -1 : +1);

        if (new_ofs < 0) {
            new_ofs  = -new_ofs;
            curr_dir = be_stack_dir_expand;
        }
        else
            curr_dir = be_stack_dir_shrink;

        be_set_IncSP_offset(prev, 0);
        be_set_IncSP_offset(irn, (unsigned)new_ofs);
        be_set_IncSP_direction(irn, curr_dir);
    }
    else
        ia32_create_Pop(irn, cg);
}
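
/* Worked example: prev = IncSP(expand, 4) contributes -4, curr =
 * IncSP(shrink, 12) contributes +12 -> new_ofs = +8, so curr becomes
 * IncSP(shrink, 8) and prev is neutralized with offset 0. */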
/**
 * Performs Peephole Optimizations.
 */
void ia32_peephole_optimization(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;

    if (is_ia32_TestJmp(irn))
        ia32_optimize_TestJmp(irn, cg);
    else if (is_ia32_CondJmp(irn))
        ia32_optimize_CondJmp(irn, cg);
    else if (be_is_IncSP(irn))
        ia32_optimize_IncSP(irn, cg);
    else if (is_ia32_Store(irn))
        ia32_create_Push(irn, cg);
}
/******************************************************************
 *                _     _                     __  __           _
 *       /\      | |   | |                   |  \/  |         | |
 *      /  \   __| | __| |_ __ ___  ___ ___  | \  / | ___   __| | ___
 *     / /\ \ / _` |/ _` | '__/ _ \/ __/ __| | |\/| |/ _ \ / _` |/ _ \
 *    / ____ \ (_| | (_| | | |  __/\__ \__ \ | |  | | (_) | (_| |  __/
 *   /_/    \_\__,_|\__,_|_|  \___||___/___/ |_|  |_|\___/ \__,_|\___|
 *
 ******************************************************************/
/**
 * Checks if irn is a commutative ia32 node.
 */
static int node_is_ia32_comm(const ir_node *irn) {
    return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}
/**
 * Returns the number of out edges of irn.
 */
static int ia32_get_irn_n_edges(const ir_node *irn) {
    const ir_edge_t *edge;
    int cnt = 0;

    foreach_out_edge(irn, edge) {
        cnt++;
    }

    return cnt;
}
/**
 * Returns the first mode_M Proj connected to irn.
 */
static ir_node *get_mem_proj(const ir_node *irn) {
    const ir_edge_t *edge;
    ir_node         *src;

    assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

    foreach_out_edge(irn, edge) {
        src = get_edge_src_irn(edge);

        assert(is_Proj(src) && "Proj expected");

        if (get_irn_mode(src) == mode_M)
            return src;
    }

    return NULL;
}
/**
 * Returns the first Proj with mode != mode_M connected to irn.
 */
static ir_node *get_res_proj(const ir_node *irn) {
    const ir_edge_t *edge;
    ir_node         *src;

    assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

    foreach_out_edge(irn, edge) {
        src = get_edge_src_irn(edge);

        assert(is_Proj(src) && "Proj expected");

        if (get_irn_mode(src) != mode_M)
            return src;
    }

    return NULL;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
    if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
        return 1;
    }

    return 0;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
    int (*is_op_func)(const ir_node *n))
{
    if (is_Proj(pred)) {
        pred = get_Proj_pred(pred);
        if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
            return 1;
        }
    }

    return 0;
}
/**
 * Checks if irn is a candidate for address calculation or address mode.
 *
 * address calculation (AC):
 * - none of the operands must be a Load within the same block OR
 * - all Loads must have more than one user OR
 * - the irn has a frame entity (it's a former FrameAddr)
 *
 * address mode (AM):
 * - at least one operand has to be a Load within the same block AND
 * - the Load must not have other users than the irn AND
 * - the irn must not have a frame entity set
 *
 * @param block      The block the Loads must/must not be in
 * @param irn        The irn to check
 * @param check_addr 1 to check for address calculation, 0 otherwise
 * @return 1 if irn is a candidate for AC or AM, 0 otherwise
 */
static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr) {
    ir_node *in;
    int      n, is_cand = check_addr;

    in = get_irn_n(irn, 2);

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
        n       = ia32_get_irn_n_edges(in);
        is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
    }

    in = get_irn_n(irn, 3);

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
        n       = ia32_get_irn_n_edges(in);
        is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
    }

    is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : is_cand;

    return is_cand;
}
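
/* Decision sketch: a Load operand in the same block with exactly one user
 * rules the node out as an address calculation (check_addr == 1) but makes
 * it an address mode candidate (check_addr == 0); a frame entity forces the
 * opposite, because former FrameAddr nodes must stay address calculations. */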
/**
 * Compares the base and index addresses and the load/store entities
 * and returns 1 if they are equal.
 */
static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
    const ir_node *addr_b, const ir_node *addr_i)
{
    int     is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1));
    entity *lent     = get_ia32_frame_ent(load);
    entity *sent     = get_ia32_frame_ent(store);
    ident  *lid      = get_ia32_am_sc(load);
    ident  *sid      = get_ia32_am_sc(store);
    char   *loffs    = get_ia32_am_offs(load);
    char   *soffs    = get_ia32_am_offs(store);

    /* are both entities set and equal? */
    if (is_equal && (lent || sent))
        is_equal = lent && sent && (lent == sent);

    /* are address mode idents set and equal? */
    if (is_equal && (lid || sid))
        is_equal = lid && sid && (lid == sid);

    /* are offsets set and equal? */
    if (is_equal && (loffs || soffs))
        is_equal = loffs && soffs && strcmp(loffs, soffs) == 0;

    /* are the load and the store of the same mode? */
    is_equal = is_equal ? get_ia32_ls_mode(load) == get_ia32_ls_mode(store) : 0;

    return is_equal;
}
/**
 * Folds Add or Sub to LEA if possible.
 */
static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) {
    ir_graph   *irg        = get_irn_irg(irn);
    dbg_info   *dbg        = get_irn_dbg_info(irn);
    ir_node    *block      = get_nodes_block(irn);
    ir_node    *res        = irn;
    char       *offs       = NULL;
    const char *offs_cnst  = NULL;
    char       *offs_lea   = NULL;
    int         scale      = 0;
    int         isadd      = 0;
    int         dolea      = 0;
    int         have_am_sc = 0;
    int         am_sc_sign = 0;
    ident      *am_sc      = NULL;
    ir_node    *left, *right, *temp;
    ir_node    *base, *index;
    ia32_am_flavour_t am_flav;
    DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)

    if (is_ia32_Add(irn))
        isadd = 1;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    /* "normalize" arguments in case of add with two operands */
    if (isadd && ! be_is_NoReg(cg, right)) {
        /* put LEA == ia32_am_O as right operand */
        if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put LEA != ia32_am_O as left operand */
        if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put SHL as left operand iff left is NOT a LEA */
        if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }
    }

    base  = left;
    index = noreg;

    /* check for operation with immediate */
    if (is_ia32_ImmConst(irn)) {
        DBG((mod, LEVEL_1, "\tfound op with imm const"));

        offs_cnst = get_ia32_cnst(irn);
        dolea     = 1;
    }
    else if (is_ia32_ImmSymConst(irn)) {
        DBG((mod, LEVEL_1, "\tfound op with imm symconst"));

        have_am_sc = 1;
        dolea      = 1;
        am_sc      = get_ia32_id_cnst(irn);
        am_sc_sign = is_ia32_am_sc_sign(irn);
    }

    /* determine the operand which needs to be checked */
    if (be_is_NoReg(cg, right)) {
        temp = left;
    }
    else {
        temp = right;
    }

    /* check if right operand is AMConst (LEA with ia32_am_O)  */
    /* but we can only eat it up if there is no other symconst */
    /* because the linker won't accept two symconsts           */
    if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
        DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));

        offs_lea   = get_ia32_am_offs(temp);
        am_sc      = get_ia32_am_sc(temp);
        am_sc_sign = is_ia32_am_sc_sign(temp);
        have_am_sc = 1;
        dolea      = 1;
    }

    if (isadd) {
        /* default for add -> make right operand to index */
        index = right;
        dolea = 1;

        DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));

        /* determine the operand which needs to be checked */
        temp = left;
        if (is_ia32_Lea(left)) {
            temp = right;
        }

        /* check for SHL 1,2,3 */
        if (pred_is_specific_node(temp, is_ia32_Shl)) {
            temp = get_Proj_pred(temp);

            if (get_ia32_Immop_tarval(temp)) {
                scale = get_tarval_long(get_ia32_Immop_tarval(temp));

                if (scale <= 3) {
                    index = get_irn_n(temp, 2);

                    DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
                }
                else
                    scale = 0;
            }
        }

        /* fix base */
        if (! be_is_NoReg(cg, index)) {
            /* if we have index, but left == right -> no base */
            if (left == right) {
                base = noreg;
            }
            else if (! is_ia32_Lea(left) && (index != right)) {
                /* index != right -> we found a good Shl           */
                /* left  != LEA   -> this Shl was the left operand */
                /* -> base is right operand                        */
                base = right;
            }
        }
    }

    /* Try to assimilate a LEA as left operand */
    if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
        ir_node *assim_lea_idx, *assim_lea_base;

        am_flav        = get_ia32_am_flavour(left);
        assim_lea_base = get_irn_n(left, 0);
        assim_lea_idx  = get_irn_n(left, 1);

        /* If we have an Add with a real right operand (not NoReg) and  */
        /* the LEA contains already an index calculation then we create */
        /* a new LEA.                                                   */
        /* If the LEA contains already a frame_entity then we also      */
        /* create a new one otherwise we would lose it.                 */
        if ((isadd && ! be_is_NoReg(cg, index) && (am_flav & ia32_I)) || /* no new LEA if index already set */
            get_ia32_frame_ent(left)                                 || /* no new LEA if stack access */
            (have_am_sc && get_ia32_am_sc(left))                     || /* no new LEA if AM symconst already present */
            /* at least one of the LEA operands must be NOREG */
            (!be_is_NoReg(cg, assim_lea_base) && !be_is_NoReg(cg, assim_lea_idx)))
        {
            DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
        }
        else {
            DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
            offs       = get_ia32_am_offs(left);
            am_sc      = have_am_sc ? am_sc : get_ia32_am_sc(left);
            have_am_sc = am_sc ? 1 : 0;
            am_sc_sign = is_ia32_am_sc_sign(left);
            scale      = get_ia32_am_scale(left);

            if (be_is_NoReg(cg, assim_lea_base) && ! be_is_NoReg(cg, assim_lea_idx)) {
                /* assimilate index */
                assert(be_is_NoReg(cg, index) && ! be_is_NoReg(cg, base) && "operand mismatch for LEA assimilation");
                index = assim_lea_idx;
            }
            else if (! be_is_NoReg(cg, assim_lea_base) && be_is_NoReg(cg, assim_lea_idx)) {
                /* assimilate base */
                assert(! be_is_NoReg(cg, index) && (base == left) && "operand mismatch for LEA assimilation");
                base = assim_lea_base;
            }
        }
    }

    /* ok, we can create a new LEA */
    if (dolea) {
        res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is);

        /* add the old offset of a previous LEA */
        if (offs) {
            add_ia32_am_offs(res, offs);
        }

        /* add the new offset */
        if (isadd) {
            if (offs_cnst) {
                add_ia32_am_offs(res, offs_cnst);
            }
            if (offs_lea) {
                add_ia32_am_offs(res, offs_lea);
            }
        }
        else {
            /* either lea_O-cnst, -cnst or -lea_O */
            if (offs_cnst) {
                if (offs_lea) {
                    add_ia32_am_offs(res, offs_lea);
                }
                sub_ia32_am_offs(res, offs_cnst);
            }
            else if (offs_lea) {
                sub_ia32_am_offs(res, offs_lea);
            }
        }

        /* set the address mode symconst */
        if (have_am_sc) {
            set_ia32_am_sc(res, am_sc);
            if (am_sc_sign)
                set_ia32_am_sc_sign(res);
        }

        /* copy the frame entity (could be set in case of Add */
        /* which was a FrameAddr)                             */
        set_ia32_frame_ent(res, get_ia32_frame_ent(irn));

        if (is_ia32_use_frame(irn))
            set_ia32_use_frame(res);

        /* set scale */
        set_ia32_am_scale(res, scale);

        /* determine new am flavour */
        am_flav = 0;
        if (offs || offs_cnst || offs_lea) {
            am_flav |= ia32_O;
        }
        if (! be_is_NoReg(cg, base)) {
            am_flav |= ia32_B;
        }
        if (! be_is_NoReg(cg, index)) {
            am_flav |= ia32_I;
        }
        if (scale > 0) {
            am_flav |= ia32_S;
        }
        set_ia32_am_flavour(res, am_flav);

        set_ia32_op_type(res, ia32_AddrModeS);

        SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));

        DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));

        /* we will exchange it, report here before the Proj is created */
        DBG_OPT_LEA(irn, res);

        /* get the result Proj of the Add/Sub */
        irn = get_res_proj(irn);

        assert(irn && "Couldn't find result proj");

        /* exchange the old op with the new LEA */
        exchange(irn, res);
    }

    return res;
}
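
/* Hedged folding sketch (each intermediate result single-use):
 *
 *   t = Shl(x, 2)          ; constant shift -> scale 4
 *   u = Add(y, t)
 *   r = Add(u, 8)          ; immediate constant 8
 *
 * can collapse, by repeated folding and LEA assimilation, into
 *
 *   r = Lea [y + x * 4 + 8]
 */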
/**
 * Optimizes a pattern around irn to address mode if possible.
 */
void ia32_optimize_am(ir_node *irn, void *env) {
    ia32_code_gen_t *cg  = env;
    ir_node         *res = irn;
    dbg_info        *dbg;
    ir_mode         *mode;
    ir_node         *block, *noreg_gp, *noreg_fp;
    ir_node         *left, *right, *temp;
    ir_node         *store, *load, *mem_proj;
    ir_node         *succ, *addr_b, *addr_i;
    int              check_am_src = 0;
    DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)

    if (! is_ia32_irn(irn))
        return;

    dbg      = get_irn_dbg_info(irn);
    mode     = get_irn_mode(irn);
    block    = get_nodes_block(irn);
    noreg_gp = ia32_new_NoReg_gp(cg);
    noreg_fp = ia32_new_NoReg_fp(cg);

    DBG((mod, LEVEL_1, "checking for AM\n"));
    /* 1st part: check for address calculations and transform them into a LEA */

    /* Following cases can occur:                                  */
    /* - Sub (l, imm) -> LEA [base - offset]                       */
    /* - Sub (l, r == LEA with ia32_am_O)   -> LEA [base - offset] */
    /* - Add (l, imm) -> LEA [base + offset]                       */
    /* - Add (l, r == LEA with ia32_am_O)   -> LEA [base + offset] */
    /* - Add (l == LEA with ia32_am_O, r)   -> LEA [base + offset] */
    /* - Add (l, r) -> LEA [base + index * scale]                  */
    /*   with scale > 1 iff l/r == shl (1,2,3)                     */

    if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
        left  = get_irn_n(irn, 2);
        right = get_irn_n(irn, 3);

        /* Do not try to create a LEA if one of the operands is a Load. */
        /* check if irn is a candidate for address calculation          */
        if (is_candidate(block, irn, 1)) {
            DBG((mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
            res = fold_addr(cg, irn, noreg_gp);

            if (res != irn)
                DB((mod, LEVEL_1, "transformed into %+F\n", res));
            else
                DB((mod, LEVEL_1, "not transformed\n"));
        }
    }
    /* 2nd part: fold following patterns:                                              */
    /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
    /* - Store -> LEA into Store }       it might be better to keep the LEA            */
    /* - op -> Load into AMop with am_Source                                           */
    /*   conditions:                                                                   */
    /*   - op is am_Source capable AND                                                 */
    /*   - the Load is only used by this op AND                                        */
    /*   - the Load is in the same block                                               */
    /* - Store -> op -> Load into AMop with am_Dest                                    */
    /*   conditions:                                                                   */
    /*   - op is am_Dest capable AND                                                   */
    /*   - the Store uses the same address as the Load AND                             */
    /*   - the Load is only used by this op AND                                        */
    /*   - the Load and Store are in the same block AND                                */
    /*   - nobody else uses the result of the op                                       */
    if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
        /* 1st: check for Load/Store -> LEA */
        if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
            left = get_irn_n(irn, 0);

            if (is_ia32_Lea(left)) {
                DBG((mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));

                /* get the AM attributes from the LEA */
                add_ia32_am_offs(irn, get_ia32_am_offs(left));
                set_ia32_am_scale(irn, get_ia32_am_scale(left));
                set_ia32_am_flavour(irn, get_ia32_am_flavour(left));

                set_ia32_am_sc(irn, get_ia32_am_sc(left));
                if (is_ia32_am_sc_sign(left))
                    set_ia32_am_sc_sign(irn);

                set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);

                /* set base and index */
                set_irn_n(irn, 0, get_irn_n(left, 0));
                set_irn_n(irn, 1, get_irn_n(left, 1));

                /* clear remat flag */
                set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);
            }
        }
        /* check if the node is an address mode candidate */
        else if (is_candidate(block, irn, 0)) {
            DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));

            left = get_irn_n(irn, 2);
            if (get_irn_arity(irn) == 4) {
                /* it's an "unary" operation */
                right = left;
            }
            else {
                right = get_irn_n(irn, 3);
            }

            /* normalize commutative ops */
            if (node_is_ia32_comm(irn)) {
                /* Assure that the right operand is always a Load if there is one */
                /* because non-commutative ops can only use Dest AM if the right  */
                /* operand is a Load, so we only need to check the right operand. */
                if (pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                {
                    set_irn_n(irn, 2, right);
                    set_irn_n(irn, 3, left);

                    temp  = left;
                    left  = right;
                    right = temp;
                }
            }
            /* check for Store -> op -> Load */

            /* Store -> op -> Load optimization is only possible if supported by op */
            /* and if right operand is a Load                                       */
            if ((get_ia32_am_support(irn) & ia32_am_Dest) &&
                pred_is_specific_nodeblock(block, right, is_ia32_Ld))
            {
                /* An address mode capable op always has a result Proj.                  */
                /* If this Proj is used by more than one other node, we don't need to    */
                /* check further, otherwise we check for Store and remember the address, */
                /* the Store points to.                                                  */

                succ = get_res_proj(irn);
                assert(succ && "Couldn't find result proj");

                addr_b = NULL;
                addr_i = NULL;
                store  = NULL;

                /* now check for users and Store */
                if (ia32_get_irn_n_edges(succ) == 1) {
                    succ = get_edge_src_irn(get_irn_out_edge_first(succ));

                    if (is_ia32_fStore(succ) || is_ia32_Store(succ)) {
                        store  = succ;
                        addr_b = get_irn_n(store, 0);

                        /* Could be that the Store is connected to the address    */
                        /* calculating LEA while the Load is already transformed. */
                        if (is_ia32_Lea(addr_b)) {
                            succ   = addr_b;
                            addr_b = get_irn_n(succ, 0);
                            addr_i = get_irn_n(succ, 1);
                        }
                        else {
                            addr_i = noreg_gp;
                        }
                    }
                }
                /* check if we found a Store */
                if (store) {
                    /* we found a Store as single user: Now check for Load */

                    /* Extra check for commutative ops with two Loads */
                    /* -> put the interesting Load right              */
                    if (node_is_ia32_comm(irn) &&
                        pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                    {
                        if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
                            (addr_i == get_irn_n(get_Proj_pred(left), 1)))
                        {
                            /* We exchange left and right, so it's easier to kill     */
                            /* the correct Load later and to handle unary operations. */
                            set_irn_n(irn, 2, right);
                            set_irn_n(irn, 3, left);

                            temp  = left;
                            left  = right;
                            right = temp;
                        }
                    }

                    /* skip the Proj for easier access */
                    load = get_Proj_pred(right);
                    /* Compare Load and Store address */
                    if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
                        /* Right Load is from same address, so we can */
                        /* disconnect the Load and Store here         */

                        /* set new base, index and attributes */
                        set_irn_n(irn, 0, addr_b);
                        set_irn_n(irn, 1, addr_i);
                        add_ia32_am_offs(irn, get_ia32_am_offs(load));
                        set_ia32_am_scale(irn, get_ia32_am_scale(load));
                        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
                        set_ia32_op_type(irn, ia32_AddrModeD);
                        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
                        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

                        set_ia32_am_sc(irn, get_ia32_am_sc(load));
                        if (is_ia32_am_sc_sign(load))
                            set_ia32_am_sc_sign(irn);

                        if (is_ia32_use_frame(load))
                            set_ia32_use_frame(irn);

                        /* connect to Load memory and disconnect Load */
                        if (get_irn_arity(irn) == 5) {
                            /* binary AMop */
                            set_irn_n(irn, 4, get_irn_n(load, 2));
                            set_irn_n(irn, 3, noreg_gp);
                        }
                        else {
                            /* unary AMop */
                            set_irn_n(irn, 3, get_irn_n(load, 2));
                            set_irn_n(irn, 2, noreg_gp);
                        }

                        /* connect the memory Proj of the Store to the op */
                        mem_proj = get_mem_proj(store);
                        set_Proj_pred(mem_proj, irn);
                        set_Proj_proj(mem_proj, 1);

                        /* clear remat flag */
                        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

                        DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
                    }
                } /* if (store) */
                else if (get_ia32_am_support(irn) & ia32_am_Source) {
                    /* There was no store, check if we still can optimize for source address mode */
                    check_am_src = 1;
                }
            } /* if (support AM Dest) */
            else if (get_ia32_am_support(irn) & ia32_am_Source) {
                /* op doesn't support AM Dest -> check for AM Source */
                check_am_src = 1;
            }
            /* normalize commutative ops */
            if (node_is_ia32_comm(irn)) {
                /* Assure that the left operand is always a Load if there is one */
                /* because non-commutative ops can only use Source AM if the     */
                /* left operand is a Load, so we only need to check the left     */
                /* operand afterwards.                                           */
                if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
                    set_irn_n(irn, 2, right);
                    set_irn_n(irn, 3, left);

                    temp  = left;
                    left  = right;
                    right = temp;
                }
            }

            /* optimize op -> Load iff Load is only used by this op      */
            /* and left operand is a Load which is only used by this irn */
            if (check_am_src &&
                pred_is_specific_nodeblock(block, left, is_ia32_Ld) &&
                (ia32_get_irn_n_edges(left) == 1))
            {
                left = get_Proj_pred(left);
                addr_b = get_irn_n(left, 0);
                addr_i = get_irn_n(left, 1);

                /* set new base, index and attributes */
                set_irn_n(irn, 0, addr_b);
                set_irn_n(irn, 1, addr_i);
                add_ia32_am_offs(irn, get_ia32_am_offs(left));
                set_ia32_am_scale(irn, get_ia32_am_scale(left));
                set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
                set_ia32_op_type(irn, ia32_AddrModeS);
                set_ia32_frame_ent(irn, get_ia32_frame_ent(left));
                set_ia32_ls_mode(irn, get_ia32_ls_mode(left));

                set_ia32_am_sc(irn, get_ia32_am_sc(left));
                if (is_ia32_am_sc_sign(left))
                    set_ia32_am_sc_sign(irn);

                /* clear remat flag */
                set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

                if (is_ia32_use_frame(left))
                    set_ia32_use_frame(irn);

                /* connect to Load memory */
                if (get_irn_arity(irn) == 5) {
                    /* binary AMop */
                    set_irn_n(irn, 4, get_irn_n(left, 2));
                }
                else {
                    /* unary AMop */
                    set_irn_n(irn, 3, get_irn_n(left, 2));
                }

                /* disconnect from Load */
                set_irn_n(irn, 2, noreg_gp);

                /* If Load has a memory Proj, connect it to the op */
                mem_proj = get_mem_proj(left);
                if (mem_proj) {
                    set_Proj_pred(mem_proj, irn);
                    set_Proj_proj(mem_proj, 1);
                }

                DB((mod, LEVEL_1, "merged with %+F into source AM\n", left));
            }
        }
    }
}
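
/* Illustrative dest AM folding (hedged):
 *
 *   v, M1 = Load(b, i, M)
 *   r     = Add(v, x)
 *   M2    = Store(b, i, r, M1)
 * =>
 *   M2    = Add(b, i, x, M)        ; memory destination add
 *
 * valid only if the Load feeds just this op, the op's result feeds just the
 * Store, and Load/Store agree on address and mode (see the checks above). */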