#include "firm_types.h"
#include "../benode_t.h"
#include "../besched_t.h"
#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
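
/** Returns non-zero if irn is the NoMem node. */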
#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)
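
/** Predicate type for node checks (e.g. is_ia32_TestJmp), used by the candidate helpers below. */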
typedef int is_op_func_t(const ir_node *n);
/**
 * Checks if a node represents the NOREG value.
 */
static int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
    be_abi_irg_t *babi = cg->birg->abi;
    const arch_register_t *fp_noreg = USE_SSE2(cg) ?
        &ia32_xmm_regs[REG_XMM_NOREG] : &ia32_vfp_regs[REG_VFP_NOREG];

    return (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn) ||
           (be_abi_get_callee_save_irn(babi, fp_noreg) == irn);
}
/*************************************************
 *
 *                 Constants
 *
 *************************************************/
/**
 * Creates a unique ident by adding a number to a tag.
 *
 * @param tag  the tag string, must contain a %d if a number should be added
 */
static ident *unique_id(const char *tag)
{
    static unsigned id = 0;
    char str[256];

    snprintf(str, sizeof(str), tag, ++id);
    return new_id_from_str(str);
}
/**
 * Transforms a SymConst.
 *
 * @param env  the transform environment (provides block, dbg info, mode and the SymConst node)
 * @return the created ia32 Const node
 */
static ir_node *gen_SymConst(ia32_transform_env_t *env) {
    ir_node  *cnst;
    dbg_info *dbg   = env->dbg;
    ir_mode  *mode  = env->mode;
    ir_graph *irg   = env->irg;
    ir_node  *block = env->block;

    if (mode_is_float(mode)) {
        if (USE_SSE2(env->cg))
            cnst = new_rd_ia32_xConst(dbg, irg, block, get_irg_no_mem(irg), mode);
        else
            cnst = new_rd_ia32_vfConst(dbg, irg, block, get_irg_no_mem(irg), mode);
    }
    else
        cnst = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), mode);

    set_ia32_Const_attr(cnst, env->irn);
    return cnst;
}
/**
 * Get a primitive type for a mode.
 */
static ir_type *get_prim_type(pmap *types, ir_mode *mode)
{
    pmap_entry *e = pmap_find(types, mode);
    ir_type *res;
    char buf[64];

    if (e)
        return e->value;

    snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
    res = new_type_primitive(new_id_from_str(buf), mode);
    pmap_insert(types, mode, res);
    return res;
}
/**
 * Get an entity that is initialized with a tarval.
 */
static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
{
    tarval     *tv = get_Const_tarval(cnst);
    pmap_entry *e  = pmap_find(cg->isa->tv_ent, tv);
    entity     *res;
    ir_graph   *rem;

    if (! e) {
        ir_mode *mode = get_irn_mode(cnst);
        ir_type *tp   = get_Const_type(cnst);
        if (tp == firm_unknown_type)
            tp = get_prim_type(cg->isa->types, mode);

        res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp);

        set_entity_ld_ident(res, get_entity_ident(res));
        set_entity_visibility(res, visibility_local);
        set_entity_variability(res, variability_constant);
        set_entity_allocation(res, allocation_static);

        /* we create a new entity here: its initialization must reside on the
           const code irg */
        rem = current_ir_graph;
        current_ir_graph = get_const_code_irg();
        set_atomic_ent_value(res, new_Const_type(tv, tp));
        current_ir_graph = rem;

        pmap_insert(cg->isa->tv_ent, tv, res);
    }
    else
        res = e->value;

    return res;
}
/**
 * Transforms a Const.
 *
 * @param env  the transform environment (provides block, dbg info, mode and the Const node)
 * @return the created ia32 Const node
 */
static ir_node *gen_Const(ia32_transform_env_t *env) {
    ir_node         *cnst;
    symconst_symbol  sym;
    ir_graph        *irg   = env->irg;
    ir_node         *block = env->block;
    ir_node         *node  = env->irn;
    dbg_info        *dbg   = env->dbg;
    ir_mode         *mode  = env->mode;

    if (mode_is_float(mode)) {
        if (! USE_SSE2(env->cg)) {
            cnst_classify_t clss = classify_Const(node);

            if (clss == CNST_NULL)
                return new_rd_ia32_vfldz(dbg, irg, block, mode);
            else if (clss == CNST_ONE)
                return new_rd_ia32_vfld1(dbg, irg, block, mode);
        }

        sym.entity_p = get_entity_for_tv(env->cg, node);

        cnst = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
        env->irn = cnst;
        cnst = gen_SymConst(env);
    }
    else {
        cnst = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), get_irn_mode(node));
        set_ia32_Const_attr(cnst, node);
    }

    return cnst;
}
/**
 * Transforms (all) Const's into ia32_Const and places them in the
 * block where they are used (or in the cfg-pred Block in case of Phi's).
 * Additionally all reference nodes are changed into mode_Is nodes.
 */
void ia32_place_consts_set_modes(ir_node *irn, void *env) {
    ia32_code_gen_t      *cg = env;
    ia32_transform_env_t  tenv;
    ir_mode              *mode;
    ir_node              *pred, *cnst;
    int                   i;
    opcode                opc;

    if (is_Block(irn))
        return;

    mode = get_irn_mode(irn);

    /* transform all reference nodes into mode_Is nodes */
    if (mode_is_reference(mode)) {
        mode = mode_Is;
        set_irn_mode(irn, mode);
    }

    tenv.block = get_nodes_block(irn);
    tenv.cg    = cg;
    tenv.irg   = cg->irg;
    DEBUG_ONLY(tenv.mod = cg->mod;)

    /* Loop over all predecessors and check for Sym/Const nodes */
    for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
        cnst      = NULL;
        pred      = get_irn_n(irn, i);
        opc       = get_irn_opcode(pred);
        tenv.irn  = pred;
        tenv.mode = get_irn_mode(pred);
        tenv.dbg  = get_irn_dbg_info(pred);

        /* If it's a Phi, then we need to create the */
        /* new Const in its predecessor block        */
        if (is_Phi(irn)) {
            tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i);
        }

        /* put the const into the block where the original const was */
        if (! (cg->opt & IA32_OPT_PLACECNST)) {
            tenv.block = get_nodes_block(pred);
        }

        if (opc == iro_Const)
            cnst = gen_Const(&tenv);
        else if (opc == iro_SymConst)
            cnst = gen_SymConst(&tenv);

        /* if we found a const, then set it */
        if (cnst)
            set_irn_n(irn, i, cnst);
    }
}
/********************************************************************************************************
 *
 *                                   Peephole Optimizations
 *
 ********************************************************************************************************/
/* NOTE: These peephole optimizations must be called after scheduling and register allocation. */
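
/**
 * Returns non-zero if both nodes carry the same ia32 immediate constant.
 */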
static int ia32_cnst_compare(ir_node *n1, ir_node *n2) {
    return get_ia32_id_cnst(n1) == get_ia32_id_cnst(n2);
}
/**
 * Checks for potential CJmp/CJmpAM optimization candidates.
 */
static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
    ir_node *cand = NULL;
    ir_node *prev = sched_prev(irn);

    if (is_Block(prev)) {
        if (get_Block_n_cfgpreds(prev) == 1)
            prev = get_Block_cfgpred(prev, 0);
    }

    /* The predecessor must be a ProjX. */
    if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
        prev = get_Proj_pred(prev);

        if (is_op_func(prev))
            cand = prev;
    }

    return cand;
}
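
/**
 * Returns non-zero if irn can serve as a merge candidate for a TestJmp,
 * i.e. it is a TestJmp or an And.
 */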
static int is_TestJmp_cand(const ir_node *irn) {
    return is_ia32_TestJmp(irn) || is_ia32_And(irn);
}
/**
 * Checks if two consecutive arguments of cand match
 * the two arguments of irn (TestJmp).
 */
static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
    ir_node *in1 = get_irn_n(irn, 0);
    ir_node *in2 = get_irn_n(irn, 1);
    int i, n = get_irn_arity(cand);

    for (i = 0; i < n - 1; i++) {
        if (get_irn_n(cand, i)     == in1 &&
            get_irn_n(cand, i + 1) == in2)
            return ia32_cnst_compare(cand, irn);
    }

    return 0;
}
/**
 * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And).
 */
static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand    = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
    int      replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_TestJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));

        if (is_ia32_And(cand))
            set_irn_op(irn, op_ia32_CJmpAM);
        else
            set_irn_op(irn, op_ia32_CJmp);

        DB((cg->mod, LEVEL_1, "%+F\n", irn));
    }
}
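
/**
 * Returns non-zero if irn can serve as a merge candidate for a CondJmp,
 * i.e. it is a CondJmp or a Sub.
 */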
static int is_CondJmp_cand(const ir_node *irn) {
    return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
}
/**
 * Checks if the arguments of cand are the same as those of irn.
 */
static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
    int i, n = get_irn_arity(cand);

    for (i = 0; i < n; i++) {
        if (get_irn_n(cand, i) != get_irn_n(irn, i))
            return 0;
    }

    return ia32_cnst_compare(cand, irn);
}
/**
 * Tries to replace a CondJmp by a CJmpAM.
 */
static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand    = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
    int      replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_CondJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((cg->mod, LEVEL_1, "replacing %+F by ", irn));

        set_irn_op(irn, op_ia32_CJmpAM);

        DB((cg->mod, LEVEL_1, "%+F\n", irn));
    }
}
/**
 * Creates a Push from Store(IncSP(gp_reg_size)).
 */
static void ia32_create_Push(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *sp = get_irn_n(irn, 0);
    ir_node *val, *next, *push, *bl, *proj_M, *proj_res, *old_proj_M;
    const ir_edge_t *edge;

    if (get_ia32_am_offs(irn) || !be_is_IncSP(sp))
        return;

    if (arch_get_irn_register(cg->arch_env, get_irn_n(irn, 1)) !=
        &ia32_gp_regs[REG_GP_NOREG])
        return;

    val = get_irn_n(irn, 2);
    if (mode_is_float(get_irn_mode(val)))
        return;

    if (be_get_IncSP_direction(sp) != be_stack_dir_expand ||
        be_get_IncSP_offset(sp) != get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode))
        return;

    /* ok, translate into Push */
    edge       = get_irn_out_edge_first(irn);
    old_proj_M = get_edge_src_irn(edge);

    next = sched_next(irn);
    sched_remove(irn);
    sched_remove(sp);

    bl   = get_nodes_block(irn);
    push = new_rd_ia32_Push(NULL, current_ir_graph, bl,
        be_get_IncSP_pred(sp), val, be_get_IncSP_mem(sp));
    proj_res = new_r_Proj(current_ir_graph, bl, push, get_irn_mode(sp), pn_ia32_Push_stack);
    proj_M   = new_r_Proj(current_ir_graph, bl, push, mode_M, pn_ia32_Push_M);

    /* copy a possible constant from the store */
    set_ia32_id_cnst(push, get_ia32_id_cnst(irn));
    set_ia32_immop_type(push, get_ia32_immop_type(irn));

    /* the push must have SP out register */
    arch_set_irn_register(cg->arch_env, push, arch_get_irn_register(cg->arch_env, sp));

    exchange(old_proj_M, proj_M);
    exchange(sp, proj_res);
    sched_add_before(next, push);
    sched_add_after(push, proj_res);
}
/**
 * Creates a Pop from IncSP(Load(sp)).
 */
static void ia32_create_Pop(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *old_proj_M = be_get_IncSP_mem(irn);
    ir_node *load = skip_Proj(old_proj_M);
    ir_node *old_proj_res = NULL;
    ir_node *bl, *pop, *next, *proj_res, *proj_sp, *proj_M;
    const ir_edge_t *edge;
    const arch_register_t *reg, *sp;

    if (! is_ia32_Load(load) || get_ia32_am_offs(load))
        return;

    if (arch_get_irn_register(cg->arch_env, get_irn_n(load, 1)) !=
        &ia32_gp_regs[REG_GP_NOREG])
        return;

    if (arch_get_irn_register(cg->arch_env, get_irn_n(load, 0)) != cg->isa->arch_isa.sp)
        return;

    /* ok, translate into pop */
    foreach_out_edge(load, edge) {
        ir_node *succ = get_edge_src_irn(edge);

        if (succ != old_proj_M) {
            old_proj_res = succ;
            break;
        }
    }

    if (! old_proj_res) {
        return; /* should not happen */
    }

    bl = get_nodes_block(load);

    /* IncSP is typically scheduled after the load, so remove it first */
    sched_remove(irn);
    next = sched_next(old_proj_res);
    sched_remove(old_proj_res);
    sched_remove(load);

    reg = arch_get_irn_register(cg->arch_env, load);
    sp  = arch_get_irn_register(cg->arch_env, irn);

    pop      = new_rd_ia32_Pop(NULL, current_ir_graph, bl, get_irn_n(irn, 0), get_irn_n(load, 2));
    proj_res = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(old_proj_res), pn_ia32_Pop_res);
    proj_sp  = new_r_Proj(current_ir_graph, bl, pop, get_irn_mode(irn), pn_ia32_Pop_stack);
    proj_M   = new_r_Proj(current_ir_graph, bl, pop, mode_M, pn_ia32_Pop_M);

    exchange(old_proj_M, proj_M);
    exchange(old_proj_res, proj_res);
    exchange(irn, proj_sp);

    arch_set_irn_register(cg->arch_env, proj_res, reg);
    arch_set_irn_register(cg->arch_env, proj_sp, sp);

    sched_add_before(next, proj_sp);
    sched_add_before(proj_sp, proj_res);
    sched_add_before(proj_res, pop);
}
/**
 * Tries to optimize two consecutive IncSP nodes.
 */
static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *prev      = be_get_IncSP_pred(irn);
    int      real_uses = get_irn_n_edges(prev);

    if (be_is_IncSP(prev) && real_uses == 1) {
        /* first IncSP has only one IncSP user, kill the first one */
        unsigned       prev_offs = be_get_IncSP_offset(prev);
        be_stack_dir_t prev_dir  = be_get_IncSP_direction(prev);
        unsigned       curr_offs = be_get_IncSP_offset(irn);
        be_stack_dir_t curr_dir  = be_get_IncSP_direction(irn);

        int new_ofs = prev_offs * (prev_dir == be_stack_dir_expand ? -1 : +1) +
            curr_offs * (curr_dir == be_stack_dir_expand ? -1 : +1);

        if (new_ofs < 0) {
            new_ofs  = -new_ofs;
            curr_dir = be_stack_dir_expand;
        }
        else
            curr_dir = be_stack_dir_shrink;

        be_set_IncSP_offset(prev, 0);
        be_set_IncSP_offset(irn, (unsigned)new_ofs);
        be_set_IncSP_direction(irn, curr_dir);

        /* Omit the optimized IncSP */
        be_set_IncSP_pred(irn, be_get_IncSP_pred(prev));
    }
}
/**
 * Performs Peephole Optimizations.
 */
void ia32_peephole_optimization(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;

    if (is_ia32_TestJmp(irn))
        ia32_optimize_TestJmp(irn, cg);
    else if (is_ia32_CondJmp(irn))
        ia32_optimize_CondJmp(irn, cg);
    else if (be_is_IncSP(irn))
        ia32_optimize_IncSP(irn, cg);
    else if (is_ia32_Store(irn))
        ia32_create_Push(irn, cg);
}
/******************************************************************
 *
 *                        Address Mode
 *
 ******************************************************************/
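
/**
 * Returns non-zero if irn is an ia32 node marked as commutative.
 */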
static int node_is_ia32_comm(const ir_node *irn) {
    return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}
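
/**
 * Counts the out edges (users) of irn.
 */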
static int ia32_get_irn_n_edges(const ir_node *irn) {
    const ir_edge_t *edge;
    int cnt = 0;
    foreach_out_edge(irn, edge) cnt++;
    return cnt;
}
/**
 * Returns the first mode_M Proj connected to irn.
 */
static ir_node *get_mem_proj(const ir_node *irn) {
    const ir_edge_t *edge;
    ir_node *src;

    assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

    foreach_out_edge(irn, edge) {
        src = get_edge_src_irn(edge);
        assert(is_Proj(src) && "Proj expected");
        if (get_irn_mode(src) == mode_M)
            return src;
    }
    return NULL;
}
/**
 * Returns the first Proj with mode != mode_M connected to irn.
 */
static ir_node *get_res_proj(const ir_node *irn) {
    const ir_edge_t *edge;
    ir_node *src;

    assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

    foreach_out_edge(irn, edge) {
        src = get_edge_src_irn(edge);
        assert(is_Proj(src) && "Proj expected");
        if (get_irn_mode(src) != mode_M)
            return src;
    }
    return NULL;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
    if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
        return 1;
    }
    return 0;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
    int (*is_op_func)(const ir_node *n))
{
    if (is_Proj(pred)) {
        pred = get_Proj_pred(pred);
        if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
            return 1;
        }
    }
    return 0;
}
/**
 * Checks if irn is a candidate for address calculation or address mode.
 *
 * address calculation (AC):
 * - none of the operands may be a Load within the same block OR
 * - all Loads must have more than one user OR
 * - the irn has a frame entity (it's a former FrameAddr)
 *
 * address mode (AM):
 * - at least one operand has to be a Load within the same block AND
 * - the load must not have other users than the irn AND
 * - the irn must not have a frame entity set
 *
 * @param block      The block the Loads must/must not be in
 * @param irn        The irn to check
 * @param check_addr 1 to check for address calculation, 0 otherwise
 * @return 1 if irn is a candidate for AC or AM, 0 otherwise
 */
static int is_candidate(const ir_node *block, const ir_node *irn, int check_addr) {
    ir_node *in;
    int      n, is_cand = check_addr;

    in = get_irn_n(irn, 2);

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
        n       = ia32_get_irn_n_edges(in);
        is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
    }

    in = get_irn_n(irn, 3);

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
        n       = ia32_get_irn_n_edges(in);
        is_cand = check_addr ? (n == 1 ? 0 : is_cand) : (n == 1 ? 1 : is_cand);
    }

    is_cand = get_ia32_frame_ent(irn) ? (check_addr ? 1 : 0) : is_cand;

    return is_cand;
}
/**
 * Compares the base and index addr and the load/store entities
 * and returns 1 if they are equal.
 */
static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
                                    const ir_node *addr_b, const ir_node *addr_i)
{
    int     is_equal = (addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1));
    entity *lent     = get_ia32_frame_ent(load);
    entity *sent     = get_ia32_frame_ent(store);
    ident  *lid      = get_ia32_am_sc(load);
    ident  *sid      = get_ia32_am_sc(store);
    char   *loffs    = get_ia32_am_offs(load);
    char   *soffs    = get_ia32_am_offs(store);

    /* are both entities set and equal? */
    if (is_equal && (lent || sent))
        is_equal = lent && sent && (lent == sent);

    /* are address mode idents set and equal? */
    if (is_equal && (lid || sid))
        is_equal = lid && sid && (lid == sid);

    /* are offsets set and equal? */
    if (is_equal && (loffs || soffs))
        is_equal = loffs && soffs && strcmp(loffs, soffs) == 0;

    /* are the load and the store of the same mode? */
    is_equal = is_equal ? get_ia32_ls_mode(load) == get_ia32_ls_mode(store) : 0;

    return is_equal;
}
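
/**
 * Bitmask describing which attributes of an existing LEA (base, index, offset,
 * scale, address mode symconst, frame entity) can be taken over when the LEA is
 * folded into a new one.
 */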
typedef enum _ia32_take_lea_attr {
    IA32_LEA_ATTR_NONE  = 0,
    IA32_LEA_ATTR_BASE  = (1 << 0),
    IA32_LEA_ATTR_INDEX = (1 << 1),
    IA32_LEA_ATTR_OFFS  = (1 << 2),
    IA32_LEA_ATTR_SCALE = (1 << 3),
    IA32_LEA_ATTR_AMSC  = (1 << 4),
    IA32_LEA_ATTR_FENT  = (1 << 5)
} ia32_take_lea_attr;
/**
 * Decides if we have to keep the LEA operand or if we can assimilate it.
 */
static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea,
        int have_am_sc, ia32_code_gen_t *cg)
{
    ir_node *lea_base = get_irn_n(lea, 0);
    ir_node *lea_idx  = get_irn_n(lea, 1);
    entity  *irn_ent  = get_ia32_frame_ent(irn);
    entity  *lea_ent  = get_ia32_frame_ent(lea);
    int      ret_val  = 0;
    int      is_noreg_base  = be_is_NoReg(cg, base);
    int      is_noreg_index = be_is_NoReg(cg, index);
    ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea);

    /* If the Add and the LEA both have a different frame entity set: keep */
    if (irn_ent && lea_ent && (irn_ent != lea_ent))
        return IA32_LEA_ATTR_NONE;
    else if (! irn_ent && lea_ent)
        ret_val |= IA32_LEA_ATTR_FENT;

    /* If the Add and the LEA both have already an address mode symconst: keep */
    if (have_am_sc && get_ia32_am_sc(lea))
        return IA32_LEA_ATTR_NONE;
    else if (get_ia32_am_sc(lea))
        ret_val |= IA32_LEA_ATTR_AMSC;

    /* Check the different base-index combinations */

    if (! is_noreg_base && ! is_noreg_index) {
        /* Assimilate if base is the lea and the LEA is just a Base + Offset calculation */
        if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) {
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (! is_noreg_base && is_noreg_index) {
        /* Base is set but index not */
        if (base == lea) {
            /* Base points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_I)
                ret_val |= IA32_LEA_ATTR_INDEX;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else if (am_flav & ia32_B ? 0 : 1) {
            /* Base is not the LEA but the LEA is an index only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (is_noreg_base && ! is_noreg_index) {
        /* Index is set but not base */
        if (index == lea) {
            /* Index points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_B)
                ret_val |= IA32_LEA_ATTR_BASE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else if (am_flav & ia32_I ? 0 : 1) {
            /* Index is not the LEA but the LEA is a base only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else {
        assert(0 && "There must have been set base or index");
    }

    return ret_val;
}
/**
 * Folds Add or Sub to LEA if possible.
 */
static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn, ir_node *noreg) {
    ir_graph   *irg       = get_irn_irg(irn);
    dbg_info   *dbg       = get_irn_dbg_info(irn);
    ir_node    *block     = get_nodes_block(irn);
    ir_node    *res       = irn;
    ir_node    *shift     = NULL;
    ir_node    *lea_o     = NULL;
    ir_node    *lea       = NULL;
    char       *offs      = NULL;
    const char *offs_cnst = NULL;
    char       *offs_lea  = NULL;
    int         scale     = 0;
    int         isadd     = 0;
    int         have_am_sc = 0;
    int         am_sc_sign = 0;
    ident      *am_sc     = NULL;
    entity     *lea_ent   = NULL;
    ir_node    *left, *right, *temp;
    ir_node    *base, *index;
    ia32_am_flavour_t am_flav;
    DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)

    if (is_ia32_Add(irn))
        isadd = 1;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    /* "normalize" arguments in case of add with two operands */
    if (isadd && ! be_is_NoReg(cg, right)) {
        /* put LEA == ia32_am_O as right operand */
        if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put LEA != ia32_am_O as left operand */
        if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put SHL as left operand iff left is NOT a LEA */
        if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }
    }

    base  = left;
    index = noreg;
    /* check for operation with immediate */
    if (is_ia32_ImmConst(irn)) {
        DBG((mod, LEVEL_1, "\tfound op with imm const"));

        offs_cnst = get_ia32_cnst(irn);
    }
    else if (is_ia32_ImmSymConst(irn)) {
        DBG((mod, LEVEL_1, "\tfound op with imm symconst"));

        am_sc      = get_ia32_id_cnst(irn);
        am_sc_sign = is_ia32_am_sc_sign(irn);
        have_am_sc = 1;
    }

    /* determine the operand which needs to be checked */
    if (be_is_NoReg(cg, right)) {
        temp = left;
    }
    else {
        temp = right;
    }

    /* check if right operand is AMConst (LEA with ia32_am_O)  */
    /* but we can only eat it up if there is no other symconst */
    /* because the linker won't accept two symconsts           */
    if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
        DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));

        offs_lea   = get_ia32_am_offs(temp);
        am_sc      = get_ia32_am_sc(temp);
        am_sc_sign = is_ia32_am_sc_sign(temp);
        have_am_sc = 1;
        lea_o      = temp;
    }

    /* default for add -> make right operand the index */
    if (isadd) {
        index = right;

        DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));

        /* determine the operand which needs to be checked */
        temp = left;
        if (is_ia32_Lea(left)) {
            temp = right;
        }

        /* check for SHL 1,2,3 */
        if (pred_is_specific_node(temp, is_ia32_Shl)) {
            temp  = get_Proj_pred(temp);
            shift = temp;

            if (get_ia32_Immop_tarval(temp)) {
                scale = get_tarval_long(get_ia32_Immop_tarval(temp));

                index = get_irn_n(temp, 2);

                DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
            }
        }
    }

    if (! be_is_NoReg(cg, index)) {
        /* if we have index, but left == right -> no base */
        if (left == right) {
            base = noreg;
        }
        else if (! is_ia32_Lea(left) && (index != right)) {
            /* index != right -> we found a good Shl        */
            /* left != LEA -> this Shl was the left operand */
            /* -> base is right operand                     */
            base = right;
        }
    }

    /* Try to assimilate a LEA as left operand */
    if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
        /* check if we can assimilate the LEA */
        int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg);

        if (take_attr == IA32_LEA_ATTR_NONE) {
            DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
        }
        else {
            DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
            lea = left; /* for statistics */

            if (take_attr & IA32_LEA_ATTR_OFFS)
                offs = get_ia32_am_offs(left);

            if (take_attr & IA32_LEA_ATTR_AMSC) {
                am_sc      = get_ia32_am_sc(left);
                have_am_sc = 1;
                am_sc_sign = is_ia32_am_sc_sign(left);
            }

            if (take_attr & IA32_LEA_ATTR_SCALE)
                scale = get_ia32_am_scale(left);

            if (take_attr & IA32_LEA_ATTR_BASE)
                base = get_irn_n(left, 0);

            if (take_attr & IA32_LEA_ATTR_INDEX)
                index = get_irn_n(left, 1);

            if (take_attr & IA32_LEA_ATTR_FENT)
                lea_ent = get_ia32_frame_ent(left);
        }
    }
    /* ok, we can create a new LEA */
    res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is);

    /* add the old offset of a previous LEA */
    if (offs)
        add_ia32_am_offs(res, offs);

    /* add the new offset */
    if (isadd) {
        if (offs_cnst)
            add_ia32_am_offs(res, offs_cnst);

        if (offs_lea)
            add_ia32_am_offs(res, offs_lea);
    }
    else {
        /* either lea_O-cnst, -cnst or -lea_O */
        if (offs_cnst) {
            if (offs_lea)
                add_ia32_am_offs(res, offs_lea);

            sub_ia32_am_offs(res, offs_cnst);
        }
        else
            sub_ia32_am_offs(res, offs_lea);
    }

    /* set the address mode symconst */
    if (have_am_sc) {
        set_ia32_am_sc(res, am_sc);
        if (am_sc_sign)
            set_ia32_am_sc_sign(res);
    }

    /* copy the frame entity (could be set in case of Add */
    /* which was a FrameAddr)                             */
    if (lea_ent)
        set_ia32_frame_ent(res, lea_ent);
    else
        set_ia32_frame_ent(res, get_ia32_frame_ent(irn));

    if (get_ia32_frame_ent(res))
        set_ia32_use_frame(res);

    set_ia32_am_scale(res, scale);

    am_flav = ia32_am_N;
    /* determine new am flavour */
    if (offs || offs_cnst || offs_lea)
        am_flav |= ia32_O;
    if (! be_is_NoReg(cg, base))
        am_flav |= ia32_B;
    if (! be_is_NoReg(cg, index))
        am_flav |= ia32_I;
    if (scale > 0)
        am_flav |= ia32_S;
    set_ia32_am_flavour(res, am_flav);

    set_ia32_op_type(res, ia32_AddrModeS);

    SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));

    DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));

    /* we will exchange it, report here before the Proj is created */
    if (shift && lea && lea_o)
        DBG_OPT_LEA4(irn, lea_o, lea, shift, res);
    else if (shift && lea)
        DBG_OPT_LEA3(irn, lea, shift, res);
    else if (shift && lea_o)
        DBG_OPT_LEA3(irn, lea_o, shift, res);
    else if (lea && lea_o)
        DBG_OPT_LEA3(irn, lea_o, lea, res);
    else if (shift)
        DBG_OPT_LEA2(irn, shift, res);
    else if (lea)
        DBG_OPT_LEA2(irn, lea, res);
    else if (lea_o)
        DBG_OPT_LEA2(irn, lea_o, res);
    else
        DBG_OPT_LEA1(irn, res);

    /* get the result Proj of the Add/Sub */
    irn = get_res_proj(irn);

    assert(irn && "Couldn't find result proj");

    /* exchange the old op with the new LEA */
    exchange(irn, res);

    return res;
}
/**
 * Merges a Load/Store node with a LEA.
 *
 * @param irn The Load/Store node
 * @param lea The LEA
 */
static void merge_loadstore_lea(ir_node *irn, ir_node *lea) {
    entity *irn_ent = get_ia32_frame_ent(irn);
    entity *lea_ent = get_ia32_frame_ent(lea);

    /* If the irn and the LEA both have a different frame entity set: do not merge */
    if (irn_ent && lea_ent && (irn_ent != lea_ent))
        return;
    else if (! irn_ent && lea_ent) {
        set_ia32_frame_ent(irn, lea_ent);
        set_ia32_use_frame(irn);
    }

    /* get the AM attributes from the LEA */
    add_ia32_am_offs(irn, get_ia32_am_offs(lea));
    set_ia32_am_scale(irn, get_ia32_am_scale(lea));
    set_ia32_am_flavour(irn, get_ia32_am_flavour(lea));

    set_ia32_am_sc(irn, get_ia32_am_sc(lea));
    if (is_ia32_am_sc_sign(lea))
        set_ia32_am_sc_sign(irn);

    set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);

    /* set base and index */
    set_irn_n(irn, 0, get_irn_n(lea, 0));
    set_irn_n(irn, 1, get_irn_n(lea, 1));

    /* clear remat flag */
    set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

    if (is_ia32_Ld(irn))
        DBG_OPT_LOAD_LEA(lea, irn);
    else
        DBG_OPT_STORE_LEA(lea, irn);
}
/**
 * Sets new_right index of irn to right and new_left index to left.
 * Also exchanges left and right.
 */
static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right, int new_left, int new_right) {
    ir_node *temp;

    set_irn_n(irn, new_right, *right);
    set_irn_n(irn, new_left, *left);

    temp   = *left;
    *left  = *right;
    *right = temp;

    /* this is only needed for Compares, but currently ALL nodes
     * have this attribute :-) */
    set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
}
/**
 * Optimizes a pattern around irn to address mode if possible.
 */
void ia32_optimize_am(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;
    ir_node  *res = irn;
    dbg_info *dbg;
    ir_mode  *mode;
    ir_node *block, *noreg_gp, *noreg_fp;
    ir_node *left, *right, *temp;
    ir_node *store, *load, *mem_proj;
    ir_node *succ, *addr_b, *addr_i;
    int check_am_src = 0;
    int need_exchange_on_fail = 0;
    DEBUG_ONLY(firm_dbg_module_t *mod = cg->mod;)

    if (! is_ia32_irn(irn))
        return;

    dbg      = get_irn_dbg_info(irn);
    mode     = get_irn_mode(irn);
    block    = get_nodes_block(irn);
    noreg_gp = ia32_new_NoReg_gp(cg);
    noreg_fp = ia32_new_NoReg_fp(cg);

    DBG((mod, LEVEL_1, "checking for AM\n"));

    /* 1st part: check for address calculations and transform them into Lea */

    /* Following cases can occur:                               */
    /* - Sub (l, imm) -> LEA [base - offset]                    */
    /* - Sub (l, r == LEA with ia32_am_O) -> LEA [base - offset] */
    /* - Add (l, imm) -> LEA [base + offset]                    */
    /* - Add (l, r == LEA with ia32_am_O) -> LEA [base + offset] */
    /* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset] */
    /* - Add (l, r) -> LEA [base + index * scale]               */
    /*   with scale > 1 iff l/r == shl (1,2,3)                  */
    if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
        left  = get_irn_n(irn, 2);
        right = get_irn_n(irn, 3);

        /* Do not try to create a LEA if one of the operands is a Load. */
        /* check if irn is a candidate for address calculation          */
        if (is_candidate(block, irn, 1)) {
            DBG((mod, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
            res = fold_addr(cg, irn, noreg_gp);

            if (res != irn)
                DB((mod, LEVEL_1, "transformed into %+F\n", res));
            else
                DB((mod, LEVEL_1, "not transformed\n"));
        }
    }
    /* 2nd part: fold following patterns:                                             */
    /* - Load -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
    /* - Store -> LEA into Store }       it might be better to keep the LEA            */
    /* - op -> Load into AMop with am_Source                                           */
    /*   conditions:                                                                   */
    /*     - op is am_Source capable AND                                               */
    /*     - the Load is only used by this op AND                                      */
    /*     - the Load is in the same block                                             */
    /* - Store -> op -> Load into AMop with am_Dest                                    */
    /*   conditions:                                                                   */
    /*     - op is am_Dest capable AND                                                 */
    /*     - the Store uses the same address as the Load AND                           */
    /*     - the Load is only used by this op AND                                      */
    /*     - the Load and Store are in the same block AND                              */
    /*     - nobody else uses the result of the op                                     */

    if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
        /* 1st: check for Load/Store -> LEA */
        if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
            left = get_irn_n(irn, 0);

            if (is_ia32_Lea(left)) {
                const ir_edge_t *edge, *ne;
                ir_node *src;

                /* merge all Loads/Stores connected to this LEA with the LEA */
                foreach_out_edge_safe(left, edge, ne) {
                    src = get_edge_src_irn(edge);

                    if (src && (is_ia32_Ld(src) || is_ia32_St(src) || is_ia32_Store8Bit(src))) {
                        DBG((mod, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
                        merge_loadstore_lea(src, left);
                    }
                }
            }
        }
        /* check if the node is an address mode candidate */
        else if (is_candidate(block, irn, 0)) {
            DBG((mod, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));

            left = get_irn_n(irn, 2);
            if (get_irn_arity(irn) == 4) {
                /* it's an "unary" operation */
                right = left;
            }
            else {
                right = get_irn_n(irn, 3);
            }

            /* normalize commutative ops */
            if (node_is_ia32_comm(irn)) {
                /* Assure that right operand is always a Load if there is one    */
                /* because non-commutative ops can only use Dest AM if the right */
                /* operand is a load, so we only need to check right operand.    */
                if (pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                {
                    exchange_left_right(irn, &left, &right, 3, 2);
                    need_exchange_on_fail = 1;
                }
            }
            /* check for Store -> op -> Load */

            /* Store -> op -> Load optimization is only possible if supported by op */
            /* and if right operand is a Load                                       */
            if ((get_ia32_am_support(irn) & ia32_am_Dest) &&
                pred_is_specific_nodeblock(block, right, is_ia32_Ld))
            {
                /* An address mode capable op always has a result Proj.                  */
                /* If this Proj is used by more than one other node, we don't need to    */
                /* check further, otherwise we check for Store and remember the address, */
                /* the Store points to.                                                  */

                succ = get_res_proj(irn);
                assert(succ && "Couldn't find result proj");

                addr_b = NULL;
                addr_i = NULL;
                store  = NULL;

                /* now check for users and Store */
                if (ia32_get_irn_n_edges(succ) == 1) {
                    succ = get_edge_src_irn(get_irn_out_edge_first(succ));

                    if (is_ia32_xStore(succ) || is_ia32_Store(succ)) {
                        store  = succ;
                        addr_b = get_irn_n(store, 0);
                        addr_i = get_irn_n(store, 1);
                    }
                }

                if (store) {
                    /* we found a Store as single user: Now check for Load */

                    /* Extra check for commutative ops with two Loads */
                    /* -> put the interesting Load right              */
                    if (node_is_ia32_comm(irn) &&
                        pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                    {
                        if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
                            (addr_i == get_irn_n(get_Proj_pred(left), 1)))
                        {
                            /* We exchange left and right, so it's easier to kill     */
                            /* the correct Load later and to handle unary operations. */
                            set_irn_n(irn, 2, right);
                            set_irn_n(irn, 3, left);
                            temp  = left;
                            left  = right;
                            right = temp;

                            /* this is only needed for Compares, but currently ALL nodes
                             * have this attribute :-) */
                            set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
                        }
                    }
                    /* skip the Proj for easier access */
                    load = get_Proj_pred(right);

                    /* Compare Load and Store address */
                    if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
                        /* Right Load is from same address, so we can */
                        /* disconnect the Load and Store here         */

                        /* set new base, index and attributes */
                        set_irn_n(irn, 0, addr_b);
                        set_irn_n(irn, 1, addr_i);
                        add_ia32_am_offs(irn, get_ia32_am_offs(load));
                        set_ia32_am_scale(irn, get_ia32_am_scale(load));
                        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
                        set_ia32_op_type(irn, ia32_AddrModeD);
                        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
                        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

                        set_ia32_am_sc(irn, get_ia32_am_sc(load));
                        if (is_ia32_am_sc_sign(load))
                            set_ia32_am_sc_sign(irn);

                        if (is_ia32_use_frame(load))
                            set_ia32_use_frame(irn);

                        /* connect to Load memory and disconnect Load */
                        if (get_irn_arity(irn) == 5) {
                            set_irn_n(irn, 4, get_irn_n(load, 2));
                            set_irn_n(irn, 3, noreg_gp);
                        }
                        else {
                            set_irn_n(irn, 3, get_irn_n(load, 2));
                            set_irn_n(irn, 2, noreg_gp);
                        }

                        /* connect the memory Proj of the Store to the op */
                        mem_proj = get_mem_proj(store);
                        set_Proj_pred(mem_proj, irn);
                        set_Proj_proj(mem_proj, 1);

                        /* clear remat flag */
                        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

                        DBG_OPT_AM_D(load, store, irn);

                        DB((mod, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
                    }
                }
                else if (get_ia32_am_support(irn) & ia32_am_Source) {
                    /* There was no store, check if we still can optimize for source address mode */
                    check_am_src = 1;
                }
            } /* if (support AM Dest) */
            else if (get_ia32_am_support(irn) & ia32_am_Source) {
                /* op doesn't support AM Dest -> check for AM Source */
                check_am_src = 1;
            }

            /* was exchanged but optimize failed: exchange back */
            if (check_am_src && need_exchange_on_fail)
                exchange_left_right(irn, &left, &right, 3, 2);

            need_exchange_on_fail = 0;
            /* normalize commutative ops */
            if (check_am_src && node_is_ia32_comm(irn)) {
                /* Assure that left operand is always a Load if there is one */
                /* because non-commutative ops can only use Source AM if the */
                /* left operand is a Load, so we only need to check the left */
                /* operand afterwards.                                       */
                if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
                    exchange_left_right(irn, &left, &right, 3, 2);
                    need_exchange_on_fail = 1;
                }
            }

            /* optimize op -> Load iff Load is only used by this op       */
            /* and left operand is a Load which is only used by this irn  */
            if (check_am_src                                        &&
                pred_is_specific_nodeblock(block, left, is_ia32_Ld) &&
                (ia32_get_irn_n_edges(left) == 1))
            {
                left = get_Proj_pred(left);

                addr_b = get_irn_n(left, 0);
                addr_i = get_irn_n(left, 1);

                /* set new base, index and attributes */
                set_irn_n(irn, 0, addr_b);
                set_irn_n(irn, 1, addr_i);
                add_ia32_am_offs(irn, get_ia32_am_offs(left));
                set_ia32_am_scale(irn, get_ia32_am_scale(left));
                set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
                set_ia32_op_type(irn, ia32_AddrModeS);
                set_ia32_frame_ent(irn, get_ia32_frame_ent(left));
                set_ia32_ls_mode(irn, get_ia32_ls_mode(left));

                set_ia32_am_sc(irn, get_ia32_am_sc(left));
                if (is_ia32_am_sc_sign(left))
                    set_ia32_am_sc_sign(irn);

                /* clear remat flag */
                set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

                if (is_ia32_use_frame(left))
                    set_ia32_use_frame(irn);

                /* connect to Load memory */
                if (get_irn_arity(irn) == 5) {
                    set_irn_n(irn, 4, get_irn_n(left, 2));

                    /* this is only needed for Compares, but currently ALL nodes
                     * have this attribute :-) */
                    set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));

                    /* disconnect from Load */
                    /* (make second op -> first, set second in to noreg) */
                    set_irn_n(irn, 2, get_irn_n(irn, 3));
                    set_irn_n(irn, 3, noreg_gp);
                }
                else {
                    set_irn_n(irn, 3, get_irn_n(left, 2));

                    /* disconnect from Load */
                    set_irn_n(irn, 2, noreg_gp);
                }

                DBG_OPT_AM_S(left, irn);

                /* If Load has a memory Proj, connect it to the op */
                mem_proj = get_mem_proj(left);
                if (mem_proj) {
                    set_Proj_pred(mem_proj, irn);
                    set_Proj_proj(mem_proj, 1);
                }

                DB((mod, LEVEL_1, "merged with %+F into source AM\n", left));
            }
            else
                /* was exchanged but optimize failed: exchange back */
                if (need_exchange_on_fail)
                    exchange_left_right(irn, &left, &right, 3, 2);