#include "firm_types.h"
#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */

#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)
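/**
 * Returns non-zero if irn is one of the backend's virtual NoReg nodes
 * (general purpose or floating point).
 */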
static int be_is_NoReg(be_abi_irg_t *babi, const ir_node *irn) {
	/* the NoReg indices come from the generated register definitions */
	if (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_GP_NOREG]) == irn ||
		be_abi_get_callee_save_irn(babi, &ia32_fp_regs[REG_FP_NOREG]) == irn)
	{
		return 1;
	}

	return 0;
}
/**
 * Creates a unique ident by adding a number to a tag.
 *
 * @param tag the tag string, must contain a %d if a number
 *            should be inserted
 */
static ident *unique_id(const char *tag)
{
	static unsigned id = 0;
	char str[256];

	snprintf(str, sizeof(str), tag, ++id);
	return new_id_from_str(str);
}
/**
 * Transforms a SymConst.
 *
 * @param env the transformation environment (block, dbg info, irg,
 *            mode and the SymConst node to be transformed)
 * @return the created ia32 Const node
 */
static ir_node *gen_SymConst(ia32_transform_env_t *env) {
	ir_node  *cnst;
	dbg_info *dbg   = env->dbg;
	ir_mode  *mode  = env->mode;
	ir_graph *irg   = env->irg;
	ir_node  *block = env->block;

	cnst = new_rd_ia32_Const(dbg, irg, block, mode);
	set_ia32_Const_attr(cnst, env->irn);

	return cnst;
}
/**
 * Get a primitive type for a mode.
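 * The type is created on demand and cached in the given pmap, so each
 * mode maps to exactly one primitive type.
 */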
static ir_type *get_prim_type(pmap *types, ir_mode *mode)
{
	pmap_entry *e = pmap_find(types, mode);
	ir_type *res;

	if (! e) {
		char buf[64];
		snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
		res = new_type_primitive(new_id_from_str(buf), mode);
		pmap_insert(types, mode, res);
	}
	else
		res = e->value;

	return res;
}
/**
 * Get an entity that is initialized with a tarval.
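 * The entity is cached in cg->tv_ent, so all Consts with the same tarval
 * share a single entity (and memory location).
 */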
static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
{
	tarval     *tv = get_Const_tarval(cnst);
	pmap_entry *e  = pmap_find(cg->tv_ent, tv);
	entity     *res;
	ir_graph   *rem;

	if (! e) {
		ir_mode *mode = get_irn_mode(cnst);
		ir_type *tp   = get_Const_type(cnst);
		if (tp == firm_unknown_type)
			tp = get_prim_type(cg->types, mode);

		res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp);

		set_entity_ld_ident(res, get_entity_ident(res));
		set_entity_visibility(res, visibility_local);
		set_entity_variability(res, variability_constant);
		set_entity_allocation(res, allocation_static);

		/* we create a new entity here: its initialization must reside in
		   the const code irg */
		rem              = current_ir_graph;
		current_ir_graph = get_const_code_irg();
		set_atomic_ent_value(res, new_Const_type(tv, tp));
		current_ir_graph = rem;

		pmap_insert(cg->tv_ent, tv, res);
	}
	else
		res = e->value;

	return res;
}
/**
 * Transforms a Const.
 *
 * @param env the transformation environment (block, dbg info, irg,
 *            mode and the Const node to be transformed)
 * @return the created ia32 Const node
 */
static ir_node *gen_Const(ia32_transform_env_t *env) {
	ir_node         *cnst;
	symconst_symbol  sym;
	ir_graph        *irg   = env->irg;
	ir_node         *block = env->block;
	ir_node         *node  = env->irn;
	dbg_info        *dbg   = env->dbg;
	ir_mode         *mode  = env->mode;

	if (mode_is_float(mode)) {
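		/* ia32 has no floating point immediates: the constant is placed */
		/* in memory and addressed through a SymConst.                   */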
		sym.entity_p = get_entity_for_tv(env->cg, node);

		cnst     = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
		env->irn = cnst;
		cnst     = gen_SymConst(env);
	}
	else {
		cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node));
		set_ia32_Const_attr(cnst, node);
	}

	return cnst;
}
/**
 * Transforms (all) Consts into ia32_Consts and places them in the
 * block where they are used (or in the cfg-pred block in case of Phis).
 */
void ia32_place_consts(ir_node *irn, void *env) {
	ia32_code_gen_t *cg = env;
	ia32_transform_env_t tenv;
	ir_mode *mode;
	ir_node *pred, *cnst;
	int i;
	opcode opc;

	if (is_Block(irn))
		return;

	mode = get_irn_mode(irn);

	tenv.arch_env = cg->arch_env;
	tenv.block    = get_nodes_block(irn);
	tenv.cg       = cg;
	tenv.irg      = cg->irg;

	/* Loop over all predecessors and check for Sym/Const nodes */
	for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
		cnst = NULL;
		pred = get_irn_n(irn, i);
		opc  = get_irn_opcode(pred);

		tenv.mode = get_irn_mode(pred);
		tenv.dbg  = get_irn_dbg_info(pred);

		/* If it's a Phi, then we need to create the */
		/* new Const in its predecessor block        */
		if (is_Phi(irn)) {
			tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i);
		}

		if (opc == iro_Const) {
			tenv.irn = pred;
			cnst     = gen_Const(&tenv);
		}
		else if (opc == iro_SymConst) {
			tenv.irn = pred;
			cnst     = gen_SymConst(&tenv);
		}

		/* if we found a const, then set it */
		if (cnst) {
			set_irn_n(irn, i, cnst);
		}
	}
}
/******************************************************************
 *              _     _                   __  __           _
 *     /\      | |   | |                 |  \/  |         | |
 *    /  \   __| | __| |_ __ ___  ___ ___| \  / | ___   __| | ___
 *   / /\ \ / _` |/ _` | '__/ _ \/ __/ __| |\/| |/ _ \ / _` |/ _ \
 *  / ____ \ (_| | (_| | | |  __/\__ \__ \ |  | | (_) | (_| |  __/
 * /_/    \_\__,_|\__,_|_|  \___||___/___/_|  |_|\___/ \__,_|\___|
 *
 ******************************************************************/
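/**
 * Checks whether irn is a commutative ia32 node, i.e. whether its two
 * data operands may be swapped.
 */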
static int node_is_comm(const ir_node *irn) {
	if (is_ia32_Add(irn) ||
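/**
 * Counts the out edges of irn, i.e. the number of its users.
 */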
static int ia32_get_irn_n_edges(const ir_node *irn) {
	const ir_edge_t *edge;
	int cnt = 0;

	foreach_out_edge(irn, edge) {
		cnt++;
	}

	return cnt;
}
/**
 * Returns the first mode_M Proj connected to irn.
 */
static ir_node *get_mem_proj(const ir_node *irn) {
	const ir_edge_t *edge;
	ir_node *src;

	assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

	foreach_out_edge(irn, edge) {
		src = get_edge_src_irn(edge);

		assert(is_Proj(src) && "Proj expected");

		if (get_irn_mode(src) == mode_M)
			return src;
	}

	return NULL;
}
/**
 * Returns the Proj with number 0 connected to irn.
 */
static ir_node *get_res_proj(const ir_node *irn) {
	const ir_edge_t *edge;
	ir_node *src;

	assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

	foreach_out_edge(irn, edge) {
		src = get_edge_src_irn(edge);

		assert(is_Proj(src) && "Proj expected");

		if (get_Proj_proj(src) == 0)
			return src;
	}

	return NULL;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, int (*is_op_func)(const ir_node *n)) {
	if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
		return 1;
	}

	return 0;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
	int (*is_op_func)(const ir_node *n))
{
	if (is_Proj(pred)) {
		pred = get_Proj_pred(pred);
		if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
			return 1;
		}
	}

	return 0;
}
/**
 * Folds Add or Sub to LEA if possible.
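 *
 * An ia32 LEA computes base + index * scale + offset in a single
 * instruction, where scale is limited to 1, 2, 4 or 8.
 */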
static ir_node *fold_addr(be_abi_irg_t *babi, ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg) {
	ir_graph *irg   = get_irn_irg(irn);
	ir_mode  *mode  = get_irn_mode(irn);
	dbg_info *dbg   = get_irn_dbg_info(irn);
	ir_node  *block = get_nodes_block(irn);
	ir_node  *res   = irn;
	char     *offs      = NULL;
	char     *offs_cnst = NULL;
	char     *offs_lea  = NULL;
	int       scale = 0;
	int       isadd = 0;
	ir_node  *left, *right, *temp;
	ir_node  *base, *index;
	ia32_am_flavour_t am_flav;

	if (is_ia32_Add(irn))
		isadd = 1;

	left  = get_irn_n(irn, 2);
	right = get_irn_n(irn, 3);

	/* base and index start out as left operand and NoReg; they may be */
	/* replaced below when a LEA or a scaled Shl is recognized         */
	base  = left;
	index = noreg;
	/* "normalize" arguments in case of add with two operands */
	if (isadd && ! be_is_NoReg(babi, right)) {
		/* put LEA == ia32_am_O as right operand */
		if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
			set_irn_n(irn, 2, right);
			set_irn_n(irn, 3, left);
			temp  = left;
			left  = right;
			right = temp;
		}

		/* put LEA != ia32_am_O as left operand */
		if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
			set_irn_n(irn, 2, right);
			set_irn_n(irn, 3, left);
			temp  = left;
			left  = right;
			right = temp;
		}

		/* put SHL as left operand iff left is NOT a LEA */
		if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
			set_irn_n(irn, 2, right);
			set_irn_n(irn, 3, left);
			temp  = left;
			left  = right;
			right = temp;
		}
	}
	/* check if operand is either const */
	if (get_ia32_cnst(irn)) {
		DBG((mod, LEVEL_1, "\tfound op with imm"));

		offs_cnst = get_ia32_cnst(irn);
	}

	/* determine the operand which needs to be checked */
	if (be_is_NoReg(babi, right)) {
		temp = left;
	}
	else {
		temp = right;
	}

	/* check if right operand is AMConst (LEA with ia32_am_O) */
	if (is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
		DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));

		offs_lea = get_ia32_am_offs(temp);
	}

	/* default for add -> make right operand to index */
	if (isadd) {
		index = right;

		DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));
	}

	/* determine the operand which needs to be checked for the Shl */
	temp = left;
	if (is_ia32_Lea(left)) {
		temp = right;
	}
	/* check for SHL 1,2,3 */
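	/* A left shift by 1, 2 or 3 multiplies the index by 2, 4 or 8, */
	/* which are exactly the scale factors ia32 addressing encodes. */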
	if (pred_is_specific_node(temp, is_ia32_Shl)) {
		temp = get_Proj_pred(temp);

		if (get_ia32_Immop_tarval(temp)) {
			scale = get_tarval_long(get_ia32_Immop_tarval(temp));

			if (scale >= 1 && scale <= 3) {
				index = get_irn_n(temp, 2);

				DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
			}
		}
	}

	/* determine base */
	if (! be_is_NoReg(babi, index)) {
		/* if we have index, but left == right -> no base */
		if (left == right) {
			base = noreg;
		}
		else if (! is_ia32_Lea(left) && (index != right)) {
			/* index != right -> we found a good Shl           */
			/* left != LEA    -> this Shl was the left operand */
			/* -> base is right operand                        */
			base = right;
		}
	}
	/* Try to assimilate a LEA as left operand */
	if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
		am_flav = get_ia32_am_flavour(left);

		/* If we have an Add with a real right operand (not NoReg) and  */
		/* the LEA contains already an index calculation then we create */
		/* a new LEA instead of assimilating the old one.               */
		if (isadd && ! be_is_NoReg(babi, index) && (am_flav & ia32_am_I)) {
			DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
		}
		else {
			DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
			offs  = get_ia32_am_offs(left);
			base  = get_irn_n(left, 0);
			index = get_irn_n(left, 1);
			scale = get_ia32_am_scale(left);
		}
	}
	/* ok, we can create a new LEA */
	res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is);

	/* add the old offset of a previous LEA */
	if (offs) {
		add_ia32_am_offs(res, offs);
	}

	/* add the new offset */
	if (isadd) {
		if (offs_cnst) {
			add_ia32_am_offs(res, offs_cnst);
		}
		if (offs_lea) {
			add_ia32_am_offs(res, offs_lea);
		}
	}
	else {
		/* either lea_O-cnst, -cnst or -lea_O */
		if (offs_cnst) {
			if (offs_lea) {
				add_ia32_am_offs(res, offs_lea);
			}

			sub_ia32_am_offs(res, offs_cnst);
		}
		else {
			sub_ia32_am_offs(res, offs_lea);
		}
	}
	set_ia32_am_scale(res, scale);

	/* determine new am flavour */
	am_flav = ia32_am_N;
	if (offs || offs_cnst || offs_lea) {
		am_flav |= ia32_am_O;
	}
	if (! be_is_NoReg(babi, base)) {
		am_flav |= ia32_am_B;
	}
	if (! be_is_NoReg(babi, index)) {
		am_flav |= ia32_am_I;
	}
	if (scale > 0) {
		am_flav |= ia32_am_S;
	}
	set_ia32_am_flavour(res, am_flav);

	set_ia32_op_type(res, ia32_AddrModeS);

	DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));
	/* get the result Proj of the Add/Sub */
	irn = get_res_proj(irn);

	assert(irn && "Couldn't find result proj");

	/* exchange the old op with the new LEA */
	exchange(irn, res);

	return res;
}
/**
 * Optimizes a pattern around irn to address mode if possible.
 */
void ia32_optimize_am(ir_node *irn, void *env) {
	ia32_code_gen_t   *cg   = env;
	ir_graph          *irg  = cg->irg;
	firm_dbg_module_t *mod  = cg->mod;
	ir_node           *res  = irn;
	be_abi_irg_t      *babi = cg->birg->abi;
	dbg_info *dbg;
	ir_mode *mode;
	ir_node *block, *noreg_gp, *noreg_fp;
	ir_node *left, *right, *temp;
	ir_node *store, *load, *mem_proj;
	ir_node *succ, *addr_b, *addr_i;
	int check_am_src = 0;

	if (! is_ia32_irn(irn))
		return;

	dbg      = get_irn_dbg_info(irn);
	mode     = get_irn_mode(irn);
	block    = get_nodes_block(irn);
	noreg_gp = ia32_new_NoReg_gp(cg);
	noreg_fp = ia32_new_NoReg_fp(cg);

	DBG((mod, LEVEL_1, "checking for AM\n"));
	/* 1st part: check for address calculations and transform them into a LEA */

	/* Following cases can occur:                                  */
	/* - Sub (l, imm) -> LEA [base - offset]                       */
	/* - Sub (l, r == LEA with ia32_am_O) -> LEA [base - offset]   */
	/* - Add (l, imm) -> LEA [base + offset]                       */
	/* - Add (l, r == LEA with ia32_am_O) -> LEA [base + offset]   */
	/* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset]   */
	/* - Add (l, r) -> LEA [base + index * scale]                  */
	/*   with scale > 1 iff l/r == shl (1,2,3)                     */
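	/* For example Add(x, Const 8) can become LEA [x + 8] and      */
	/* Add(x, Shl(y, 2)) can become LEA [x + y * 4].               */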
	if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
		left  = get_irn_n(irn, 2);
		right = get_irn_n(irn, 3);

		/* Do not try to create a LEA if one of the operands is a Load. */
		if (! pred_is_specific_nodeblock(block, left, is_ia32_Load) &&
			! pred_is_specific_nodeblock(block, right, is_ia32_Load))
		{
			res = fold_addr(babi, irn, mod, noreg_gp);
		}
	}
	/* 2nd part: fold following patterns:                                 */
	/* - Load  -> LEA into Load  } TODO: If the LEA is used by more than  */
	/* - Store -> LEA into Store }       one Load/Store it might be       */
	/*                                   better to keep the LEA           */
	/* - op -> Load into AMop with am_Source                              */
	/*   conditions:                                                      */
	/*     - op is am_Source capable AND                                  */
	/*     - the Load is only used by this op AND                         */
	/*     - the Load is in the same block                                */
	/* - Store -> op -> Load into AMop with am_Dest                       */
	/*   conditions:                                                      */
	/*     - op is am_Dest capable AND                                    */
	/*     - the Store uses the same address as the Load AND              */
	/*     - the Load is only used by this op AND                         */
	/*     - the Load and Store are in the same block AND                 */
	/*     - nobody else uses the result of the op                        */
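	/* Example for am_Dest: the sequence                                  */
	/*   x = Load(p); y = Add(x, v); Store(p, y)                          */
	/* collapses into a single memory-destination instruction add [p], v. */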
	if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
		/* 1st: check for Load/Store -> LEA */
		if (is_ia32_Ld(irn) || is_ia32_St(irn)) {
			left = get_irn_n(irn, 0);

			if (is_ia32_Lea(left)) {
				/* get the AM attributes from the LEA */
				add_ia32_am_offs(irn, get_ia32_am_offs(left));
				set_ia32_am_scale(irn, get_ia32_am_scale(left));
				set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
				set_ia32_op_type(irn, get_ia32_op_type(left));

				/* set base and index */
				set_irn_n(irn, 0, get_irn_n(left, 0));
				set_irn_n(irn, 1, get_irn_n(left, 1));
			}
		}
		/* check if at least one operand is a Load */
		else if (pred_is_specific_nodeblock(block, get_irn_n(irn, 2), is_ia32_Ld) ||
				 pred_is_specific_nodeblock(block, get_irn_n(irn, 3), is_ia32_Ld))
		{
			left = get_irn_n(irn, 2);
			if (get_irn_arity(irn) == 4) {
				/* it's an "unary" operation */
				right = left;
			}
			else {
				right = get_irn_n(irn, 3);
			}

			/* normalize commutative ops */
			if (node_is_comm(irn)) {
				/* Assure that right operand is always a Load if there is one    */
				/* because non-commutative ops can only use Dest AM if the right */
				/* operand is a load, so we only need to check right operand.    */
				if (pred_is_specific_nodeblock(block, left, is_ia32_Ld))
				{
					set_irn_n(irn, 2, right);
					set_irn_n(irn, 3, left);

					temp  = left;
					left  = right;
					right = temp;
				}
			}
			/* check for Store -> op -> Load */

			/* Store -> op -> Load optimization is only possible if supported by op */
			/* and if right operand is a Load                                       */
			if ((get_ia32_am_support(irn) & ia32_am_Dest) &&
				pred_is_specific_nodeblock(block, right, is_ia32_Ld))
			{
				/* An address mode capable op always has a result Proj.                  */
				/* If this Proj is used by more than one other node, we don't need to    */
				/* check further, otherwise we check for Store and remember the address, */
				/* the Store points to.                                                  */

				succ = get_res_proj(irn);
				assert(succ && "Couldn't find result proj");
				/* now check for users and Store */
				if (ia32_get_irn_n_edges(succ) == 1) {
					succ = get_edge_src_irn(get_irn_out_edge_first(succ));

					if (is_ia32_fStore(succ) || is_ia32_Store(succ)) {
						store  = succ;
						addr_b = get_irn_n(store, 0);
						addr_i = get_irn_n(store, 1);

						/* Could be that the Store is connected to the address    */
						/* calculating LEA while the Load is already transformed. */
						if (is_ia32_Lea(addr_b)) {
							succ   = addr_b;
							addr_b = get_irn_n(succ, 0);
							addr_i = get_irn_n(succ, 1);
						}
						/* we found a Store as single user: Now check for Load */

						/* Extra check for commutative ops with two Loads */
						/* -> put the interesting Load right              */
						if (node_is_comm(irn) &&
							pred_is_specific_nodeblock(block, left, is_ia32_Ld))
						{
							if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
								(addr_i == get_irn_n(get_Proj_pred(left), 1)))
							{
								/* We exchange left and right, so it's easier to kill     */
								/* the correct Load later and to handle unary operations. */
								set_irn_n(irn, 2, right);
								set_irn_n(irn, 3, left);

								temp  = left;
								left  = right;
								right = temp;
							}
						}
						/* skip the Proj for easier access */
						load = get_Proj_pred(right);

						/* Compare Load and Store address */
						if ((addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1)))
						{
							/* Right Load is from same address, so we can */
							/* disconnect the Load and Store here         */

							/* set new base, index and attributes */
							set_irn_n(irn, 0, addr_b);
							set_irn_n(irn, 1, addr_i);
							add_ia32_am_offs(irn, get_ia32_am_offs(load));
							set_ia32_am_scale(irn, get_ia32_am_scale(load));
							set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
							set_ia32_op_type(irn, ia32_AddrModeD);

							/* connect to Load memory and disconnect Load */
							if (get_irn_arity(irn) == 5) {
								/* binary AMop */
								set_irn_n(irn, 4, get_irn_n(load, 2));
								set_irn_n(irn, 3, noreg_gp);
							}
							else {
								/* unary AMop */
								set_irn_n(irn, 3, get_irn_n(load, 2));
								set_irn_n(irn, 2, noreg_gp);
							}

							/* connect the memory Proj of the Store to the op */
							mem_proj = get_mem_proj(store);
							set_Proj_pred(mem_proj, irn);
							set_Proj_proj(mem_proj, 1);
						}
					}
					else if (get_ia32_am_support(irn) & ia32_am_Source) {
						/* There was no store, check if we still can optimize for source address mode */
						check_am_src = 1;
					}
				}
			} /* if (support AM Dest) */
			else if (get_ia32_am_support(irn) & ia32_am_Source) {
				/* op doesn't support AM Dest -> check for AM Source */
				check_am_src = 1;
			}
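			/* Example for am_Source: for x = Load(p) the node Add(v, x) */
			/* can be emitted as "add v, [p]", reading its right operand */
			/* directly from memory.                                     */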
			/* normalize commutative ops */
			if (node_is_comm(irn)) {
				/* Assure that left operand is always a Load if there is one */
				/* because non-commutative ops can only use Source AM if the */
				/* left operand is a Load, so we only need to check the left */
				/* operand afterwards.                                       */
				if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
					set_irn_n(irn, 2, right);
					set_irn_n(irn, 3, left);

					temp  = left;
					left  = right;
					right = temp;
				}
			}
			/* optimize op -> Load iff Load is only used by this op      */
			/* and left operand is a Load which is only used by this irn */
			if (check_am_src &&
				pred_is_specific_nodeblock(block, left, is_ia32_Ld) &&
				(ia32_get_irn_n_edges(left) == 1))
			{
				left = get_Proj_pred(left);

				addr_b = get_irn_n(left, 0);
				addr_i = get_irn_n(left, 1);

				/* set new base, index and attributes */
				set_irn_n(irn, 0, addr_b);
				set_irn_n(irn, 1, addr_i);
				add_ia32_am_offs(irn, get_ia32_am_offs(left));
				set_ia32_am_scale(irn, get_ia32_am_scale(left));
				set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
				set_ia32_op_type(irn, ia32_AddrModeS);

				/* connect to Load memory */
				if (get_irn_arity(irn) == 5) {
					/* binary AMop */
					set_irn_n(irn, 4, get_irn_n(left, 2));
				}
				else {
					/* unary AMop */
					set_irn_n(irn, 3, get_irn_n(left, 2));
				}

				/* disconnect from Load */
				set_irn_n(irn, 2, noreg_gp);

				/* If Load has a memory Proj, connect it to the op */
				mem_proj = get_mem_proj(left);
				if (mem_proj) {
					set_Proj_pred(mem_proj, irn);
					set_Proj_proj(mem_proj, 1);
				}
			}
		}
	}
}