#include "firm_types.h"
#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */

#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)
static int be_is_NoReg(be_abi_irg_t *babi, const ir_node *irn) {
    if (be_abi_get_callee_save_irn(babi, &ia32_gp_regs[REG_XXX]) == irn ||
        be_abi_get_callee_save_irn(babi, &ia32_fp_regs[REG_XXXX]) == irn)
        return 1;
    return 0;
}
/**
 * Creates a unique ident by adding a number to a tag.
 *
 * @param tag the tag string, must contain a %d where the number is inserted
 */
static ident *unique_id(const char *tag)
{
    static unsigned id = 0;
    char str[256];

    snprintf(str, sizeof(str), tag, ++id);
    return new_id_from_str(str);
}
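/* Illustrative behaviour: unique_id("ia32FloatCnst_%u") returns the idents
   "ia32FloatCnst_1", "ia32FloatCnst_2", ... on successive calls, since the
   static counter is incremented before it is formatted into the tag. */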
/**
 * Transforms a SymConst.
 *
 * @param env the transformation environment (block, graph, debug info and
 *            mode are taken from here; env->irn is the ir SymConst node)
 * @return the created ia32 Const node
 */
static ir_node *gen_SymConst(ia32_transform_env_t *env) {
    ir_node *cnst;
    dbg_info *dbg = env->dbg;
    ir_mode *mode = env->mode;
    ir_graph *irg = env->irg;
    ir_node *block = env->block;

    cnst = new_rd_ia32_Const(dbg, irg, block, mode);
    set_ia32_Const_attr(cnst, env->irn);
    return cnst;
}
/** Get a primitive type for a mode, creating and caching it on first use. */
static ir_type *get_prim_type(pmap *types, ir_mode *mode)
{
    pmap_entry *e = pmap_find(types, mode);
    ir_type *res;
    if (e == NULL) {
        char buf[64];
        snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
        res = new_type_primitive(new_id_from_str(buf), mode);
        pmap_insert(types, mode, res);
    }
    else
        res = e->value;
    return res;
}
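/* Illustrative example: for the float mode named "F" this yields a primitive
   type "prim_type_F"; a second call with the same mode returns the cached
   type from the pmap instead of creating a new one. */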
/**
 * Get an entity that is initialized with a tarval.
 */
static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
{
    tarval *tv = get_Const_tarval(cnst);
    pmap_entry *e = pmap_find(cg->tv_ent, tv);
    entity *res;
    ir_graph *rem;

    if (e == NULL) {
        ir_mode *mode = get_irn_mode(cnst);
        ir_type *tp = get_Const_type(cnst);
        if (tp == firm_unknown_type)
            tp = get_prim_type(cg->types, mode);

        res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp);

        set_entity_ld_ident(res, get_entity_ident(res));
        set_entity_visibility(res, visibility_local);
        set_entity_variability(res, variability_constant);
        set_entity_allocation(res, allocation_static);

        /* we create a new entity here: its initialization must reside in the
           const code irg */
        rem = current_ir_graph;
        current_ir_graph = get_const_code_irg();
        set_atomic_ent_value(res, new_Const_type(tv, tp));
        current_ir_graph = rem;

        pmap_insert(cg->tv_ent, tv, res);
    }
    else
        res = e->value;
    return res;
}
/**
 * Transforms a Const.
 *
 * @param env the transformation environment (block, graph, debug info and
 *            mode are taken from here; env->irn is the ir Const node)
 * @return the created ia32 Const node
 */
static ir_node *gen_Const(ia32_transform_env_t *env) {
    ir_node *cnst;
    symconst_symbol sym;
    ir_graph *irg = env->irg;
    ir_node *block = env->block;
    ir_node *node = env->irn;
    dbg_info *dbg = env->dbg;
    ir_mode *mode = env->mode;

    if (mode_is_float(mode)) {
        sym.entity_p = get_entity_for_tv(env->cg, node);

        cnst = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
        env->irn = cnst;
        cnst = gen_SymConst(env);
    }
    else {
        cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node));
        set_ia32_Const_attr(cnst, node);
    }
    return cnst;
}
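/* Illustrative example of the float path: a "Const 1.5 (mode F)" first
   becomes a static, local, constant entity (e.g. "ia32FloatCnst_1") in the
   global type holding the value 1.5, and the node is then rewritten into an
   ia32 Const carrying the address of that entity. ia32 generally cannot
   encode an arbitrary float immediate directly in an instruction, so the
   value must be loaded from memory. */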
/**
 * Transforms (all) Consts into ia32_Consts and places them in the
 * block where they are used (or in the cfg-pred Block in case of Phis).
 */
void ia32_place_consts(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;
    ia32_transform_env_t tenv;
    ir_mode *mode;
    ir_node *pred, *cnst;
    int i;
    opcode opc;

    mode = get_irn_mode(irn);

    tenv.block = get_nodes_block(irn);

    /* Loop over all predecessors and check for Sym/Const nodes */
    for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
        cnst = NULL;
        pred = get_irn_n(irn, i);
        opc  = get_irn_opcode(pred);
        tenv.irn  = pred;
        tenv.mode = get_irn_mode(pred);
        tenv.dbg  = get_irn_dbg_info(pred);

        /* If it's a Phi, then we need to create the */
        /* new Const in its predecessor block        */
        if (is_Phi(irn)) {
            tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i);
        }

        if (opc == iro_Const)
            cnst = gen_Const(&tenv);
        else if (opc == iro_SymConst)
            cnst = gen_SymConst(&tenv);

        /* if we found a const, then set it */
        if (cnst)
            set_irn_n(irn, i, cnst);
    }
}
/******************************************************************
 *                          AddressMode                           *
 ******************************************************************/
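/* Background for this section (sketch): an ia32 address mode operand
 * computes, following the processor's SIB addressing scheme,
 *
 *     address = base + index * 2^scale + offset
 *
 * where scale is a shift amount of 0..3 (i.e. factors 1, 2, 4, 8), base
 * and index are registers (NoReg when unused) and offset is a constant.
 * The am_flavour flags used below (ia32_am_O for an offset, ia32_am_I for
 * an index, ...) record which of these components are actually present. */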
static int node_is_comm(const ir_node *irn) {
    return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}
static int ia32_get_irn_n_edges(const ir_node *irn) {
    const ir_edge_t *edge;
    int cnt = 0;
    foreach_out_edge(irn, edge) {
        cnt++;
    }
    return cnt;
}
/**
 * Returns the first mode_M Proj connected to irn.
 */
static ir_node *get_mem_proj(const ir_node *irn) {
    const ir_edge_t *edge;
    ir_node *src;

    assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

    foreach_out_edge(irn, edge) {
        src = get_edge_src_irn(edge);
        assert(is_Proj(src) && "Proj expected");
        if (get_irn_mode(src) == mode_M)
            return src;
    }
    return NULL;
}
/**
 * Returns the Proj with number 0 connected to irn.
 */
static ir_node *get_res_proj(const ir_node *irn) {
    const ir_edge_t *edge;
    ir_node *src;

    assert(get_irn_mode(irn) == mode_T && "expected mode_T node");

    foreach_out_edge(irn, edge) {
        src = get_edge_src_irn(edge);
        assert(is_Proj(src) && "Proj expected");
        if (get_Proj_proj(src) == 0)
            return src;
    }
    return NULL;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, int (*is_op_func)(const ir_node *n)) {
    if (is_Proj(pred) && is_op_func(get_Proj_pred(pred))) {
        return 1;
    }
    return 0;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
                                      int (*is_op_func)(const ir_node *n))
{
    if (is_Proj(pred)) {
        pred = get_Proj_pred(pred);
        if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
            return 1;
        }
    }
    return 0;
}
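/* Typical use (illustrative): pred_is_specific_nodeblock(block, pred, is_ia32_Ld)
   answers "is pred the result Proj of an ia32 Load living in block?", which is
   exactly the question the address mode folding below asks about its operands. */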
/**
 * Folds an Add or Sub into a LEA if possible.
 */
static ir_node *fold_addr(be_abi_irg_t *babi, ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg) {
    ir_graph *irg = get_irn_irg(irn);
    ir_mode *mode = get_irn_mode(irn);
    dbg_info *dbg = get_irn_dbg_info(irn);
    ir_node *block = get_nodes_block(irn);
    ir_node *res = irn;
    char *offs = NULL;
    char *offs_cnst = NULL;
    char *offs_lea = NULL;
    int scale = 0;
    int isadd = 0;
    ir_node *left, *right, *temp;
    ir_node *base, *index;
    ia32_am_flavour_t am_flav;

    if (is_ia32_Add(irn))
        isadd = 1;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);
350 /* "normalize" arguments in case of add with two operands */
351 if (isadd && ! be_is_NoReg(babi, right)) {
352 /* put LEA == ia32_am_O as right operand */
353 if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
354 set_irn_n(irn, 2, right);
355 set_irn_n(irn, 3, left);
361 /* put LEA != ia32_am_O as left operand */
362 if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
363 set_irn_n(irn, 2, right);
364 set_irn_n(irn, 3, left);
370 /* put SHL as left operand iff left is NOT a LEA */
371 if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
372 set_irn_n(irn, 2, right);
373 set_irn_n(irn, 3, left);
    /* check if the operation carries an immediate operand */
    if (get_ia32_cnst(irn)) {
        DBG((mod, LEVEL_1, "\tfound op with imm"));

        offs_cnst = get_ia32_cnst(irn);
    }

    /* determine the operand which needs to be checked */
    if (be_is_NoReg(babi, right)) {
        temp = left;
    }
    else {
        temp = right;
    }

    /* check if right operand is AMConst (LEA with ia32_am_O) */
    if (is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
        DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));

        offs_lea = get_ia32_am_offs(temp);
    }

    /* default for add -> make the right operand the index */
    index = right;

    DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));
    /* determine the operand which needs to be checked for a Shl */
    temp = left;
    if (is_ia32_Lea(left)) {
        temp = right;
    }

    /* check for SHL 1,2,3 */
    if (pred_is_specific_node(temp, is_ia32_Shl)) {
        temp = get_Proj_pred(temp);

        if (get_ia32_Immop_tarval(temp)) {
            scale = get_tarval_long(get_ia32_Immop_tarval(temp));
            index = get_irn_n(temp, 2);

            DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
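            /* Illustrative example: for x << 2 the Shl carries the immediate 2,
               so index = x and scale = 2, which encodes the factor 4 in the
               address [base + x * 4 + offset]. */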
        }
    }

    base = left;
    if (! be_is_NoReg(babi, index)) {
        /* if we have an index, but left == right -> no base */
        if (left == right) {
            base = noreg;
        }
        else if (! is_ia32_Lea(left) && (index != right)) {
            /* index != right -> we found a good Shl           */
            /* left != LEA    -> this Shl was the left operand */
            /* -> base is the right operand                    */
            base = right;
        }
    }
    /* Try to assimilate a LEA as left operand */
    if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
        am_flav = get_ia32_am_flavour(left);

        /* If we have an Add with a real right operand (not NoReg) and */
        /* the LEA already contains an index calculation, then we keep */
        /* the old LEA and create a new one on top of it.              */
        if (isadd && ! be_is_NoReg(babi, index) && (am_flav & ia32_am_I)) {
            DBG((mod, LEVEL_1, "\tleave old LEA, creating new one\n"));
        }
        else {
            DBG((mod, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
            offs  = get_ia32_am_offs(left);
            base  = get_irn_n(left, 0);
            index = get_irn_n(left, 1);
            scale = get_ia32_am_scale(left);
        }
    }
    /* ok, we can create a new LEA */
    res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode_Is);

    /* add the old offset of a previous LEA */
    if (offs)
        add_ia32_am_offs(res, offs);

    /* add the new offset */
    if (isadd) {
        if (offs_cnst)
            add_ia32_am_offs(res, offs_cnst);
        if (offs_lea)
            add_ia32_am_offs(res, offs_lea);
    }
    else {
        /* for a Sub: either lea_O - cnst, base - cnst or base - lea_O */
        if (offs_cnst) {
            if (offs_lea)
                add_ia32_am_offs(res, offs_lea);
            sub_ia32_am_offs(res, offs_cnst);
        }
        else
            sub_ia32_am_offs(res, offs_lea);
    }
    set_ia32_am_scale(res, scale);
    am_flav = ia32_am_N;

    /* determine the new am flavour */
    if (offs || offs_cnst || offs_lea) {
        am_flav |= ia32_am_O;
    }
    if (! be_is_NoReg(babi, base)) {
        am_flav |= ia32_am_B;
    }
    if (! be_is_NoReg(babi, index)) {
        am_flav |= ia32_am_I;
    }
    set_ia32_am_flavour(res, am_flav);
    set_ia32_op_type(res, ia32_AddrModeS);

    DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));

    /* get the result Proj of the Add/Sub */
    irn = get_res_proj(irn);

    assert(irn && "Couldn't find result proj");

    /* exchange the old op with the new LEA */
    exchange(irn, res);

    return res;
}
/**
 * Optimizes the pattern around irn into an address mode, if possible.
 */
void ia32_optimize_am(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;
    ir_graph *irg = cg->irg;
    firm_dbg_module_t *mod = cg->mod;
    ir_node *res = irn;
    be_abi_irg_t *babi = cg->birg->abi;
    dbg_info *dbg;
    ir_mode *mode;
    ir_node *block, *noreg_gp, *noreg_fp;
    ir_node *left, *right, *temp;
    ir_node *store, *load, *mem_proj;
    ir_node *succ, *addr_b, *addr_i;
    int check_am_src = 0;

    if (! is_ia32_irn(irn))
        return;

    dbg      = get_irn_dbg_info(irn);
    mode     = get_irn_mode(irn);
    block    = get_nodes_block(irn);
    noreg_gp = ia32_new_NoReg_gp(cg);
    noreg_fp = ia32_new_NoReg_fp(cg);

    DBG((mod, LEVEL_1, "checking for AM\n"));
    /* 1st part: check for address calculations and transform them into a LEA */

    /* The following cases can occur:                                  */
    /* - Sub (l, imm)                     -> LEA [base - offset]       */
    /* - Sub (l, r == LEA with ia32_am_O) -> LEA [base - offset]       */
    /* - Add (l, imm)                     -> LEA [base + offset]       */
    /* - Add (l, r == LEA with ia32_am_O) -> LEA [base + offset]       */
    /* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset]       */
    /* - Add (l, r)                       -> LEA [base + index * scale] */
    /*   with scale > 1 iff l/r == shl (1,2,3)                         */
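    /* Two illustrative instances of these cases (operand names made up): */
    /*   Add(x, Const 8)   -> LEA [x + 8]                                 */
    /*   Add(y, Shl(x, 2)) -> LEA [y + x * 4]                             */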
    if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
        left  = get_irn_n(irn, 2);
        right = get_irn_n(irn, 3);

        /* Do not try to create a LEA if one of the operands is a Load. */
        if (! pred_is_specific_nodeblock(block, left, is_ia32_Load) &&
            ! pred_is_specific_nodeblock(block, right, is_ia32_Load))
        {
            res = fold_addr(babi, irn, mod, noreg_gp);
        }
    }
    /* 2nd part: fold the following patterns:                                */
    /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one */
    /* - Store -> LEA into Store }       Load/Store it might be better to    */
    /*                                   keep the LEA                        */
    /* - op -> Load into AMop with am_Source, if:                            */
    /*     - op is am_Source capable AND                                     */
    /*     - the Load is only used by this op AND                            */
    /*     - the Load is in the same block                                   */
    /* - Store -> op -> Load into AMop with am_Dest, if:                     */
    /*     - op is am_Dest capable AND                                       */
    /*     - the Store uses the same address as the Load AND                 */
    /*     - the Load is only used by this op AND                            */
    /*     - the Load and Store are in the same block AND                    */
    /*     - nobody else uses the result of the op                           */
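    /* Illustrative example for the am_Dest case: the pattern            */
    /*   Store(p, Add(Load(p), x))                                       */
    /* collapses into one read-modify-write instruction, roughly         */
    /*   add [p], x                                                      */
    /* which is why the Load and Store must use the same address and may */
    /* have no other users.                                              */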
    if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
        /* 1st: check for Load/Store -> LEA */
        if (is_ia32_Ld(irn) || is_ia32_St(irn)) {
            left = get_irn_n(irn, 0);

            if (is_ia32_Lea(left)) {
                /* get the AM attributes from the LEA */
                add_ia32_am_offs(irn, get_ia32_am_offs(left));
                set_ia32_am_scale(irn, get_ia32_am_scale(left));
                set_ia32_am_flavour(irn, get_ia32_am_flavour(left));

                set_ia32_op_type(irn, is_ia32_St(irn) ? ia32_AddrModeD : ia32_AddrModeS);

                /* set base and index */
                set_irn_n(irn, 0, get_irn_n(left, 0));
                set_irn_n(irn, 1, get_irn_n(left, 1));
            }
        }
        /* check if at least one operand is a Load */
        else if (pred_is_specific_nodeblock(block, get_irn_n(irn, 2), is_ia32_Ld) ||
                 pred_is_specific_nodeblock(block, get_irn_n(irn, 3), is_ia32_Ld))
        {
            left = get_irn_n(irn, 2);
            if (get_irn_arity(irn) == 4) {
                /* it's a "unary" operation */
                right = left;
            }
            else {
                right = get_irn_n(irn, 3);
            }

            /* normalize commutative ops */
            if (node_is_comm(irn)) {
                /* Assure that the right operand is always a Load if there is */
                /* one, because non-commutative ops can only use Dest AM if   */
                /* the right operand is a Load, so we only need to check the  */
                /* right operand afterwards.                                  */
                if (pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                {
                    set_irn_n(irn, 2, right);
                    set_irn_n(irn, 3, left);

                    temp  = left;
                    left  = right;
                    right = temp;
                }
            }
            /* check for Store -> op -> Load */

            /* Store -> op -> Load optimization is only possible if supported by op */
            /* and if the right operand is a Load                                   */
            if ((get_ia32_am_support(irn) & ia32_am_Dest) &&
                pred_is_specific_nodeblock(block, right, is_ia32_Ld))
            {
                /* An address mode capable op always has a result Proj.               */
                /* If this Proj is used by more than one other node, we don't need to */
                /* check further, otherwise we check for a Store and remember the     */
                /* address the Store points to.                                       */

                succ = get_res_proj(irn);
                assert(succ && "Couldn't find result proj");

                store  = NULL;
                addr_b = NULL;
                addr_i = NULL;

                /* now check for users and Store */
                if (ia32_get_irn_n_edges(succ) == 1) {
                    succ = get_edge_src_irn(get_irn_out_edge_first(succ));

                    if (is_ia32_fStore(succ) || is_ia32_Store(succ)) {
                        store  = succ;
                        addr_b = get_irn_n(store, 0);

                        /* Could be that the Store is connected to the address    */
                        /* calculating LEA while the Load is already transformed. */
                        if (is_ia32_Lea(addr_b)) {
                            succ   = addr_b;
                            addr_b = get_irn_n(succ, 0);
                            addr_i = get_irn_n(succ, 1);
                        }
                        else {
                            addr_i = get_irn_n(store, 1);
                        }
                    }
                }
                if (store) {
                    /* we found a Store as the single user: now check for the Load */

                    /* Extra check for commutative ops with two Loads */
                    /* -> put the interesting Load on the right       */
                    if (node_is_comm(irn) &&
                        pred_is_specific_nodeblock(block, left, is_ia32_Ld))
                    {
                        if ((addr_b == get_irn_n(get_Proj_pred(left), 0)) &&
                            (addr_i == get_irn_n(get_Proj_pred(left), 1)))
                        {
                            /* We exchange left and right, so it's easier to kill     */
                            /* the correct Load later and to handle unary operations. */
                            set_irn_n(irn, 2, right);
                            set_irn_n(irn, 3, left);

                            temp  = left;
                            left  = right;
                            right = temp;
                        }
                    }

                    /* skip the Proj for easier access */
                    load = get_Proj_pred(right);
                    /* Compare the Load and Store address */
                    if ((addr_b == get_irn_n(load, 0)) && (addr_i == get_irn_n(load, 1)))
                    {
                        /* The right Load is from the same address, so we can */
                        /* disconnect the Load and Store here.                */

                        /* set new base, index and attributes */
                        set_irn_n(irn, 0, addr_b);
                        set_irn_n(irn, 1, addr_i);
                        add_ia32_am_offs(irn, get_ia32_am_offs(load));
                        set_ia32_am_scale(irn, get_ia32_am_scale(load));
                        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
                        set_ia32_op_type(irn, ia32_AddrModeD);

                        /* connect to the Load memory and disconnect the Load */
                        if (get_irn_arity(irn) == 5) {
                            /* binary AMop */
                            set_irn_n(irn, 4, get_irn_n(load, 2));
                            set_irn_n(irn, 3, noreg_gp);
                        }
                        else {
                            /* unary AMop */
                            set_irn_n(irn, 3, get_irn_n(load, 2));
                            set_irn_n(irn, 2, noreg_gp);
                        }

                        /* connect the memory Proj of the Store to the op */
                        mem_proj = get_mem_proj(store);
                        set_Proj_pred(mem_proj, irn);
                        set_Proj_proj(mem_proj, 1);
                    }
                }
                else if (get_ia32_am_support(irn) & ia32_am_Source) {
                    /* There was no Store, check if we can still optimize for source address mode */
                    check_am_src = 1;
                }
            } /* if (support AM Dest) */
            else if (get_ia32_am_support(irn) & ia32_am_Source) {
                /* op doesn't support AM Dest -> check for AM Source */
                check_am_src = 1;
            }
            /* normalize commutative ops */
            if (node_is_comm(irn)) {
                /* Assure that the left operand is always a Load if there is  */
                /* one, because non-commutative ops can only use Source AM if */
                /* the left operand is a Load, so we only need to check the   */
                /* left operand afterwards.                                   */
                if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
                    set_irn_n(irn, 2, right);
                    set_irn_n(irn, 3, left);

                    temp  = left;
                    left  = right;
                    right = temp;
                }
            }

            /* optimize op -> Load iff the Load is only used by this op      */
            /* and the left operand is a Load which is only used by this irn */
            if (check_am_src &&
                pred_is_specific_nodeblock(block, left, is_ia32_Ld) &&
                (ia32_get_irn_n_edges(left) == 1))
            {
                left = get_Proj_pred(left);

                addr_b = get_irn_n(left, 0);
                addr_i = get_irn_n(left, 1);

                /* set new base, index and attributes */
                set_irn_n(irn, 0, addr_b);
                set_irn_n(irn, 1, addr_i);
                add_ia32_am_offs(irn, get_ia32_am_offs(left));
                set_ia32_am_scale(irn, get_ia32_am_scale(left));
                set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
                set_ia32_op_type(irn, ia32_AddrModeS);

                /* connect to the Load memory */
                if (get_irn_arity(irn) == 5) {
                    /* binary AMop */
                    set_irn_n(irn, 4, get_irn_n(left, 2));
                }
                else {
                    /* unary AMop */
                    set_irn_n(irn, 3, get_irn_n(left, 2));
                }

                /* disconnect from the Load */
                set_irn_n(irn, 2, noreg_gp);

                /* If the Load has a memory Proj, connect it to the op */
                mem_proj = get_mem_proj(left);
                if (mem_proj) {
                    set_Proj_pred(mem_proj, irn);
                    set_Proj_proj(mem_proj, 1);
                }
            }
        }
    }
}
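/* Analogous illustrative example for the am_Source case handled above:
   Add(Load(p), x) becomes roughly "add reg_x, [p]", i.e. the Load is folded
   into the operation as a memory source operand. */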