#include "firm_types.h"
#include "../benode_t.h"
#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#define is_NoMem(irn) (get_irn_op(irn) == op_NoMem)
 * creates a unique ident by adding a number to a tag
 * @param tag   the tag string, must contain a %d where the unique number is inserted
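 *              (e.g. unique_id("ia32FloatCnst_%u") yields ia32FloatCnst_1,
 *              ia32FloatCnst_2, ...)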
static ident *unique_id(const char *tag)
static unsigned id = 0;
snprintf(str, sizeof(str), tag, ++id);
return new_id_from_str(str);
 * Transforms a SymConst.
 *
 * @param env   the transform environment; it provides the block the new node
 *              should belong to, the ir SymConst node and its mode
 * @return the created ia32 Const node
static ir_node *gen_SymConst(ia32_transform_env_t *env) {
dbg_info *dbg = env->dbg;
ir_mode *mode = env->mode;
ir_graph *irg = env->irg;
ir_node *block = env->block;
cnst = new_rd_ia32_Const(dbg, irg, block, mode);
set_ia32_Const_attr(cnst, env->irn);
 * Get a primitive type for a mode.
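 * The created types are cached in the given pmap, so at most one primitive
 * type is created per mode.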
static ir_type *get_prim_type(pmap *types, ir_mode *mode)
pmap_entry *e = pmap_find(types, mode);
snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
res = new_type_primitive(new_id_from_str(buf), mode);
pmap_insert(types, mode, res);
 * Get an entity that is initialized with a tarval.
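 * Used for floating point constants: ia32 has no floating point immediates,
 * so gen_Const() materializes them as global entities and references them
 * via a SymConst address.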
static entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
tarval *tv = get_Const_tarval(cnst);
pmap_entry *e = pmap_find(cg->tv_ent, tv);
ir_mode *mode = get_irn_mode(cnst);
ir_type *tp = get_Const_type(cnst);
if (tp == firm_unknown_type)
tp = get_prim_type(cg->types, mode);
res = new_entity(get_glob_type(), unique_id("ia32FloatCnst_%u"), tp);
set_entity_ld_ident(res, get_entity_ident(res));
set_entity_visibility(res, visibility_local);
set_entity_variability(res, variability_constant);
set_entity_allocation(res, allocation_static);
/* we create a new entity here: its initialization must reside in the const code irg */
rem = current_ir_graph;
current_ir_graph = get_const_code_irg();
set_atomic_ent_value(res, new_Const_type(tv, tp));
current_ir_graph = rem;
 * Transforms a Const.
 *
 * @param env   the transform environment; it provides the block the new node
 *              should belong to, the ir Const node and its mode
 * @return the created ia32 Const node
static ir_node *gen_Const(ia32_transform_env_t *env) {
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *node = env->irn;
dbg_info *dbg = env->dbg;
ir_mode *mode = env->mode;
if (mode_is_float(mode)) {
sym.entity_p = get_entity_for_tv(env->cg, node);
cnst = new_rd_SymConst(dbg, irg, block, sym, symconst_addr_ent);
cnst = gen_SymConst(env);
cnst = new_rd_ia32_Const(dbg, irg, block, get_irn_mode(node));
set_ia32_Const_attr(cnst, node);
 * Transforms (all) Consts into ia32_Const nodes and places them in the
 * block where they are used (or in the cfg-pred block in case of Phis).
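 * A Phi's i-th operand must be available at the end of the i-th predecessor
 * block, so for Phis the constant is placed in that predecessor block instead.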
void ia32_place_consts(ir_node *irn, void *env) {
ia32_code_gen_t *cg = env;
ia32_transform_env_t tenv;
ir_node *pred, *cnst;
mode = get_irn_mode(irn);
tenv.arch_env = cg->arch_env;
tenv.block = get_nodes_block(irn);
/* Loop over all predecessors and check for Sym/Const nodes */
for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
pred = get_irn_n(irn, i);
opc = get_irn_opcode(pred);
tenv.mode = get_irn_mode(pred);
tenv.dbg = get_irn_dbg_info(pred);
/* If it's a Phi, then we need to create the */
/* new Const in its predecessor block */
tenv.block = get_Block_cfgpred_block(get_nodes_block(irn), i);
cnst = gen_Const(&tenv);
cnst = gen_SymConst(&tenv);
/* if we found a const, then set it */
set_irn_n(irn, i, cnst);
/******************************************************************
 *
 *                          AddressMode
 *
 ******************************************************************/
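/**
 * Checks whether irn is a commutative ia32 node whose data operands
 * may be swapped.
 */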
static int node_is_comm(const ir_node *irn) {
if (is_ia32_Add(irn) ||
 * Returns the first mode_M Proj connected to irn.
static ir_node *get_mem_proj(const ir_node *irn) {
const ir_edge_t *edge;
assert(get_irn_mode(irn) == mode_T && "expected mode_T node");
foreach_out_edge(irn, edge) {
src = get_edge_src_irn(edge);
assert(is_Proj(src) && "Proj expected");
if (get_irn_mode(src) == mode_M)
 * Determines if irn is a Proj and if is_op_func returns true for its predecessor.
static int pred_is_specific_node(const ir_node *irn, int (*is_op_func)(const ir_node *n)) {
if (is_Proj(irn) && is_op_func(get_Proj_pred(irn))) {
 * Folds Add or Sub to LEA if possible
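 * Base, index, scale and offset are collected from the operands (including an
 * operand that already is a LEA) and merged into a single ia32 Lea node.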
static ir_node *fold_addr(ir_node *irn, firm_dbg_module_t *mod, ir_node *noreg) {
ir_graph *irg = get_irn_irg(irn);
ir_mode *mode = get_irn_mode(irn);
dbg_info *dbg = get_irn_dbg_info(irn);
ir_node *block = get_nodes_block(irn);
char *new_offs = NULL;
ir_node *left, *right, *temp;
ir_node *base, *index;
ia32_am_flavour_t am_flav;
if (is_ia32_Add(irn))
left = get_irn_n(irn, 2);
right = get_irn_n(irn, 3);
/* "normalize" arguments in case of add */
/* put LEA == ia32_am_O as right operand */
if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
/* put LEA != ia32_am_O as left operand */
if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
/* put SHL as right operand */
if (pred_is_specific_node(left, is_ia32_Shl)) {
/* Left operand could already be a LEA */
if (is_ia32_Lea(left)) {
DBG((mod, LEVEL_1, "\tgot LEA as left operand\n"));
base = get_irn_n(left, 0);
index = get_irn_n(left, 1);
offs = get_ia32_am_offs(left);
scale = get_ia32_am_scale(left);
/* check if the node has an immediate or the right operand is an AMConst (LEA with ia32_am_O) */
if (get_ia32_cnst(irn)) {
DBG((mod, LEVEL_1, "\tfound op with imm"));
new_offs = get_ia32_cnst(irn);
else if (is_ia32_Lea(right) && get_ia32_am_flavour(right) == ia32_am_O) {
DBG((mod, LEVEL_1, "\tgot op with LEA am_O"));
new_offs = get_ia32_am_offs(right);
/* we can only get an additional index if there isn't already one */
else if (isadd && be_is_NoReg(index)) {
/* default for an Add -> use the right operand as index */
DBG((mod, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));
/* check for SHL 1,2,3 */
if (pred_is_specific_node(right, is_ia32_Shl)) {
temp = get_Proj_pred(right);
if (get_ia32_Immop_tarval(temp)) {
scale = get_tarval_long(get_ia32_Immop_tarval(temp));
index = get_irn_n(temp, 2);
DBG((mod, LEVEL_1, "\tgot scaled index %+F\n", index));
/* ok, we can create a new LEA */
res = new_rd_ia32_Lea(dbg, irg, block, base, index, mode);
/* add the old offset of a previous LEA */
add_ia32_am_offs(res, offs);
/* add the new offset */
add_ia32_am_offs(res, new_offs);
sub_ia32_am_offs(res, new_offs);
set_ia32_am_scale(res, scale);
/* determine new am flavour */
if (offs || new_offs) {
if (! be_is_NoReg(base)) {
if (! be_is_NoReg(index)) {
set_ia32_am_flavour(res, am_flav);
set_ia32_op_type(res, ia32_AddrModeS);
DBG((mod, LEVEL_1, "\tLEA [%+F + %+F * %d + %s]\n", base, index, scale, get_ia32_am_offs(res)));
/* exchange the old op with the new LEA */
 * Optimizes a pattern around irn to address mode if possible.
void ia32_optimize_am(ir_node *irn, void *env) {
ia32_code_gen_t *cg = env;
ir_graph *irg = cg->irg;
firm_dbg_module_t *mod = cg->mod;
ir_node *block, *noreg_gp, *noreg_fp;
ir_node *left, *right, *temp;
ir_node *store, *mem_proj;
ir_node *succ, *addr_b, *addr_i;
int check_am_src = 0;
if (! is_ia32_irn(irn))
dbg = get_irn_dbg_info(irn);
mode = get_irn_mode(irn);
block = get_nodes_block(irn);
noreg_gp = ia32_new_NoReg_gp(cg);
noreg_fp = ia32_new_NoReg_fp(cg);
DBG((mod, LEVEL_1, "checking for AM\n"));
/* 1st part: check for address calculations and transform them into a LEA */
/* Following cases can occur: */
/* - Sub (l, imm) -> LEA [base - offset] */
/* - Sub (l, r == LEA with ia32_am_O) -> LEA [base - offset] */
/* - Add (l, imm) -> LEA [base + offset] */
/* - Add (l, r == LEA with ia32_am_O) -> LEA [base + offset] */
/* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset] */
/* - Add (l, r) -> LEA [base + index * scale] */
/* with scale > 1 iff l/r == shl (1,2,3) */
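/*   e.g. Add (x, Shl (y, 2)) -> LEA [x + (y << 2)] */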
if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
left = get_irn_n(irn, 2);
right = get_irn_n(irn, 3);
/* Do not try to create a LEA if one of the operands is a Load. */
if (! pred_is_specific_node(left, is_ia32_Load) &&
! pred_is_specific_node(right, is_ia32_Load))
res = fold_addr(irn, mod, noreg_gp);
/* 2nd part: fold the following patterns: */
/* - Load -> LEA into Load } TODO: If the LEA is used by more than one Load/Store */
/* - Store -> LEA into Store } it might be better to keep the LEA */
/* - op -> Load into AMop with am_Source */
/* - op is am_Source capable AND */
/* - the Load is only used by this op AND */
/* - the Load is in the same block */
/* - Store -> op -> Load into AMop with am_Dest */
/* - op is am_Dest capable AND */
/* - the Store uses the same address as the Load AND */
/* - the Load is only used by this op AND */
/* - the Load and Store are in the same block AND */
/* - nobody else uses the result of the op */
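/* am_Source folds the Load so one operand is read directly from memory;     */
/* am_Dest additionally folds the Store so the op works on memory directly.  */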
if ((res == irn) && (get_ia32_am_support(irn) != ia32_am_None) && !is_ia32_Lea(irn)) {
/* 1st: check for Load/Store -> LEA */
if (is_ia32_Load(irn) || is_ia32_fLoad(irn) ||
is_ia32_Store(irn) || is_ia32_fStore(irn))
left = get_irn_n(irn, 0);
if (is_ia32_Lea(left)) {
/* get the AM attributes from the LEA */
add_ia32_am_offs(irn, get_ia32_am_offs(left));
set_ia32_am_scale(irn, get_ia32_am_scale(left));
set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
set_ia32_op_type(irn, get_ia32_op_type(left));
/* set base and index */
set_irn_n(irn, 0, get_irn_n(left, 0));
set_irn_n(irn, 1, get_irn_n(left, 1));
/* check if at least one operand is a Load */
else if (pred_is_specific_node(get_irn_n(irn, 2), is_ia32_Load) ||
pred_is_specific_node(get_irn_n(irn, 2), is_ia32_fLoad) ||
pred_is_specific_node(get_irn_n(irn, 3), is_ia32_Load) ||
pred_is_specific_node(get_irn_n(irn, 3), is_ia32_fLoad))
/* normalize commutative ops */
if (node_is_comm(irn)) {
left = get_irn_n(irn, 2);
right = get_irn_n(irn, 3);
/* ensure that the left operand is always a Load if there is one */
if (pred_is_specific_node(right, is_ia32_Load) ||
pred_is_specific_node(right, is_ia32_fLoad))
set_irn_n(irn, 2, right);
set_irn_n(irn, 3, left);
/* check for Store -> op -> Load */
/* Store -> op -> Load optimization is only possible if supported by op */
if (get_ia32_am_support(irn) & ia32_am_Dest) {
/* An address mode capable op always has a result Proj. */
/* If this Proj is used by more than one other node, we don't need to */
/* check further, otherwise we check for a Store and remember the address */
/* the Store points to. */
succ = get_edge_src_irn(get_irn_out_edge_first(irn));
assert(is_Proj(succ) && "successor of AM node is not Proj");
if (get_Proj_proj(succ) != 0) {
succ = get_edge_src_irn(get_irn_out_edge_next(irn, get_irn_out_edge_first(irn)));
assert(is_Proj(succ) && "successor of AM node is not Proj");
assert(get_Proj_proj(succ) == 0 && "Couldn't find result proj");
/* now check for users and Store */
if (get_irn_n_edges(succ) == 1) {
succ = get_edge_src_irn(get_irn_out_edge_first(succ));
if (is_ia32_fStore(succ) || is_ia32_Store(succ)) {
addr_b = get_irn_n(store, 0);
/* It could be that the Store is still connected to the address- */
/* calculating LEA while the Load has already been transformed.  */
if (is_ia32_Lea(addr_b)) {
addr_b = get_irn_n(succ, 0);
addr_i = get_irn_n(succ, 1);
/* we found a Store as the single user: now check for a Load */
left = get_irn_n(irn, 2);
right = get_irn_n(irn, 3);
/* Could be that the right operand is also a Load, so we make */
/* sure that the "interesting" Load is always the left one */
/* right != NoMem means we have a "binary" operation */
if (! is_NoMem(right) &&
(pred_is_specific_node(right, is_ia32_Load) ||
pred_is_specific_node(right, is_ia32_fLoad)))
if ((addr_b == get_irn_n(get_Proj_pred(right), 0)) &&
(addr_i == get_irn_n(get_Proj_pred(right), 1)))
/* We exchange left and right, so it's easier to kill */
/* the correct Load later and to handle unary operations. */
set_irn_n(irn, 2, right);
set_irn_n(irn, 3, left);
/* skip the Proj for easier access */
left = get_Proj_pred(left);
/* Compare Load and Store address */
if ((addr_b == get_irn_n(left, 0)) && (addr_i == get_irn_n(left, 1)))
/* the left Load is from the same address, so we can */
/* disconnect the Load and Store here */
/* set new base, index and attributes */
set_irn_n(irn, 0, addr_b);
set_irn_n(irn, 1, addr_i);
add_ia32_am_offs(irn, get_ia32_am_offs(left));
set_ia32_am_scale(irn, get_ia32_am_scale(left));
set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
set_ia32_op_type(irn, ia32_AddrModeD);
/* connect to Load memory */
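/* (binary ops have inputs base, index, left, right, mem -> mem at index 4; */
/*  unary ops lack the right operand -> mem at index 3)                     */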
if (get_irn_arity(irn) == 5) {
set_irn_n(irn, 4, get_irn_n(left, 2));
set_irn_n(irn, 3, get_irn_n(left, 2));
/* disconnect from Load */
set_irn_n(irn, 2, noreg_gp);
/* connect the memory Proj of the Store to the op */
mem_proj = get_mem_proj(store);
set_Proj_pred(mem_proj, irn);
set_Proj_proj(mem_proj, 1);
else if (get_ia32_am_support(irn) & ia32_am_Source) {
/* There was no Store; check if we can still optimize for source address mode */
} /* if (support AM Dest) */
/* op doesn't support AM Dest -> check for AM Source */
/* optimize op -> Load iff Load is only used by this op */
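/* e.g. Add (Load (ptr), x) becomes an Add that reads one operand directly from memory */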
left = get_irn_n(irn, 2);
if (get_irn_n_edges(left) == 1) {
left = get_Proj_pred(left);
addr_b = get_irn_n(left, 0);
addr_i = get_irn_n(left, 1);
/* set new base, index and attributes */
set_irn_n(irn, 0, addr_b);
set_irn_n(irn, 1, addr_i);
add_ia32_am_offs(irn, get_ia32_am_offs(left));
set_ia32_am_scale(irn, get_ia32_am_scale(left));
set_ia32_am_flavour(irn, get_ia32_am_flavour(left));
set_ia32_op_type(irn, ia32_AddrModeS);
/* connect to Load memory */
if (get_irn_arity(irn) == 5) {
set_irn_n(irn, 4, get_irn_n(left, 2));
set_irn_n(irn, 3, get_irn_n(left, 2));
/* disconnect from Load */
set_irn_n(irn, 2, noreg_gp);
/* If the Load has a memory Proj, connect it to the op */
mem_proj = get_mem_proj(left);
set_Proj_pred(mem_proj, irn);
set_Proj_proj(mem_proj, 1);