 * File name:   ir/be/ia32/ia32_optimize.c
 * Purpose:     Implements several optimizations for IA32
 * Author:      Christian Wuerdig
 * Copyright:   (c) 2006 Universitaet Karlsruhe
 * Licence:     This file is protected by GPL - GNU GENERAL PUBLIC LICENSE.

#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"

#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

typedef enum {
    IA32_AM_CAND_NONE  = 0,  /**< no address mode possible with irn inputs */
    IA32_AM_CAND_LEFT  = 1,  /**< address mode possible with left input */
    IA32_AM_CAND_RIGHT = 2,  /**< address mode possible with right input */
    IA32_AM_CAND_BOTH  = 3   /**< address mode possible with both inputs */
} ia32_am_cand_t;

typedef int is_op_func_t(const ir_node *n);
typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem);

/**
 * Checks if a node represents the NOREG value.
 */
static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
    return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
}

void ia32_pre_transform_phase(ia32_code_gen_t *cg) {
    /*
        We need to transform the consts twice:
        - the psi condition tree transformer needs existing constants to be ia32 constants
        - the psi condition tree transformer inserts new firm constants which need to be transformed
    */
    //ia32_transform_all_firm_consts(cg);
    irg_walk_graph(cg->irg, NULL, ia32_transform_psi_cond_tree, cg);
    //ia32_transform_all_firm_consts(cg);
}

/********************************************************************************************************
 *
 *                               Peephole Optimizations
 *
 ********************************************************************************************************/

/**
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
 */

84 static int ia32_const_equal(const ir_node *n1, const ir_node *n2) {
85 if(get_ia32_immop_type(n1) != get_ia32_immop_type(n2))
88 if(get_ia32_immop_type(n1) == ia32_ImmConst) {
89 return get_ia32_Immop_tarval(n1) == get_ia32_Immop_tarval(n2);
90 } else if(get_ia32_immop_type(n1) == ia32_ImmSymConst) {
91 return get_ia32_Immop_symconst(n1) == get_ia32_Immop_symconst(n2);
94 assert(get_ia32_immop_type(n1) == ia32_ImmNone);
/**
 * Checks for potential CJmp/CJmpAM optimization candidates.
 */
static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
    ir_node *cand = NULL;
    ir_node *prev = sched_prev(irn);

    if (is_Block(prev)) {
        if (get_Block_n_cfgpreds(prev) == 1)
            prev = get_Block_cfgpred(prev, 0);
    }

    /* The predecessor must be a ProjX. */
    if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
        prev = get_Proj_pred(prev);
    }

    if (is_op_func(prev))
        cand = prev;

    return cand;
}

static int is_TestJmp_cand(const ir_node *irn) {
    return is_ia32_TestJmp(irn) || is_ia32_And(irn);
}

/**
 * Checks if two consecutive arguments of cand match
 * the two arguments of irn (TestJmp).
 */
static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
    ir_node *in1 = get_irn_n(irn, 0);
    ir_node *in2 = get_irn_n(irn, 1);
    int i, n = get_irn_arity(cand);
    int same_args = 0;

    for (i = 0; i < n - 1; i++) {
        if (get_irn_n(cand, i) == in1 &&
                get_irn_n(cand, i + 1) == in2)
        {
            same_args = 1;
            break;
        }
    }

    if (! same_args)
        return 0;

    return ia32_const_equal(cand, irn);
}

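/*
 * A sketch of the intended replacement (operand names are illustrative,
 * not taken from real output): a TestJmp that tests exactly the values a
 * preceding And already combined can branch on the And's flags directly,
 * so on the IR level
 *
 *     t = And(a, b)  ...  TestJmp(a, b)
 *
 * becomes
 *
 *     t = And(a, b)  ...  CJmpAM
 *
 * and no separate test instruction has to be emitted.
 */
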
/**
 * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And).
 */
static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
    int replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_TestJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((dbg, LEVEL_1, "replacing %+F by ", irn));

        if (is_ia32_And(cand))
            set_irn_op(irn, op_ia32_CJmpAM);
        else
            set_irn_op(irn, op_ia32_CJmp);

        DB((dbg, LEVEL_1, "%+F\n", irn));
    }
}

static int is_CondJmp_cand(const ir_node *irn) {
    return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
}

/**
 * Checks if the arguments of cand are the same as those of irn.
 */
static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
    int i, arity;

    arity = get_irn_arity(cand);
    for (i = 0; i < arity; i++) {
        if (get_irn_n(cand, i) != get_irn_n(irn, i)) {
            return 0;
        }
    }

    return ia32_const_equal(cand, irn);
}

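/*
 * A sketch of the intended replacement (operand names are illustrative):
 * cmp and sub set the flags in exactly the same way, so a CondJmp(a, b)
 * scheduled after a Sub(a, b) can reuse the Sub's flags:
 *
 *     d = Sub(a, b)  ...  CondJmp(a, b)
 *
 * becomes
 *
 *     d = Sub(a, b)  ...  CJmpAM
 *
 * and the explicit cmp disappears from the emitted code.
 */
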
/**
 * Tries to replace a CondJmp by a CJmpAM.
 */
static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
    int replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_CondJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((dbg, LEVEL_1, "replacing %+F by ", irn));

        set_irn_op(irn, op_ia32_CJmpAM);

        DB((dbg, LEVEL_1, "%+F\n", irn));
    }
}

// only optimize up to 48 stores behind IncSPs
#define MAXPUSH_OPTIMIZE 48

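/*
 * A sketch of the transformation done below (assembly only illustrative):
 * an IncSP that reserves 8 bytes followed by Stores to all reserved slots
 *
 *     sub esp, 8
 *     mov [esp],   eax
 *     mov [esp+4], ebx
 *
 * can be expressed as two Pushs; note the inverse order, since push
 * decrements esp before storing:
 *
 *     push ebx
 *     push eax
 */
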
/**
 * Tries to create Pushs from IncSP, Store combinations.
 */
static void ia32_create_Pushs(ir_node *irn, ia32_code_gen_t *cg) {
    int i;
    int offset;
    ir_node *node;
    ir_node *stores[MAXPUSH_OPTIMIZE];
    ir_node *block = get_nodes_block(irn);
    ir_graph *irg = cg->irg;
    ir_node *curr_sp;
    ir_mode *spmode = get_irn_mode(irn);

    memset(stores, 0, sizeof(stores));

    assert(be_is_IncSP(irn));

    offset = be_get_IncSP_offset(irn);

    /*
     * We first walk the schedule after the IncSP node as long as we find
     * suitable stores that could be transformed to a push.
     * We save them into the stores array which is sorted by the frame offset/4
     * attached to the node
     */
    for(node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
        ir_node *mem;
        int offset;
        int storeslot;

        // it has to be a store
        if(!is_ia32_Store(node))
            break;

        // it has to use our sp value
        if(get_irn_n(node, 0) != irn)
            continue;
        // store has to be attached to NoMem
        mem = get_irn_n(node, 3);
        if(!is_NoMem(mem))
            continue;

        if( (get_ia32_am_flavour(node) & ia32_am_IS) != 0)
            break;

        offset = get_ia32_am_offs_int(node);

        storeslot = offset / 4;
        if(storeslot >= MAXPUSH_OPTIMIZE)
            continue;

        // storing into the same slot twice is bad (and shouldn't happen...)
        if(stores[storeslot] != NULL)
            break;

        // storing at half-slots is bad
        if(offset % 4 != 0)
            break;

        stores[storeslot] = node;
    }

    curr_sp = get_irn_n(irn, 0);

    // walk the stores in inverse order and create pushes for them
    i = (offset / 4) - 1;
    if(i >= MAXPUSH_OPTIMIZE) {
        i = MAXPUSH_OPTIMIZE - 1;
    }

    for( ; i >= 0; --i) {
        const arch_register_t *spreg;
        ir_node *push;
        ir_node *val, *mem, *mem_proj;
        ir_node *store = stores[i];
        ir_node *noreg = ia32_new_NoReg_gp(cg);

        if(store == NULL || is_Bad(store))
            break;

        val = get_irn_n(store, 2);
        mem = get_irn_n(store, 3);
        spreg = arch_get_irn_register(cg->arch_env, curr_sp);

        // create a push
        push = new_rd_ia32_Push(NULL, irg, block, noreg, noreg, val, curr_sp, mem);

        set_ia32_am_support(push, ia32_am_Source);
        copy_ia32_Immop_attr(push, store);

        sched_add_before(irn, push);

        // create stackpointer proj
        curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
        arch_set_irn_register(cg->arch_env, curr_sp, spreg);
        sched_add_before(irn, curr_sp);

        // create memory proj
        mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);
        sched_add_before(irn, mem_proj);

        // use the memproj now
        exchange(store, mem_proj);

        // we can remove the store now
        sched_remove(store);

        offset -= 4;
    }

    be_set_IncSP_offset(irn, offset);

    // can we remove the IncSP now?
    if(offset == 0) {
        const ir_edge_t *edge, *next;

        foreach_out_edge_safe(irn, edge, next) {
            ir_node *arg = get_edge_src_irn(edge);
            int pos = get_edge_src_pos(edge);

            set_irn_n(arg, pos, curr_sp);
        }

        set_irn_n(irn, 0, new_Bad());
        sched_remove(irn);
    } else {
        set_irn_n(irn, 0, curr_sp);
    }
}

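/*
 * A sketch of the effect (offsets are illustrative): two stack pointer
 * adjustments with a single use between them, e.g.
 *
 *     sub esp, 8
 *     sub esp, 16
 *
 * are merged into one
 *
 *     sub esp, 24
 *
 * by adding the offsets and skipping the superfluous IncSP.
 */
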
/**
 * Tries to optimize two consecutive IncSP nodes.
 */
static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *prev = be_get_IncSP_pred(irn);
    int real_uses = get_irn_n_edges(prev);

    if (be_is_IncSP(prev) && real_uses == 1) {
        /* first IncSP has only one IncSP user, kill the first one */
        int prev_offs = be_get_IncSP_offset(prev);
        int curr_offs = be_get_IncSP_offset(irn);

        be_set_IncSP_offset(irn, prev_offs + curr_offs);

        /* Omit the optimized IncSP */
        be_set_IncSP_pred(irn, be_get_IncSP_pred(prev));

        set_irn_n(prev, 0, new_Bad());
        sched_remove(prev);
    }
}

/**
 * Performs Peephole Optimizations.
 */
static void ia32_peephole_optimize_node(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;

    /* AMD CPUs want explicit compare before conditional jump */
    if (! ARCH_AMD(cg->opt_arch)) {
        if (is_ia32_TestJmp(irn))
            ia32_optimize_TestJmp(irn, cg);
        else if (is_ia32_CondJmp(irn))
            ia32_optimize_CondJmp(irn, cg);
    }

    if (be_is_IncSP(irn)) {
        // optimize_IncSP doesn't respect dependency edges yet...
        //ia32_optimize_IncSP(irn, cg);

        if (cg->opt & IA32_OPT_PUSHARGS)
            ia32_create_Pushs(irn, cg);
    }
}

void ia32_peephole_optimization(ir_graph *irg, ia32_code_gen_t *cg) {
    irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, cg);
}

/******************************************************************
 *
 *                  Address Mode Optimization
 *
 ******************************************************************/

static int node_is_ia32_comm(const ir_node *irn) {
    return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}

static int ia32_get_irn_n_edges(const ir_node *irn) {
    const ir_edge_t *edge;
    int cnt = 0;

    foreach_out_edge(irn, edge) {
        cnt++;
    }

    return cnt;
}

/**
 * Determines if is_op_func returns true for pred.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
    return is_op_func(pred);
}

/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
    int (*is_op_func)(const ir_node *n))
{
    if (is_Proj(pred)) {
        pred = get_Proj_pred(pred);
        if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
            return 1;
        }
    }

    return 0;
}

/**
 * Checks if irn is a candidate for address calculation. We avoid transforming
 * adds to leas if they have a load as pred, because then we can use AM mode
 * for the add later.
 *
 * A candidate fulfills:
 * - none of the operands is a Load within the same block, OR
 * - all such Loads have more than one user.
 *
 * @param irn The irn to check
 * @return 1 if irn is a candidate, 0 otherwise
 */
static int is_addr_candidate(const ir_node *irn) {
#ifndef AGGRESSIVE_AM
    const ir_node *block = get_nodes_block(irn);
    ir_node *left, *right;
    int n;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) {
        n = ia32_get_irn_n_edges(left);
        /* load with only one user: don't create LEA */
        if (n == 1)
            return 0;
    }

    if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
        n = ia32_get_irn_n_edges(right);
        if (n == 1)
            return 0;
    }
#endif

    return 1;
}

/**
 * Checks if irn is a candidate for address mode.
 *
 * A candidate fulfills:
 * - at least one operand has to be a Load within the same block AND
 * - the load must not have other users than the irn AND
 * - the irn must not have a frame entity set
 *
 * @param cg    The ia32 code generator
 * @param h     The height information of the irg
 * @param block The block the Loads must/must not be in
 * @param irn   The irn to check
 * @return 0 if irn is no candidate, 1 if left load can be used, 2 if right one, 3 for both
 */
static ia32_am_cand_t is_am_candidate(ia32_code_gen_t *cg, heights_t *h, const ir_node *block, ir_node *irn) {
    ir_node *in, *load, *other, *left, *right;
    int is_cand = 0, cand;
    int arity;

    if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn) || is_ia32_vfild(irn) || is_ia32_vfist(irn) ||
        is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn))
        return IA32_AM_CAND_NONE;

    left  = get_irn_n(irn, 2);
    arity = get_irn_arity(irn);
    assert(arity == 5 || arity == 4);
    if (arity == 5) {
        /* binary op */
        right = get_irn_n(irn, 3);
    } else {
        /* unary op */
        right = left;
    }

    in = left;

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
#ifndef AGGRESSIVE_AM
        int n;
        n = ia32_get_irn_n_edges(in);
        is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
#else
        is_cand = 1;
#endif

        load  = get_Proj_pred(in);
        other = right;

        /* 8bit Loads are not supported (for binary ops),
         * they cannot be used with every register */
        if (get_irn_arity(irn) != 4 && get_mode_size_bits(get_ia32_ls_mode(load)) < 16) {
            assert(get_irn_arity(irn) == 5);
            is_cand = 0;
        }

        /* If there is a data dependency of other irn from load: cannot use AM */
        if (is_cand && get_nodes_block(other) == block) {
            other = skip_Proj(other);
            is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
            /* this could happen in loops */
            is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
        }
    }

    cand    = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE;
    in      = right;
    is_cand = 0;

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
#ifndef AGGRESSIVE_AM
        int n;
        n = ia32_get_irn_n_edges(in);
        is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
#else
        is_cand = 1;
#endif

        load  = get_Proj_pred(in);
        other = left;

        /* 8bit Loads are not supported, they cannot be used with every register */
        if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
            is_cand = 0;

        /* If there is a data dependency of other irn from load: cannot use load */
        if (is_cand && get_nodes_block(other) == block) {
            other = skip_Proj(other);
            is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
            /* this could happen in loops */
            is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
        }
    }

    cand = is_cand ? (cand | IA32_AM_CAND_RIGHT) : cand;

    /* if the irn has a frame entity: we do not use address mode */
    return get_ia32_frame_ent(irn) ? IA32_AM_CAND_NONE : cand;
}

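/*
 * Example of the classification (names illustrative): for
 *
 *     v   = Proj(Load(p))        (only user is the Add below)
 *     res = Add(v, x)
 *
 * the Add is an IA32_AM_CAND_LEFT candidate, so the Load can later be
 * folded into the operation, yielding something like
 *
 *     add res, [p]               (source address mode)
 */
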
/**
 * Compares the base and index addr and the load/store entities
 * and returns 1 if they are equal.
 */
static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
                                    const ir_node *addr_b, const ir_node *addr_i)
{
    if(get_irn_n(load, 0) != addr_b)
        return 0;
    if(get_irn_n(load, 1) != addr_i)
        return 0;

    if(get_ia32_frame_ent(load) != get_ia32_frame_ent(store))
        return 0;

    if(get_ia32_am_sc(load) != get_ia32_am_sc(store))
        return 0;
    if(is_ia32_am_sc_sign(load) != is_ia32_am_sc_sign(store))
        return 0;
    if(get_ia32_am_offs_int(load) != get_ia32_am_offs_int(store))
        return 0;
    if(get_ia32_ls_mode(load) != get_ia32_ls_mode(store))
        return 0;

    return 1;
}

typedef enum _ia32_take_lea_attr {
    IA32_LEA_ATTR_NONE  = 0,
    IA32_LEA_ATTR_BASE  = (1 << 0),
    IA32_LEA_ATTR_INDEX = (1 << 1),
    IA32_LEA_ATTR_OFFS  = (1 << 2),
    IA32_LEA_ATTR_SCALE = (1 << 3),
    IA32_LEA_ATTR_AMSC  = (1 << 4),
    IA32_LEA_ATTR_FENT  = (1 << 5)
} ia32_take_lea_attr;

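/*
 * These attributes form a bitset describing which parts of an existing LEA
 * may be folded into a new one. For an illustrative
 *
 *     lea1 = LEA [ebx + 8]       (base + offset)
 *     add  = lea1 + ecx
 *
 * do_new_lea() below would answer IA32_LEA_ATTR_BASE | IA32_LEA_ATTR_OFFS,
 * allowing the combined LEA [ebx + ecx + 8].
 */
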
/**
 * Decides if we have to keep the LEA operand or if we can assimilate it.
 */
static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea,
    int have_am_sc, ia32_code_gen_t *cg)
{
    ir_entity *irn_ent = get_ia32_frame_ent(irn);
    ir_entity *lea_ent = get_ia32_frame_ent(lea);
    int ret_val = 0;
    int is_noreg_base  = be_is_NoReg(cg, base);
    int is_noreg_index = be_is_NoReg(cg, index);
    ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea);

    /* If the Add and the LEA both have a different frame entity set: keep */
    if (irn_ent && lea_ent && (irn_ent != lea_ent))
        return IA32_LEA_ATTR_NONE;
    else if (! irn_ent && lea_ent)
        ret_val |= IA32_LEA_ATTR_FENT;

    /* If the Add and the LEA both have already an address mode symconst: keep */
    if (have_am_sc && get_ia32_am_sc(lea))
        return IA32_LEA_ATTR_NONE;
    else if (get_ia32_am_sc(lea))
        ret_val |= IA32_LEA_ATTR_AMSC;

    /* Check the different base-index combinations */

    if (! is_noreg_base && ! is_noreg_index) {
        /* Assimilate if base is the lea and the LEA is just a Base + Offset calculation */
        if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) {
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (! is_noreg_base && is_noreg_index) {
        /* Base is set but index not */
        if (base == lea) {
            /* Base points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_I)
                ret_val |= IA32_LEA_ATTR_INDEX;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else if (am_flav & ia32_B ? 0 : 1) {
            /* Base is not the LEA but the LEA is an index only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (is_noreg_base && ! is_noreg_index) {
        /* Index is set but not base */
        if (index == lea) {
            /* Index points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_B)
                ret_val |= IA32_LEA_ATTR_BASE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else if (am_flav & ia32_I ? 0 : 1) {
            /* Index is not the LEA but the LEA is a base only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else {
        assert(0 && "There must have been set base or index");
    }

    return ret_val;
}

/**
 * Adds res before irn into schedule if irn was scheduled.
 * @param irn The schedule point
 * @param res The node to be scheduled
 */
static INLINE void try_add_to_sched(ir_node *irn, ir_node *res) {
    if (sched_is_scheduled(irn))
        sched_add_before(irn, res);
}

/**
 * Removes a node from schedule if it is not used anymore. If node is a mode_T
 * node, all its Projs are removed as well.
 * @param node The node to be removed from schedule
 */
static INLINE void try_remove_from_sched(ir_node *node) {
    int i, arity;

    if(get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;
        foreach_out_edge(node, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            try_remove_from_sched(proj);
        }
    }

    if(get_irn_n_edges(node) != 0)
        return;

    if (sched_is_scheduled(node)) {
        sched_remove(node);
    }

    arity = get_irn_arity(node);
    for(i = 0; i < arity; ++i) {
        set_irn_n(node, i, new_Bad());
    }
}

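/*
 * A sketch of what the folding below produces (register names are
 * illustrative): an address computation like
 *
 *     t   = Shl(ecx, 2)
 *     res = Add(ebx, t)          (with immediate attribute 12)
 *
 * collapses into a single
 *
 *     lea res, [ebx + ecx*4 + 12]
 *
 * consuming the shift, a previous LEA and/or an am_O LEA where possible.
 */
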
/**
 * Folds Add or Sub to LEA if possible
 */
static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn) {
    ir_graph   *irg        = get_irn_irg(irn);
    dbg_info   *dbg_info   = get_irn_dbg_info(irn);
    ir_node    *block      = get_nodes_block(irn);
    ir_node    *res        = irn;
    ir_node    *shift      = NULL;
    ir_node    *lea_o      = NULL;
    ir_node    *lea        = NULL;
    int         offs       = 0;
    int         offs_cnst  = 0;
    int         offs_lea   = 0;
    int         scale      = 0;
    int         isadd      = 0;
    int         dolea      = 0;
    int         have_am_sc = 0;
    int         am_sc_sign = 0;
    ir_entity  *am_sc      = NULL;
    ir_entity  *lea_ent    = NULL;
    ir_node    *noreg      = ia32_new_NoReg_gp(cg);
    ir_node    *left, *right, *temp;
    ir_node    *base, *index;
    int consumed_left_shift;
    ia32_am_flavour_t am_flav;

    if (is_ia32_Add(irn))
        isadd = 1;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    /* "normalize" arguments in case of add with two operands */
    if (isadd && ! be_is_NoReg(cg, right)) {
        /* put LEA == ia32_am_O as right operand */
        if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put LEA != ia32_am_O as left operand */
        if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put SHL as left operand iff left is NOT a LEA */
        if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }
    }

    base  = left;
    index = noreg;

    /* check for operation with immediate */
    if (is_ia32_ImmConst(irn)) {
        tarval *tv = get_ia32_Immop_tarval(irn);

        DBG((dbg, LEVEL_1, "\tfound op with imm const"));

        offs_cnst = get_tarval_long(tv);
        dolea     = 1;
    }
    else if (isadd && is_ia32_ImmSymConst(irn)) {
        DBG((dbg, LEVEL_1, "\tfound op with imm symconst"));

        have_am_sc = 1;
        dolea      = 1;
        am_sc      = get_ia32_Immop_symconst(irn);
        am_sc_sign = is_ia32_am_sc_sign(irn);
    }

    /* determine the operand which needs to be checked */
    temp = be_is_NoReg(cg, right) ? left : right;

    /* check if right operand is AMConst (LEA with ia32_am_O)  */
    /* but we can only eat it up if there is no other symconst */
    /* because the linker won't accept two symconsts           */
    if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
        DBG((dbg, LEVEL_1, "\tgot op with LEA am_O"));
        lea_o      = temp;
        offs_lea   = get_ia32_am_offs_int(temp);
        am_sc      = get_ia32_am_sc(temp);
        am_sc_sign = is_ia32_am_sc_sign(temp);
        have_am_sc = 1;
        dolea      = 1;

        if (temp == base)
            base = noreg;
        else if (temp == right)
            right = noreg;
    }

    if (isadd) {
        /* default for add -> make right operand to index */
        index               = right;
        dolea               = 1;
        consumed_left_shift = -1;

        DBG((dbg, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));

        /* determine the operand which needs to be checked */
        temp = left;
        if (is_ia32_Lea(left)) {
            temp = right;
            consumed_left_shift = 0;
        }

        /* check for SHL 1,2,3 */
        if (pred_is_specific_node(temp, is_ia32_Shl)) {

            if (is_ia32_ImmConst(temp)) {
                long shiftval = get_tarval_long(get_ia32_Immop_tarval(temp));

                if (shiftval <= 3) {
                    index               = get_irn_n(temp, 2);
                    consumed_left_shift = consumed_left_shift < 0 ? 1 : 0;

                    shift = temp;
                    scale = shiftval;

                    DBG((dbg, LEVEL_1, "\tgot scaled index %+F\n", index));
                }
            }
        }

        /* fix base */
        if (! be_is_NoReg(cg, index)) {
            /* if we have index, but left == right -> no base */
            if (left == right) {
                base = noreg;
            }
            else if (consumed_left_shift == 1) {
                /* -> base is right operand */
                base = (right == lea_o) ? noreg : right;
            }
        }
    }

    /* Try to assimilate a LEA as left operand */
    if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
        /* check if we can assimilate the LEA */
        int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg);

        if (take_attr == IA32_LEA_ATTR_NONE) {
            DBG((dbg, LEVEL_1, "\tleave old LEA, creating new one\n"));
        }
        else {
            DBG((dbg, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
            lea = left; /* for statistics */

            if (take_attr & IA32_LEA_ATTR_OFFS)
                offs = get_ia32_am_offs_int(left);

            if (take_attr & IA32_LEA_ATTR_AMSC) {
                am_sc      = get_ia32_am_sc(left);
                have_am_sc = 1;
                am_sc_sign = is_ia32_am_sc_sign(left);
            }

            if (take_attr & IA32_LEA_ATTR_SCALE)
                scale = get_ia32_am_scale(left);

            if (take_attr & IA32_LEA_ATTR_BASE)
                base = get_irn_n(left, 0);

            if (take_attr & IA32_LEA_ATTR_INDEX)
                index = get_irn_n(left, 1);

            if (take_attr & IA32_LEA_ATTR_FENT)
                lea_ent = get_ia32_frame_ent(left);
        }
    }

    /* ok, we can create a new LEA */
    if (dolea) {
        res = new_rd_ia32_Lea(dbg_info, irg, block, base, index);

        /* add the old offset of a previous LEA */
        add_ia32_am_offs_int(res, offs);

        /* add the new offset */
        if (isadd) {
            add_ia32_am_offs_int(res, offs_cnst);
            add_ia32_am_offs_int(res, offs_lea);
        } else {
            /* either lea_O-cnst, -cnst or -lea_O */
            if (offs_cnst != 0) {
                add_ia32_am_offs_int(res, offs_lea);
                add_ia32_am_offs_int(res, -offs_cnst);
            } else {
                add_ia32_am_offs_int(res, offs_lea);
            }
        }

        /* set the address mode symconst */
        if (have_am_sc) {
            set_ia32_am_sc(res, am_sc);
            if (am_sc_sign)
                set_ia32_am_sc_sign(res);
        }

        /* copy the frame entity (could be set in case of Add */
        /* which was a FrameAddr)                             */
        if (lea_ent != NULL) {
            set_ia32_frame_ent(res, lea_ent);
            set_ia32_use_frame(res);
        } else {
            set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
            if(is_ia32_use_frame(irn))
                set_ia32_use_frame(res);
        }

        set_ia32_am_scale(res, scale);

        am_flav = ia32_am_N;
        /* determine new am flavour */
        if (offs || offs_cnst || offs_lea || have_am_sc) {
            am_flav |= ia32_O;
        }
        if (! be_is_NoReg(cg, base)) {
            am_flav |= ia32_B;
        }
        if (! be_is_NoReg(cg, index)) {
            am_flav |= ia32_I;
        }
        if (scale > 0) {
            am_flav |= ia32_S;
        }
        set_ia32_am_flavour(res, am_flav);

        set_ia32_op_type(res, ia32_AddrModeS);

        SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));

        DBG((dbg, LEVEL_1, "\tLEA [%+F + %+F * %d + %d]\n", base, index, scale, get_ia32_am_offs_int(res)));

        assert(irn && "Couldn't find result proj");

        /* get the result Proj of the Add/Sub */
        try_add_to_sched(irn, res);

        /* exchange the old op with the new LEA */
        try_remove_from_sched(irn);
        exchange(irn, res);

        /* we will exchange it, report here before the Proj is created */
        if (shift && lea && lea_o) {
            try_remove_from_sched(shift);
            try_remove_from_sched(lea);
            try_remove_from_sched(lea_o);
            DBG_OPT_LEA4(irn, lea_o, lea, shift, res);
        } else if (shift && lea) {
            try_remove_from_sched(shift);
            try_remove_from_sched(lea);
            DBG_OPT_LEA3(irn, lea, shift, res);
        } else if (shift && lea_o) {
            try_remove_from_sched(shift);
            try_remove_from_sched(lea_o);
            DBG_OPT_LEA3(irn, lea_o, shift, res);
        } else if (lea && lea_o) {
            try_remove_from_sched(lea);
            try_remove_from_sched(lea_o);
            DBG_OPT_LEA3(irn, lea_o, lea, res);
        } else if (shift) {
            try_remove_from_sched(shift);
            DBG_OPT_LEA2(irn, shift, res);
        } else if (lea) {
            try_remove_from_sched(lea);
            DBG_OPT_LEA2(irn, lea, res);
        } else if (lea_o) {
            try_remove_from_sched(lea_o);
            DBG_OPT_LEA2(irn, lea_o, res);
        } else {
            DBG_OPT_LEA1(irn, res);
        }
    }

    return res;
}

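/*
 * A sketch of the merge below (registers illustrative): a Load fed by a LEA
 *
 *     lea eax, [ebx + ecx*4 + 8]
 *     mov edx, [eax]
 *
 * becomes a single instruction using the LEA's address attributes:
 *
 *     mov edx, [ebx + ecx*4 + 8]
 */
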
/**
 * Merges a Load/Store node with a LEA.
 * @param irn The Load/Store node
 * @param lea The LEA
 */
static void merge_loadstore_lea(ir_node *irn, ir_node *lea) {
    ir_entity *irn_ent = get_ia32_frame_ent(irn);
    ir_entity *lea_ent = get_ia32_frame_ent(lea);

    /* If the irn and the LEA both have a different frame entity set: do not merge */
    if (irn_ent != NULL && lea_ent != NULL && (irn_ent != lea_ent))
        return;
    else if (irn_ent == NULL && lea_ent != NULL) {
        set_ia32_frame_ent(irn, lea_ent);
        set_ia32_use_frame(irn);
    }

    /* get the AM attributes from the LEA */
    add_ia32_am_offs_int(irn, get_ia32_am_offs_int(lea));
    set_ia32_am_scale(irn, get_ia32_am_scale(lea));
    set_ia32_am_flavour(irn, get_ia32_am_flavour(lea));

    set_ia32_am_sc(irn, get_ia32_am_sc(lea));
    if (is_ia32_am_sc_sign(lea))
        set_ia32_am_sc_sign(irn);

    set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);

    /* set base and index */
    set_irn_n(irn, 0, get_irn_n(lea, 0));
    set_irn_n(irn, 1, get_irn_n(lea, 1));

    try_remove_from_sched(lea);

    /* clear remat flag */
    set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

    if (is_ia32_Ld(irn))
        DBG_OPT_LOAD_LEA(lea, irn);
    else
        DBG_OPT_STORE_LEA(lea, irn);
}

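/*
 * Swapping the operands of a compare requires mirroring the relation,
 * e.g. (illustrative) Cmp(a, b) with pnc "<" is equivalent to Cmp(b, a)
 * with pnc ">"; get_inversed_pnc() computes exactly this mirrored code.
 */
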
/**
 * Sets new_right index of irn to right and new_left index to left.
 * Also exchanges left and right.
 */
static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right, int new_left, int new_right) {
    ir_node *temp;

    set_irn_n(irn, new_right, *right);
    set_irn_n(irn, new_left, *left);

    temp   = *left;
    *left  = *right;
    *right = temp;

    /* this is only needed for Compares, but currently ALL nodes
     * have this attribute :-) */
    set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
}

/**
 * Performs address calculation optimization (create LEAs if possible).
 */
static void optimize_lea(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;

    if (! is_ia32_irn(irn))
        return;

    /* Following cases can occur:                                */
    /* - Sub (l, imm) -> LEA [base - offset]                     */
    /* - Sub (l, r == LEA with ia32_am_O) -> LEA [base - offset] */
    /* - Add (l, imm) -> LEA [base + offset]                     */
    /* - Add (l, r == LEA with ia32_am_O) -> LEA [base + offset] */
    /* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset] */
    /* - Add (l, r) -> LEA [base + index * scale]                */
    /*   with scale > 1 iff l/r == shl (1,2,3)                   */
    if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
        ir_node *res;

        if(!is_addr_candidate(irn))
            return;

        DBG((dbg, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
        res = fold_addr(cg, irn);

        if (res != irn)
            DB((dbg, LEVEL_1, "transformed into %+F\n", res));
        else
            DB((dbg, LEVEL_1, "not transformed\n"));
    } else if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
        /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
        /* - Store -> LEA into Store }       it might be better to keep the LEA             */
        ir_node *left = get_irn_n(irn, 0);

        if (is_ia32_Lea(left)) {
            const ir_edge_t *edge, *ne;
            ir_node *src;

            /* merge all Loads/Stores connected to this LEA with the LEA */
            foreach_out_edge_safe(left, edge, ne) {
                src = get_edge_src_irn(edge);

                if (src && (get_edge_src_pos(edge) == 0) && (is_ia32_Ld(src) || is_ia32_St(src) || is_ia32_Store8Bit(src))) {
                    DBG((dbg, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
                    if (! is_ia32_got_lea(src))
                        merge_loadstore_lea(src, left);
                    set_ia32_got_lea(src);
                }
            }
        }
    }
}

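/*
 * The two address mode forms handled below (assembly only illustrative).
 * Source AM folds a Load into the operation:
 *
 *     mov eax, [ebx]
 *     add ecx, eax        =>    add ecx, [ebx]
 *
 * Dest AM folds a whole Load/op/Store round trip into one instruction:
 *
 *     mov eax, [ebx]
 *     add eax, ecx        =>    add [ebx], ecx
 *     mov [ebx], eax
 */
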
/**
 * Checks for address mode patterns and performs the
 * necessary transformations.
 * This function is called by a walker.
 */
static void optimize_am(ir_node *irn, void *env) {
    ia32_am_opt_env_t *am_opt_env = env;
    ia32_code_gen_t   *cg         = am_opt_env->cg;
    ir_graph          *irg        = get_irn_irg(irn);
    heights_t         *h          = am_opt_env->h;
    ir_node           *block, *left, *right;
    ir_node           *store, *load, *mem_proj;
    ir_node           *addr_b, *addr_i;
    int               need_exchange_on_fail = 0;
    ia32_am_type_t    am_support;
    ia32_am_cand_t    cand;
    ia32_am_cand_t    orig_cand;
    int               dest_possible;
    int               source_possible;

    if (!is_ia32_irn(irn) || is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn))
        return;
    if (is_ia32_Lea(irn))
        return;

    am_support = get_ia32_am_support(irn);
    block = get_nodes_block(irn);

    DBG((dbg, LEVEL_1, "checking for AM\n"));

    /* fold following patterns:
     * - op -> Load into AMop with am_Source
     *   conditions:
     *     - op is am_Source capable AND
     *     - the Load is only used by this op AND
     *     - the Load is in the same block
     * - Store -> op -> Load into AMop with am_Dest
     *   conditions:
     *     - op is am_Dest capable AND
     *     - the Store uses the same address as the Load AND
     *     - the Load is only used by this op AND
     *     - the Load and Store are in the same block AND
     *     - nobody else uses the result of the op
     */
    if (get_ia32_am_support(irn) == ia32_am_None)
        return;

    cand = is_am_candidate(cg, h, block, irn);
    if (cand == IA32_AM_CAND_NONE)
        return;

    orig_cand = cand;
    DBG((dbg, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));

    left = get_irn_n(irn, 2);
    if (get_irn_arity(irn) == 4) {
        /* it's an "unary" operation */
        right = left;
        assert(cand == IA32_AM_CAND_BOTH);
    } else {
        right = get_irn_n(irn, 3);
    }

    dest_possible = am_support & ia32_am_Dest ? 1 : 0;
    source_possible = am_support & ia32_am_Source ? 1 : 0;

    if (dest_possible) {
        addr_b = NULL;
        addr_i = NULL;
        store  = NULL;

        /* we should only have 1 user which is a store */
        if (ia32_get_irn_n_edges(irn) == 1) {
            ir_node *succ = get_edge_src_irn(get_irn_out_edge_first(irn));

            if (is_ia32_xStore(succ) || is_ia32_Store(succ)) {
                store  = succ;
                addr_b = get_irn_n(store, 0);
                addr_i = get_irn_n(store, 1);
            }
        }

        if (store == NULL) {
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* normalize nodes, we need the interesting load on the left side */
        if (cand & IA32_AM_CAND_RIGHT) {
            load = get_Proj_pred(right);
            if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
                exchange_left_right(irn, &left, &right, 3, 2);
                need_exchange_on_fail ^= 1;
                if (cand == IA32_AM_CAND_RIGHT)
                    cand = IA32_AM_CAND_LEFT;
            }
        }
    }

    if (dest_possible) {
        if(cand & IA32_AM_CAND_LEFT && is_Proj(left)) {
            load = get_Proj_pred(left);

#ifndef AGGRESSIVE_AM
            /* we have to be the only user of the load */
            if (get_irn_n_edges(left) > 1) {
                dest_possible = 0;
            }
#endif
        } else {
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* the store has to use the loads memory or the same memory
         * as the load */
        ir_node *loadmem = get_irn_n(load, 2);
        ir_node *storemem = get_irn_n(store, 3);
        assert(get_irn_mode(loadmem) == mode_M);
        assert(get_irn_mode(storemem) == mode_M);
        if(storemem != loadmem || !is_Proj(storemem)
                || get_Proj_pred(storemem) != load) {
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* Compare Load and Store address */
        if (!load_store_addr_is_equal(load, store, addr_b, addr_i))
            dest_possible = 0;
    }

    if (dest_possible) {
        /* all conditions fulfilled, do the transformation */
        assert(cand & IA32_AM_CAND_LEFT);

        /* set new base, index and attributes */
        set_irn_n(irn, 0, addr_b);
        set_irn_n(irn, 1, addr_i);
        add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
        set_ia32_am_scale(irn, get_ia32_am_scale(load));
        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
        set_ia32_op_type(irn, ia32_AddrModeD);
        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

        set_ia32_am_sc(irn, get_ia32_am_sc(load));
        if (is_ia32_am_sc_sign(load))
            set_ia32_am_sc_sign(irn);

        /* connect to Load memory and disconnect Load */
        if (get_irn_arity(irn) == 5) {
            /* binary AMop */
            set_irn_n(irn, 4, get_irn_n(load, 2));
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
        } else {
            /* unary AMop */
            set_irn_n(irn, 3, get_irn_n(load, 2));
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
        }

        set_irn_mode(irn, mode_M);

        /* connect the memory Proj of the Store to the op */
        mem_proj = ia32_get_proj_for_mode(store, mode_M);
        edges_reroute(mem_proj, irn, irg);

        /* clear remat flag */
        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

        try_remove_from_sched(load);
        try_remove_from_sched(store);
        DBG_OPT_AM_D(load, store, irn);

        DB((dbg, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
        need_exchange_on_fail = 0;
        source_possible = 0;
    }

    if (source_possible) {
        /* normalize ops, we need the load on the right */
        if(cand == IA32_AM_CAND_LEFT) {
            if(node_is_ia32_comm(irn)) {
                exchange_left_right(irn, &left, &right, 3, 2);
                need_exchange_on_fail ^= 1;
                cand = IA32_AM_CAND_RIGHT;
            } else {
                source_possible = 0;
            }
        }
    }

    if (source_possible) {
        /* all conditions fulfilled, do transform */
        assert(cand & IA32_AM_CAND_RIGHT);
        load = get_Proj_pred(right);

        if(get_irn_n_edges(load) > 1) {
            source_possible = 0;
        }
    }

    if (source_possible) {
        addr_b = get_irn_n(load, 0);
        addr_i = get_irn_n(load, 1);

        /* set new base, index and attributes */
        set_irn_n(irn, 0, addr_b);
        set_irn_n(irn, 1, addr_i);
        add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
        set_ia32_am_scale(irn, get_ia32_am_scale(load));
        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
        set_ia32_op_type(irn, ia32_AddrModeS);
        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

        set_ia32_am_sc(irn, get_ia32_am_sc(load));
        if (is_ia32_am_sc_sign(load))
            set_ia32_am_sc_sign(irn);

        /* clear remat flag */
        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

        if (is_ia32_use_frame(load)) {
            if(get_ia32_frame_ent(load) == NULL) {
                set_ia32_need_stackent(irn);
            }
            set_ia32_use_frame(irn);
        }

        /* connect to Load memory and disconnect Load */
        if (get_irn_arity(irn) == 5) {
            /* binary AMop */
            set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
            set_irn_n(irn, 4, get_irn_n(load, 2));
        } else {
            assert(get_irn_arity(irn) == 4);
            /* unary AMop */
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
            set_irn_n(irn, 3, get_irn_n(load, 2));
        }

        DBG_OPT_AM_S(load, irn);

        /* If Load has a memory Proj, connect it to the op */
        mem_proj = ia32_get_proj_for_mode(load, mode_M);
        if (mem_proj != NULL) {
            ir_node *res_proj;
            ir_mode *mode = get_irn_mode(irn);

            res_proj = new_rd_Proj(get_irn_dbg_info(irn), irg,
                                   get_nodes_block(irn), new_Unknown(mode_T),
                                   mode, 0);
            set_irn_mode(irn, mode_T);
            edges_reroute(irn, res_proj, irg);
            set_Proj_pred(res_proj, irn);

            set_Proj_pred(mem_proj, irn);
            set_Proj_proj(mem_proj, 1);

            if(sched_is_scheduled(irn)) {
                sched_add_after(irn, res_proj);
                sched_add_after(irn, mem_proj);
            }
        }

        if(get_irn_n_edges(load) == 0) {
            try_remove_from_sched(load);
        }
        need_exchange_on_fail = 0;

        DB((dbg, LEVEL_1, "merged with %+F into source AM\n", load));
    }

    /* was exchanged but optimize failed: exchange back */
    if (need_exchange_on_fail) {
        exchange_left_right(irn, &left, &right, 3, 2);
    }
}

/**
 * Performs address mode optimization.
 */
void ia32_optimize_addressmode(ia32_code_gen_t *cg) {
    /* if we are supposed to do AM or LEA optimization: recalculate edges */
    if (cg->opt & (IA32_OPT_DOAM | IA32_OPT_LEA)) {
        edges_deactivate(cg->irg);
        edges_activate(cg->irg);
    }
    else {
        /* no optimizations at all */
        return;
    }

    /* beware: we cannot optimize LEA and AM in one run because */
    /*         LEA optimization adds new nodes to the irg which */
    /*         invalidates the phase data                       */

    if (cg->opt & IA32_OPT_LEA) {
        irg_walk_blkwise_graph(cg->irg, NULL, optimize_lea, cg);
    }

    if (cg->dump)
        be_dump(cg->irg, "-lea", dump_ir_block_graph_sched);

    /* hack for now, so these don't get created during optimize, because then
     * they will be unknown to the heights module
     */
    ia32_new_NoReg_gp(cg);
    ia32_new_NoReg_fp(cg);
    ia32_new_NoReg_vfp(cg);

    if (cg->opt & IA32_OPT_DOAM) {
        /* we need height information for am optimization */
        heights_t *h = heights_new(cg->irg);
        ia32_am_opt_env_t env;

        env.cg = cg;
        env.h  = h;

        irg_walk_blkwise_graph(cg->irg, NULL, optimize_am, &env);

        heights_free(h);
    }
}

void ia32_init_optimize(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}