/*
 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief  Implements several optimizations for IA32.
 * @author Christian Wuerdig
 */
#include "firm_types.h"

/* additional headers used by the code below (assumed standard libFirm layout) */
#include "irnode.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "iredges.h"
#include "tv.h"
#include "height.h"

#include "../benode_t.h"
#include "../besched_t.h"

#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if_t.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
typedef enum {
    IA32_AM_CAND_NONE  = 0,  /**< no address mode possible with irn inputs */
    IA32_AM_CAND_LEFT  = 1,  /**< address mode possible with left input */
    IA32_AM_CAND_RIGHT = 2,  /**< address mode possible with right input */
    IA32_AM_CAND_BOTH  = 3   /**< address mode possible with both inputs */
} ia32_am_cand_t;
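/* The candidate kinds form a bitmask: IA32_AM_CAND_BOTH equals
 * IA32_AM_CAND_LEFT | IA32_AM_CAND_RIGHT, so a single operand can be
 * tested with e.g. (cand & IA32_AM_CAND_LEFT). */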
typedef int is_op_func_t(const ir_node *n);
typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem);
/**
 * Checks if a node represents the NOREG value.
 */
static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
    return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
}
void ia32_pre_transform_phase(ia32_code_gen_t *cg) {
    /*
        We need to transform the consts twice:
        - the psi condition tree transformer needs existing constants to be ia32 constants
        - the psi condition tree transformer inserts new firm constants which need to be transformed
    */
    irg_walk_graph(cg->irg, NULL, ia32_transform_psi_cond_tree, cg);
}
/********************************************************************************************************
 *
 *                          P e e p h o l e   O p t i m i z a t i o n
 *
 ********************************************************************************************************/
/**
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
 */
static int ia32_const_equal(const ir_node *n1, const ir_node *n2) {
    if (get_ia32_immop_type(n1) != get_ia32_immop_type(n2))
        return 0;

    if (get_ia32_immop_type(n1) == ia32_ImmConst) {
        return get_ia32_Immop_tarval(n1) == get_ia32_Immop_tarval(n2);
    } else if (get_ia32_immop_type(n1) == ia32_ImmSymConst) {
        return get_ia32_Immop_symconst(n1) == get_ia32_Immop_symconst(n2);
    }

    assert(get_ia32_immop_type(n1) == ia32_ImmNone);
    return 1;
}
/**
 * Checks for potential CJmp/CJmpAM optimization candidates.
 */
static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
    ir_node *cand = NULL;
    ir_node *prev = sched_prev(irn);

    if (is_Block(prev)) {
        if (get_Block_n_cfgpreds(prev) == 1)
            prev = get_Block_cfgpred(prev, 0);
    }

    /* The predecessor must be a ProjX. */
    if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
        prev = get_Proj_pred(prev);

        if (is_op_func(prev))
            cand = prev;
    }

    return cand;
}
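/* Sketch of the matched pattern (illustrative, shapes assumed):
 *
 *     cand:  And / Sub / TestJmp ...      <-- flag-setting ALU op
 *       |
 *     ProjX  (via a block with a single control flow predecessor)
 *       |
 *     irn:   TestJmp / CondJmp on the same arguments
 *
 * If the candidate already computes the comparison, the jump can reuse
 * its flags and be turned into a CJmp/CJmpAM. */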
static int is_TestJmp_cand(const ir_node *irn) {
    return is_ia32_TestJmp(irn) || is_ia32_And(irn);
}
/**
 * Checks if two consecutive arguments of cand match
 * the two arguments of irn (TestJmp).
 */
static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
    ir_node *in1       = get_irn_n(irn, 0);
    ir_node *in2       = get_irn_n(irn, 1);
    int      same_args = 0;
    int      i, n      = get_irn_arity(cand);

    for (i = 0; i < n - 1; i++) {
        if (get_irn_n(cand, i)     == in1 &&
            get_irn_n(cand, i + 1) == in2)
        {
            same_args = 1;
            break;
        }
    }

    if (! same_args)
        return 0;

    return ia32_const_equal(cand, irn);
}
/**
 * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of And).
 */
static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand    = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
    int      replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_TestJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((dbg, LEVEL_1, "replacing %+F by ", irn));

        if (is_ia32_And(cand))
            set_irn_op(irn, op_ia32_CJmpAM);
        else
            set_irn_op(irn, op_ia32_CJmp);

        DB((dbg, LEVEL_1, "%+F\n", irn));
    }
}
static int is_CondJmp_cand(const ir_node *irn) {
    return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
}
/**
 * Checks if the arguments of cand are the same as those of irn.
 */
static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
    int i, arity;

    arity = get_irn_arity(cand);
    for (i = 0; i < arity; i++) {
        if (get_irn_n(cand, i) != get_irn_n(irn, i)) {
            return 0;
        }
    }

    return ia32_const_equal(cand, irn);
}
/**
 * Tries to replace a CondJmp by a CJmpAM.
 */
static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand    = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
    int      replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_CondJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((dbg, LEVEL_1, "replacing %+F by ", irn));

        set_irn_op(irn, op_ia32_CJmpAM);

        DB((dbg, LEVEL_1, "%+F\n", irn));
    }
}
// only optimize up to 48 stores behind IncSPs
#define MAXPUSH_OPTIMIZE 48
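/*
 * Illustrative sketch of the transformation below (offsets assumed, three
 * 4-byte argument stores):
 *
 *     IncSP 12                          Push c   (esp -= 4, [esp] = c)
 *     Store [sp + 8] = c                Push b   (esp -= 4, [esp] = b)
 *     Store [sp + 4] = b      ==>       Push a   (esp -= 4, [esp] = a)
 *     Store [sp + 0] = a                (IncSP becomes superfluous)
 *
 * Each Push both decrements the stack pointer and stores its value, so it
 * replaces one Store plus 4 bytes of the IncSP offset.
 */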
/**
 * Tries to create Pushes from IncSP, Store combinations.
 */
static void ia32_create_Pushs(ir_node *irn, ia32_code_gen_t *cg) {
    int       i;
    int       offset;
    ir_node  *node;
    ir_node  *stores[MAXPUSH_OPTIMIZE];
    ir_node  *block  = get_nodes_block(irn);
    ir_graph *irg    = cg->irg;
    ir_node  *curr_sp;
    ir_mode  *spmode = get_irn_mode(irn);

    memset(stores, 0, sizeof(stores));

    assert(be_is_IncSP(irn));

    offset = be_get_IncSP_offset(irn);
    /*
     * We first walk the schedule after the IncSP node as long as we find
     * suitable Stores that could be transformed to a Push.
     * We save them into the stores array which is sorted by the frame offset/4
     * attached to the node.
     */
    for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
        ir_node *mem;
        int      offset;
        int      storeslot;

        // it has to be a store
        if (!is_ia32_Store(node))
            break;

        // it has to use our sp value
        if (get_irn_n(node, 0) != irn)
            continue;
        // store has to be attached to NoMem
        mem = get_irn_n(node, 3);
        if (!is_NoMem(mem))
            continue;

        // stores with an index or scaled index cannot become a Push
        if ((get_ia32_am_flavour(node) & ia32_am_IS) != 0)
            break;

        offset = get_ia32_am_offs_int(node);

        storeslot = offset / 4;
        if (storeslot >= MAXPUSH_OPTIMIZE)
            continue;

        // storing into the same slot twice is bad (and shouldn't happen...)
        if (stores[storeslot] != NULL)
            break;

        // storing at half-slots is bad
        if (offset % 4 != 0)
            break;

        stores[storeslot] = node;
    }
    curr_sp = get_irn_n(irn, 0);

    // walk the stores in inverse order and create Pushes for them
    i = (offset / 4) - 1;
    if (i >= MAXPUSH_OPTIMIZE) {
        i = MAXPUSH_OPTIMIZE - 1;
    }

    for ( ; i >= 0; --i) {
        const arch_register_t *spreg;
        ir_node *push;
        ir_node *val, *mem, *mem_proj;
        ir_node *store = stores[i];
        ir_node *noreg = ia32_new_NoReg_gp(cg);

        if (store == NULL || is_Bad(store))
            break;

        val   = get_irn_n(store, 2);
        mem   = get_irn_n(store, 3);
        spreg = arch_get_irn_register(cg->arch_env, curr_sp);

        push = new_rd_ia32_Push(NULL, irg, block, noreg, noreg, val, curr_sp, mem);

        set_ia32_am_support(push, ia32_am_Source);
        copy_ia32_Immop_attr(push, store);

        sched_add_before(irn, push);

        // create stackpointer proj
        curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
        arch_set_irn_register(cg->arch_env, curr_sp, spreg);
        sched_add_before(irn, curr_sp);

        // create memory proj
        mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);
        sched_add_before(irn, mem_proj);

        // use the memproj now
        exchange(store, mem_proj);

        // we can remove the store now
        sched_remove(store);

        offset -= 4;
    }

    be_set_IncSP_offset(irn, offset);
    // can we remove the IncSP now?
    if (offset == 0) {
        const ir_edge_t *edge, *next;

        foreach_out_edge_safe(irn, edge, next) {
            ir_node *arg = get_edge_src_irn(edge);
            int      pos = get_edge_src_pos(edge);

            set_irn_n(arg, pos, curr_sp);
        }

        set_irn_n(irn, 0, new_Bad());
        sched_remove(irn);
    } else {
        set_irn_n(irn, 0, curr_sp);
    }
}
/**
 * Tries to optimize two consecutive IncSP nodes.
 */
static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *prev      = be_get_IncSP_pred(irn);
    int      real_uses = get_irn_n_edges(prev);

    if (be_is_IncSP(prev) && real_uses == 1) {
        /* first IncSP has only one IncSP user, kill the first one */
        int prev_offs = be_get_IncSP_offset(prev);
        int curr_offs = be_get_IncSP_offset(irn);

        be_set_IncSP_offset(prev, prev_offs + curr_offs);

        /* Omit the optimized IncSP */
        be_set_IncSP_pred(irn, be_get_IncSP_pred(prev));

        set_irn_n(prev, 0, new_Bad());
        sched_remove(prev);
    }
}
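/* Illustrative example (offsets assumed): IncSP +8 directly followed by
 * IncSP +4, where the first one has no other user, folds into a single
 * stack pointer adjustment of +12. */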
/**
 * Performs peephole optimizations.
 */
static void ia32_peephole_optimize_node(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;

    /* AMD CPUs want explicit compare before conditional jump */
    if (! ARCH_AMD(cg->opt_arch)) {
        if (is_ia32_TestJmp(irn))
            ia32_optimize_TestJmp(irn, cg);
        else if (is_ia32_CondJmp(irn))
            ia32_optimize_CondJmp(irn, cg);
    }

    if (be_is_IncSP(irn)) {
        // optimize_IncSP doesn't respect dependency edges yet...
        //ia32_optimize_IncSP(irn, cg);

        if (cg->opt & IA32_OPT_PUSHARGS)
            ia32_create_Pushs(irn, cg);
    }
}
void ia32_peephole_optimization(ir_graph *irg, ia32_code_gen_t *cg) {
    irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, cg);
}
/******************************************************************
 *
 *                  A d d r e s s   M o d e
 *
 ******************************************************************/
static int node_is_ia32_comm(const ir_node *irn) {
    return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}
static int ia32_get_irn_n_edges(const ir_node *irn) {
    const ir_edge_t *edge;
    int cnt = 0;

    foreach_out_edge(irn, edge) {
        cnt++;
    }

    return cnt;
}
/**
 * Checks if is_op_func returns true for the given node.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if the condition is fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
    return is_op_func(pred);
}
/**
 * Determines if pred is a Proj, if is_op_func returns true for its
 * predecessor, and if that predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
    int (*is_op_func)(const ir_node *n))
{
    if (is_Proj(pred)) {
        pred = get_Proj_pred(pred);
        if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
            return 1;
        }
    }

    return 0;
}
/**
 * Checks if irn is a candidate for address calculation. We avoid transforming
 * Adds to LEAs if they have a Load as predecessor, because then we can use
 * address mode for the Add later on.
 *
 * - none of the operands is a Load within the same block, OR
 * - all such Loads have more than one user.
 *
 * @param block The block the Loads must/must not be in
 * @param irn   The irn to check
 * @return 1 if irn is a candidate, 0 otherwise
 */
static int is_addr_candidate(const ir_node *irn) {
#ifndef AGGRESSIVE_AM
    const ir_node *block = get_nodes_block(irn);
    ir_node *left, *right;
    int      n;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) {
        n = ia32_get_irn_n_edges(left);
        /* load with only one user: don't create LEA */
        if (n == 1)
            return 0;
    }

    if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
        n = ia32_get_irn_n_edges(right);
        if (n == 1)
            return 0;
    }
#endif

    return 1;
}
/**
 * Checks if irn is a candidate for address mode.
 *
 * address mode (AM):
 * - at least one operand has to be a Load within the same block AND
 * - the Load must not have other users than the irn AND
 * - the irn must not have a frame entity set
 *
 * @param cg    The ia32 code generator
 * @param h     The height information of the irg
 * @param block The block the Loads must/must not be in
 * @param irn   The irn to check
 * @return 0 if irn is no candidate, 1 if left load can be used, 2 if right one, 3 for both
 */
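/* Illustrative example (hypothetical nodes): for irn = Add(Proj(Load(p)), x),
 * with the Load in the same block and the Proj used only by the Add, the
 * result is IA32_AM_CAND_LEFT; if both operands qualify, IA32_AM_CAND_BOTH. */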
static ia32_am_cand_t is_am_candidate(ia32_code_gen_t *cg, heights_t *h, const ir_node *block, ir_node *irn) {
    ir_node *in, *load, *other, *left, *right;
    int      is_cand = 0, cand;
    int      arity;

    if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn) || is_ia32_vfild(irn) || is_ia32_vfist(irn) ||
        is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn))
        return IA32_AM_CAND_NONE;

    /* if the irn has a frame entity: we do not use address mode */
    if (get_ia32_frame_ent(irn) != NULL)
        return IA32_AM_CAND_NONE;

    left  = get_irn_n(irn, 2);
    arity = get_irn_arity(irn);
    assert(arity == 5 || arity == 4);
    if (arity == 5) {
        /* binary op */
        right = get_irn_n(irn, 3);
    } else {
        /* unary op */
        right = left;
    }

    in = left;

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
#ifndef AGGRESSIVE_AM
        int n;
        n       = ia32_get_irn_n_edges(in);
        is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
#else
        is_cand = 1;
#endif

        load  = get_Proj_pred(in);
        other = right;

        /* 8bit Loads are not supported (for binary ops),
         * they cannot be used with every register */
        if (get_irn_arity(irn) != 4 && get_mode_size_bits(get_ia32_ls_mode(load)) < 16) {
            assert(get_irn_arity(irn) == 5);
            is_cand = 0;
        }

        /* If the other operand has a data dependency on the load: cannot use AM */
        if (is_cand && get_nodes_block(other) == block) {
            other   = skip_Proj(other);
            is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
            /* this could happen in loops */
            is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
        }
    }

    cand = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE;

    in      = right;
    is_cand = 0;

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
#ifndef AGGRESSIVE_AM
        int n;
        n       = ia32_get_irn_n_edges(in);
        is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
#else
        is_cand = 1;
#endif

        load  = get_Proj_pred(in);
        other = left;

        /* 8bit Loads are not supported, they cannot be used with every register */
        if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
            is_cand = 0;

        /* If the other operand has a data dependency on the load: cannot use the load */
        if (is_cand && get_nodes_block(other) == block) {
            other   = skip_Proj(other);
            is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
            /* this could happen in loops */
            is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
        }
    }

    cand = is_cand ? (cand | IA32_AM_CAND_RIGHT) : cand;

    return cand;
}
/**
 * Compares the base and index addresses and the load/store entities
 * and returns 1 if they are equal.
 */
static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
                                    const ir_node *addr_b, const ir_node *addr_i)
{
    if (get_irn_n(load, 0) != addr_b)
        return 0;
    if (get_irn_n(load, 1) != addr_i)
        return 0;

    if (get_ia32_frame_ent(load) != get_ia32_frame_ent(store))
        return 0;

    if (get_ia32_am_sc(load) != get_ia32_am_sc(store))
        return 0;
    if (is_ia32_am_sc_sign(load) != is_ia32_am_sc_sign(store))
        return 0;
    if (get_ia32_am_offs_int(load) != get_ia32_am_offs_int(store))
        return 0;
    if (get_ia32_ls_mode(load) != get_ia32_ls_mode(store))
        return 0;

    return 1;
}
typedef enum _ia32_take_lea_attr {
    IA32_LEA_ATTR_NONE  = 0,
    IA32_LEA_ATTR_BASE  = (1 << 0),
    IA32_LEA_ATTR_INDEX = (1 << 1),
    IA32_LEA_ATTR_OFFS  = (1 << 2),
    IA32_LEA_ATTR_SCALE = (1 << 3),
    IA32_LEA_ATTR_AMSC  = (1 << 4),
    IA32_LEA_ATTR_FENT  = (1 << 5)
} ia32_take_lea_attr;
/**
 * Decides if we have to keep the LEA operand or if we can assimilate it.
 */
static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea,
        int have_am_sc, ia32_code_gen_t *cg)
{
    ir_entity *irn_ent = get_ia32_frame_ent(irn);
    ir_entity *lea_ent = get_ia32_frame_ent(lea);
    int        ret_val = 0;
    int        is_noreg_base  = be_is_NoReg(cg, base);
    int        is_noreg_index = be_is_NoReg(cg, index);
    ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea);

    /* If the Add and the LEA both have different frame entities set: keep */
    if (irn_ent && lea_ent && (irn_ent != lea_ent))
        return IA32_LEA_ATTR_NONE;
    else if (! irn_ent && lea_ent)
        ret_val |= IA32_LEA_ATTR_FENT;

    /* If the Add and the LEA both already have an address mode symconst: keep */
    if (have_am_sc && get_ia32_am_sc(lea))
        return IA32_LEA_ATTR_NONE;
    else if (get_ia32_am_sc(lea))
        ret_val |= IA32_LEA_ATTR_AMSC;

    /* Check the different base-index combinations */

    if (! is_noreg_base && ! is_noreg_index) {
        /* Assimilate if base is the LEA and the LEA is just a Base + Offset calculation */
        if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) {
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (! is_noreg_base && is_noreg_index) {
        /* Base is set but index not */
        if (base == lea) {
            /* Base points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_I)
                ret_val |= IA32_LEA_ATTR_INDEX;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else if (am_flav & ia32_B ? 0 : 1) {
            /* Base is not the LEA but the LEA is an index only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (is_noreg_base && ! is_noreg_index) {
        /* Index is set but not base */
        if (index == lea) {
            /* Index points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_B)
                ret_val |= IA32_LEA_ATTR_BASE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else if (am_flav & ia32_I ? 0 : 1) {
            /* Index is not the LEA but the LEA is a base only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else {
        assert(0 && "base or index must be set");
    }

    return ret_val;
}
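/* Illustrative example (hypothetical operands): for irn = Add(lea, eax) with
 * lea = LEA [ebx + 8] (base + offset only), this returns
 * IA32_LEA_ATTR_BASE | IA32_LEA_ATTR_OFFS, so fold_addr() below can build
 * LEA [ebx + eax + 8] and let the old LEA die. */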
/**
 * Adds res to the schedule before irn if irn is scheduled.
 * @param irn The schedule point
 * @param res The node to be scheduled
 */
static INLINE void try_add_to_sched(ir_node *irn, ir_node *res) {
    if (sched_is_scheduled(irn))
        sched_add_before(irn, res);
}
/**
 * Removes a node from the schedule if it is not used anymore. If irn is a
 * mode_T node, all its Projs are removed as well.
 * @param irn The irn to be removed from schedule
 */
static INLINE void try_remove_from_sched(ir_node *node) {
    int i, arity;

    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge, *next;
        foreach_out_edge_safe(node, edge, next) {
            ir_node *proj = get_edge_src_irn(edge);
            try_remove_from_sched(proj);
        }
    }

    if (get_irn_n_edges(node) != 0)
        return;

    if (sched_is_scheduled(node)) {
        sched_remove(node);
    }

    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        set_irn_n(node, i, new_Bad());
    }
}
/**
 * Folds Add or Sub to LEA if possible.
 */
static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn) {
    ir_graph   *irg        = get_irn_irg(irn);
    dbg_info   *dbg_info   = get_irn_dbg_info(irn);
    ir_node    *block      = get_nodes_block(irn);
    ir_node    *res        = irn;
    ir_node    *shift      = NULL;
    ir_node    *lea_o      = NULL;
    ir_node    *lea        = NULL;
    long        offs       = 0;
    long        offs_cnst  = 0;
    long        offs_lea   = 0;
    int         scale      = 0;
    int         isadd      = 0;
    int         dolea      = 0;
    int         have_am_sc = 0;
    int         am_sc_sign = 0;
    ir_entity  *am_sc      = NULL;
    ir_entity  *lea_ent    = NULL;
    ir_node    *noreg      = ia32_new_NoReg_gp(cg);
    ir_node    *left, *right, *temp;
    ir_node    *base, *index;
    int         consumed_left_shift;
    ia32_am_flavour_t am_flav;

    if (is_ia32_Add(irn))
        isadd = 1;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    /* "normalize" arguments in case of add with two operands */
    if (isadd && ! be_is_NoReg(cg, right)) {
        /* put LEA == ia32_am_O as right operand */
        if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put LEA != ia32_am_O as left operand */
        if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put SHL as left operand iff left is NOT a LEA */
        if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }
    }

    base  = left;
    index = noreg;

    /* check for operation with immediate */
    if (is_ia32_ImmConst(irn)) {
        tarval *tv = get_ia32_Immop_tarval(irn);

        DBG((dbg, LEVEL_1, "\tfound op with imm const"));

        offs_cnst = get_tarval_long(tv);
        dolea     = 1;
    }
    else if (isadd && is_ia32_ImmSymConst(irn)) {
        DBG((dbg, LEVEL_1, "\tfound op with imm symconst"));

        have_am_sc = 1;
        dolea      = 1;
        am_sc      = get_ia32_Immop_symconst(irn);
        am_sc_sign = is_ia32_am_sc_sign(irn);
    }

    /* determine the operand which needs to be checked */
    temp = be_is_NoReg(cg, right) ? left : right;

    /* check if right operand is AMConst (LEA with ia32_am_O),  */
    /* but we can only eat it up if there is no other symconst, */
    /* because the linker won't accept two symconsts            */
    if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
        DBG((dbg, LEVEL_1, "\tgot op with LEA am_O"));

        offs_lea   = get_ia32_am_offs_int(temp);
        am_sc      = get_ia32_am_sc(temp);
        am_sc_sign = is_ia32_am_sc_sign(temp);
        lea_o      = temp;
        have_am_sc = 1;
        dolea      = 1;

        if (temp == base)
            base = noreg;
        else if (temp == right)
            right = noreg;
    }

    if (isadd) {
        /* default for add -> make right operand to index */
        index = right;
        dolea = 1;
        consumed_left_shift = -1;

        DBG((dbg, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));

        /* determine the operand which needs to be checked */
        temp = left;
        if (is_ia32_Lea(left)) {
            temp = right;
            consumed_left_shift = 0;
        }

        /* check for SHL 1,2,3 */
        if (pred_is_specific_node(temp, is_ia32_Shl)) {
            if (is_ia32_ImmConst(temp)) {
                long shiftval = get_tarval_long(get_ia32_Immop_tarval(temp));

                if (shiftval <= 3) {
                    index               = get_irn_n(temp, 2);
                    consumed_left_shift = consumed_left_shift < 0 ? 1 : 0;
                    shift = temp;
                    scale = shiftval;

                    DBG((dbg, LEVEL_1, "\tgot scaled index %+F\n", index));
                }
            }
        }

        /* fix base */
        if (! be_is_NoReg(cg, index)) {
            /* if we have index, but left == right -> no base */
            if (left == right) {
                base = noreg;
            }
            else if (consumed_left_shift == 1) {
                /* -> base is right operand */
                base = (right == lea_o) ? noreg : right;
            }
        }
    }

    /* Try to assimilate a LEA as left operand */
    if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
        /* check if we can assimilate the LEA */
        int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg);

        if (take_attr == IA32_LEA_ATTR_NONE) {
            DBG((dbg, LEVEL_1, "\tleave old LEA, creating new one\n"));
        }
        else {
            DBG((dbg, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
            lea = left; /* for statistics */

            if (take_attr & IA32_LEA_ATTR_OFFS)
                offs = get_ia32_am_offs_int(left);

            if (take_attr & IA32_LEA_ATTR_AMSC) {
                am_sc      = get_ia32_am_sc(left);
                have_am_sc = 1;
                am_sc_sign = is_ia32_am_sc_sign(left);
            }

            if (take_attr & IA32_LEA_ATTR_SCALE)
                scale = get_ia32_am_scale(left);

            if (take_attr & IA32_LEA_ATTR_BASE)
                base = get_irn_n(left, 0);

            if (take_attr & IA32_LEA_ATTR_INDEX)
                index = get_irn_n(left, 1);

            if (take_attr & IA32_LEA_ATTR_FENT)
                lea_ent = get_ia32_frame_ent(left);
        }
    }

    /* ok, we can create a new LEA */
    if (dolea) {
        res = new_rd_ia32_Lea(dbg_info, irg, block, base, index);
        /* we don't want stuff before the barrier... */
        if (be_is_NoReg(cg, base) && be_is_NoReg(cg, index)) {
            add_irn_dep(res, get_irg_frame(irg));
        }

        /* add the old offset of a previous LEA */
        add_ia32_am_offs_int(res, offs);

        /* add the new offset */
        if (isadd) {
            add_ia32_am_offs_int(res, offs_cnst);
            add_ia32_am_offs_int(res, offs_lea);
        } else {
            /* either lea_O-cnst, -cnst or -lea_O */
            if (offs_cnst != 0) {
                add_ia32_am_offs_int(res, offs_lea);
                add_ia32_am_offs_int(res, -offs_cnst);
            } else {
                add_ia32_am_offs_int(res, offs_lea);
            }
        }

        /* set the address mode symconst */
        if (have_am_sc) {
            set_ia32_am_sc(res, am_sc);
            if (am_sc_sign)
                set_ia32_am_sc_sign(res);
        }

        /* copy the frame entity (could be set in case of Add */
        /* which was a FrameAddr)                              */
        if (lea_ent != NULL) {
            set_ia32_frame_ent(res, lea_ent);
            set_ia32_use_frame(res);
        } else {
            set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
            if (is_ia32_use_frame(irn))
                set_ia32_use_frame(res);
        }

        set_ia32_am_scale(res, scale);

        am_flav = ia32_am_N;
        /* determine new am flavour */
        if (offs || offs_cnst || offs_lea || have_am_sc) {
            am_flav |= ia32_O;
        }
        if (! be_is_NoReg(cg, base)) {
            am_flav |= ia32_B;
        }
        if (! be_is_NoReg(cg, index)) {
            am_flav |= ia32_I;
        }
        if (scale > 0) {
            am_flav |= ia32_S;
        }
        set_ia32_am_flavour(res, am_flav);

        set_ia32_op_type(res, ia32_AddrModeS);

        SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));

        DBG((dbg, LEVEL_1, "\tLEA [%+F + %+F * %d + %d]\n", base, index, scale, get_ia32_am_offs_int(res)));

        assert(irn && "Couldn't find result proj");

        /* get the result Proj of the Add/Sub */
        try_add_to_sched(irn, res);

        /* exchange the old op with the new LEA */
        try_remove_from_sched(irn);

        /* we will exchange it, report here before the Proj is created */
        if (shift && lea && lea_o) {
            try_remove_from_sched(shift);
            try_remove_from_sched(lea);
            try_remove_from_sched(lea_o);
            DBG_OPT_LEA4(irn, lea_o, lea, shift, res);
        } else if (shift && lea) {
            try_remove_from_sched(shift);
            try_remove_from_sched(lea);
            DBG_OPT_LEA3(irn, lea, shift, res);
        } else if (shift && lea_o) {
            try_remove_from_sched(shift);
            try_remove_from_sched(lea_o);
            DBG_OPT_LEA3(irn, lea_o, shift, res);
        } else if (lea && lea_o) {
            try_remove_from_sched(lea);
            try_remove_from_sched(lea_o);
            DBG_OPT_LEA3(irn, lea_o, lea, res);
        } else if (shift) {
            try_remove_from_sched(shift);
            DBG_OPT_LEA2(irn, shift, res);
        } else if (lea) {
            try_remove_from_sched(lea);
            DBG_OPT_LEA2(irn, lea, res);
        } else if (lea_o) {
            try_remove_from_sched(lea_o);
            DBG_OPT_LEA2(irn, lea_o, res);
        } else {
            DBG_OPT_LEA1(irn, res);
        }

        /* now exchange the old op for the new LEA */
        exchange(irn, res);
    }

    return res;
}
/**
 * Merges a Load/Store node with a LEA.
 * @param irn The Load/Store node
 * @param lea The LEA
 */
static void merge_loadstore_lea(ir_node *irn, ir_node *lea) {
    ir_entity *irn_ent = get_ia32_frame_ent(irn);
    ir_entity *lea_ent = get_ia32_frame_ent(lea);

    /* If the irn and the LEA both have a different frame entity set: do not merge */
    if (irn_ent != NULL && lea_ent != NULL && (irn_ent != lea_ent))
        return;
    else if (irn_ent == NULL && lea_ent != NULL) {
        set_ia32_frame_ent(irn, lea_ent);
        set_ia32_use_frame(irn);
    }

    /* get the AM attributes from the LEA */
    add_ia32_am_offs_int(irn, get_ia32_am_offs_int(lea));
    set_ia32_am_scale(irn, get_ia32_am_scale(lea));
    set_ia32_am_flavour(irn, get_ia32_am_flavour(lea));

    set_ia32_am_sc(irn, get_ia32_am_sc(lea));
    if (is_ia32_am_sc_sign(lea))
        set_ia32_am_sc_sign(irn);

    set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);

    /* set base and index */
    set_irn_n(irn, 0, get_irn_n(lea, 0));
    set_irn_n(irn, 1, get_irn_n(lea, 1));

    try_remove_from_sched(lea);

    /* clear remat flag */
    set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

    if (is_ia32_Ld(irn))
        DBG_OPT_LOAD_LEA(lea, irn);
    else
        DBG_OPT_STORE_LEA(lea, irn);
}
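/* Illustrative example (hypothetical registers): a Load whose address input
 * is LEA [ebp + eax*4 + 8] becomes Load [ebp + eax*4 + 8] directly - base
 * ebp, index eax, scale 4, offset 8 - and the LEA can vanish from the
 * schedule. */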
/**
 * Sets new_right index of irn to right and new_left index to left.
 * Also exchanges left and right.
 */
static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right, int new_left, int new_right) {
    ir_node *temp;

    set_irn_n(irn, new_right, *right);
    set_irn_n(irn, new_left, *left);

    temp   = *left;
    *left  = *right;
    *right = temp;

    /* this is only needed for Compares, but currently ALL nodes
     * have this attribute :-) */
    set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
}
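/* Example: swapping the operands of a Cmp mirrors the relation, i.e. a
 * pncode meaning "a < b" must become "b > a"; get_inversed_pnc() delivers
 * exactly this swapped-operand variant. */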
/**
 * Performs address calculation optimization (creates LEAs if possible).
 */
static void optimize_lea(ia32_code_gen_t *cg, ir_node *irn) {
    if (! is_ia32_irn(irn))
        return;

    /* Following cases can occur:                                        */
    /* - Sub (l, imm)                      -> LEA [base - offset]        */
    /* - Sub (l, r == LEA with ia32_am_O)  -> LEA [base - offset]        */
    /* - Add (l, imm)                      -> LEA [base + offset]        */
    /* - Add (l, r == LEA with ia32_am_O)  -> LEA [base + offset]        */
    /* - Add (l == LEA with ia32_am_O, r)  -> LEA [base + offset]        */
    /* - Add (l, r)                        -> LEA [base + index * scale] */
    /*   with scale > 1 iff l/r == shl (1,2,3)                           */
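    /* Illustrative example (assumed operands):                          */
    /*   Add(eax, Shl(ebx, 2)) with immediate 42                         */
    /*   -> LEA [eax + ebx * 4 + 42]  -  one instruction, no flags       */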
    if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
        ir_node *res;

        if (!is_addr_candidate(irn))
            return;

        DBG((dbg, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
        res = fold_addr(cg, irn);

        if (res != irn)
            DB((dbg, LEVEL_1, "transformed into %+F\n", res));
        else
            DB((dbg, LEVEL_1, "not transformed\n"));
    } else if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
        /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
        /* - Store -> LEA into Store }       it might be better to keep the LEA             */
        ir_node *left = get_irn_n(irn, 0);

        if (is_ia32_Lea(left)) {
            const ir_edge_t *edge, *ne;
            ir_node *src;

            /* merge all Loads/Stores connected to this LEA with the LEA */
            foreach_out_edge_safe(left, edge, ne) {
                src = get_edge_src_irn(edge);

                if (src && (get_edge_src_pos(edge) == 0) && (is_ia32_Ld(src) || is_ia32_St(src) || is_ia32_Store8Bit(src))) {
                    DBG((dbg, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
                    if (! is_ia32_got_lea(src))
                        merge_loadstore_lea(src, left);
                    set_ia32_got_lea(src);
                }
            }
        }
    }
}
static void optimize_conv_store(ia32_code_gen_t *cg, ir_node *node)
{
    ir_node *pred;
    ir_mode *conv_mode;
    ir_mode *store_mode;

    if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
        return;

    pred = get_irn_n(node, 2);
    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
        return;

    /* the store only stores the lower bits, so we only need the conv
     * if it shrinks the mode */
    conv_mode  = get_ia32_ls_mode(pred);
    store_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
        return;

    set_irn_n(node, 2, get_irn_n(pred, 2));
    if (get_irn_n_edges(pred) == 0) {
        /* the conv is dead now, drop it from the schedule */
        try_remove_from_sched(pred);
    }
}
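/* Illustrative example (modes assumed): Conv_I2I(mode_Is -> mode_Hs, x)
 * feeding a 16-bit Store is redundant - the Store reads only the low
 * 16 bits of x anyway, so x can be wired to the Store directly. */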
static void optimize_load_conv(ia32_code_gen_t *cg, ir_node *node)
{
    ir_node *pred, *predpred;
    ir_mode *load_mode;
    ir_mode *conv_mode;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
        return;

    pred = get_irn_n(node, 2);
    if (!is_Proj(pred))
        return;

    predpred = get_Proj_pred(pred);
    if (!is_ia32_Load(predpred))
        return;

    /* the load is sign extending the upper bits, so we only need the conv
     * if it shrinks the mode */
    load_mode = get_ia32_ls_mode(predpred);
    conv_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
        return;

    if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
        /* change the load if it has only 1 user */
        if (get_irn_n_edges(pred) == 1) {
            ir_mode *newmode;
            if (get_mode_sign(conv_mode)) {
                newmode = find_signed_mode(load_mode);
            } else {
                newmode = find_unsigned_mode(load_mode);
            }
            assert(newmode != NULL);
            set_ia32_ls_mode(predpred, newmode);
        } else {
            /* otherwise we have to keep the conv */
            return;
        }
    }

    /* kill the conv */
    exchange(node, pred);
}
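/* Illustrative example (modes assumed): a 16-bit sign-extending Load
 * (ls_mode mode_Hs, i.e. movswl) followed by a Conv_I2I up to 32 bit is
 * redundant - the register is already sign-extended. If the Conv wants zero
 * extension instead (mode_Hu) and the Load has a single user, the Load's
 * ls_mode is flipped to the unsigned variant (movzwl). */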
static void optimize_conv_conv(ia32_code_gen_t *cg, ir_node *node)
{
    ir_node *pred;
    ir_mode *pred_mode;
    ir_mode *conv_mode;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
        return;

    assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
    pred = get_irn_n(node, n_ia32_Conv_I2I_val);
    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
        return;

    /* we know that after a conv, the upper bits are sign extended
     * so we only need the 2nd conv if it shrinks the mode */
    conv_mode = get_ia32_ls_mode(node);
    pred_mode = get_ia32_ls_mode(pred);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(pred_mode))
        return;

    /* we can't eliminate an upconv signed->unsigned */
    if (get_mode_size_bits(conv_mode) != get_mode_size_bits(pred_mode) &&
            !get_mode_sign(conv_mode) && get_mode_sign(pred_mode))
        return;

    /* kill the conv */
    exchange(node, pred);
}
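/* Illustrative example (modes assumed): Conv_I2I(mode_Hs) followed by a
 * widening Conv_I2I(mode_Is) is a no-op - the first Conv already left a
 * sign-extended 32-bit value in the register - so the second Conv is
 * replaced by its predecessor. */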
static void optimize_node(ir_node *node, void *env)
{
    ia32_code_gen_t *cg = env;

    optimize_load_conv(cg, node);
    optimize_conv_store(cg, node);
    optimize_conv_conv(cg, node);
    optimize_lea(cg, node);
}
/**
 * Checks for address mode patterns and performs the
 * necessary transformations.
 * This function is called by a walker.
 */
static void optimize_am(ir_node *irn, void *env) {
    ia32_am_opt_env_t *am_opt_env = env;
    ia32_code_gen_t   *cg         = am_opt_env->cg;
    ir_graph          *irg        = get_irn_irg(irn);
    heights_t         *h          = am_opt_env->h;
    ir_node           *block, *left, *right;
    ir_node           *store, *load, *mem_proj;
    ir_node           *addr_b, *addr_i;
    int                need_exchange_on_fail = 0;
    ia32_am_type_t     am_support;
    ia32_am_cand_t     cand;
    ia32_am_cand_t     orig_cand;
    int                dest_possible;
    int                source_possible;

    static const arch_register_req_t dest_out_reg_req_0 = {
        arch_register_req_type_none,
        NULL,   /* regclass */
        NULL,   /* limit bitset */
        -1,     /* same pos */
        -1      /* different pos */
    };
    static const arch_register_req_t *dest_am_out_reqs[] = {
        &dest_out_reg_req_0
    };

    if (!is_ia32_irn(irn) || is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn))
        return;
    if (is_ia32_Lea(irn))
        return;

    am_support = get_ia32_am_support(irn);
    block      = get_nodes_block(irn);

    /* fold following patterns:                                  */
    /* - op -> Load into AMop with am_Source                     */
    /*   conditions:                                             */
    /*     - op is am_Source capable AND                         */
    /*     - the Load is only used by this op AND                */
    /*     - the Load is in the same block                       */
    /* - Store -> op -> Load into AMop with am_Dest              */
    /*   conditions:                                             */
    /*     - op is am_Dest capable AND                           */
    /*     - the Store uses the same address as the Load AND     */
    /*     - the Load is only used by this op AND                */
    /*     - the Load and Store are in the same block AND        */
    /*     - nobody else uses the result of the op               */
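    /* Illustrative examples (assumed x86 shapes):               */
    /*   source AM:  val = Add(x, Load [p])        => add reg,[p] */
    /*   dest AM:    Store [p] = Add(Load [p], x)  => add [p],reg */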
    if (get_ia32_am_support(irn) == ia32_am_None)
        return;

    cand = is_am_candidate(cg, h, block, irn);
    if (cand == IA32_AM_CAND_NONE)
        return;

    orig_cand = cand;
    DBG((dbg, LEVEL_1, "\tfound address mode candidate %+F (candleft %d candright %d)... \n", irn,
         cand & IA32_AM_CAND_LEFT, cand & IA32_AM_CAND_RIGHT));

    left = get_irn_n(irn, 2);
    if (get_irn_arity(irn) == 4) {
        /* it's an "unary" operation */
        right = left;
        assert(cand == IA32_AM_CAND_BOTH);
    } else {
        right = get_irn_n(irn, 3);
    }
    dest_possible   = am_support & ia32_am_Dest ? 1 : 0;
    source_possible = am_support & ia32_am_Source ? 1 : 0;

    DBG((dbg, LEVEL_2, "\tdest_possible %d source_possible %d ... \n", dest_possible, source_possible));

    if (dest_possible) {
        addr_b = NULL;
        addr_i = NULL;
        store  = NULL;

        /* we should only have 1 user which is a store */
        if (ia32_get_irn_n_edges(irn) == 1) {
            ir_node *succ = get_edge_src_irn(get_irn_out_edge_first(irn));

            if (is_ia32_xStore(succ) || is_ia32_Store(succ)) {
                store  = succ;
                addr_b = get_irn_n(store, 0);
                addr_i = get_irn_n(store, 1);
            }
        }

        if (store == NULL) {
            DBG((dbg, LEVEL_2, "\tno store found, not using dest_mode\n"));
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* normalize nodes, we need the interesting load on the left side */
        if (cand & IA32_AM_CAND_RIGHT) {
            load = get_Proj_pred(right);
            if (load_store_addr_is_equal(load, store, addr_b, addr_i)
                    && node_is_ia32_comm(irn)) {
                DBG((dbg, LEVEL_2, "\texchanging left/right\n"));
                exchange_left_right(irn, &left, &right, 3, 2);
                need_exchange_on_fail ^= 1;
                if (cand == IA32_AM_CAND_RIGHT)
                    cand = IA32_AM_CAND_LEFT;
            }
        }
    }
    if (dest_possible) {
        if (cand & IA32_AM_CAND_LEFT && is_Proj(left)) {
            load = get_Proj_pred(left);

#ifndef AGGRESSIVE_AM
            /* we have to be the only user of the load */
            if (get_irn_n_edges(left) > 1) {
                DBG((dbg, LEVEL_2, "\tmatching load has too many users, not using dest_mode\n"));
                dest_possible = 0;
            }
#endif
        } else {
            DBG((dbg, LEVEL_2, "\tno matching load found, not using dest_mode"));
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* the store has to use the load's memory or the same memory
         * as the load */
        ir_node *loadmem  = get_irn_n(load, 2);
        ir_node *storemem = get_irn_n(store, 3);
        assert(get_irn_mode(loadmem) == mode_M);
        assert(get_irn_mode(storemem) == mode_M);
        /* TODO there could be a sync between store and load... */
        if (storemem != loadmem && (!is_Proj(storemem) || get_Proj_pred(storemem) != load)) {
            DBG((dbg, LEVEL_2, "\tload/store using different memories, not using dest_mode"));
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* Compare Load and Store address */
        if (!load_store_addr_is_equal(load, store, addr_b, addr_i)) {
            DBG((dbg, LEVEL_2, "\taddresses not equal, not using dest_mode"));
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        ir_mode *lsmode = get_ia32_ls_mode(load);
        if (get_mode_size_bits(lsmode) != 32) {
            dest_possible = 0;
        }
    }
    if (dest_possible) {
        /* all conditions fulfilled, do the transformation */
        assert(cand & IA32_AM_CAND_LEFT);

        /* set new base, index and attributes */
        set_irn_n(irn, 0, addr_b);
        set_irn_n(irn, 1, addr_i);
        add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
        set_ia32_am_scale(irn, get_ia32_am_scale(load));
        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
        set_ia32_op_type(irn, ia32_AddrModeD);
        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

        set_ia32_am_sc(irn, get_ia32_am_sc(load));
        if (is_ia32_am_sc_sign(load))
            set_ia32_am_sc_sign(irn);

        /* connect to Load memory and disconnect Load */
        if (get_irn_arity(irn) == 5) {
            /* binary AMop */
            set_irn_n(irn, 4, get_irn_n(load, 2));
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
        } else {
            /* unary AMop */
            set_irn_n(irn, 3, get_irn_n(load, 2));
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
        }

        /* change node mode and out register requirements */
        set_irn_mode(irn, mode_M);
        set_ia32_out_req_all(irn, dest_am_out_reqs);

        /* connect the memory Proj of the Store to the op */
        edges_reroute(store, irn, irg);

        /* clear remat flag */
        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

        try_remove_from_sched(store);
        try_remove_from_sched(load);
        DBG_OPT_AM_D(load, store, irn);

        DB((dbg, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
        need_exchange_on_fail = 0;
        source_possible = 0;
    }
    if (source_possible) {
        /* normalize ops, we need the load on the right */
        if (cand == IA32_AM_CAND_LEFT) {
            if (node_is_ia32_comm(irn)) {
                exchange_left_right(irn, &left, &right, 3, 2);
                need_exchange_on_fail ^= 1;
                cand = IA32_AM_CAND_RIGHT;
            } else {
                source_possible = 0;
            }
        }
    }

    if (source_possible) {
        /* all conditions fulfilled, do transform */
        assert(cand & IA32_AM_CAND_RIGHT);
        load = get_Proj_pred(right);

        if (get_irn_n_edges(load) > 1) {
            source_possible = 0;
        }
    }

    if (source_possible) {
        ir_mode *ls_mode = get_ia32_ls_mode(load);
        if (get_mode_size_bits(ls_mode) != 32)
            source_possible = 0;
    }
    if (source_possible) {
        addr_b = get_irn_n(load, 0);
        addr_i = get_irn_n(load, 1);

        /* set new base, index and attributes */
        set_irn_n(irn, 0, addr_b);
        set_irn_n(irn, 1, addr_i);
        add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
        set_ia32_am_scale(irn, get_ia32_am_scale(load));
        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
        set_ia32_op_type(irn, ia32_AddrModeS);
        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

        set_ia32_am_sc(irn, get_ia32_am_sc(load));
        if (is_ia32_am_sc_sign(load))
            set_ia32_am_sc_sign(irn);

        /* clear remat flag */
        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

        if (is_ia32_use_frame(load)) {
            if (get_ia32_frame_ent(load) == NULL) {
                set_ia32_need_stackent(irn);
            }
            set_ia32_use_frame(irn);
        }

        /* connect to Load memory and disconnect Load */
        if (get_irn_arity(irn) == 5) {
            /* binary AMop */
            set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
            set_irn_n(irn, 4, get_irn_n(load, 2));
        } else {
            assert(get_irn_arity(irn) == 4);
            /* unary AMop */
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
            set_irn_n(irn, 3, get_irn_n(load, 2));
        }

        DBG_OPT_AM_S(load, irn);

        /* If Load has a memory Proj, connect it to the op */
        mem_proj = ia32_get_proj_for_mode(load, mode_M);
        if (mem_proj != NULL) {
            ir_node *res_proj;
            ir_mode *mode = get_irn_mode(irn);

            res_proj = new_rd_Proj(get_irn_dbg_info(irn), irg,
                                   get_nodes_block(irn), new_Unknown(mode_T),
                                   mode, 0);
            set_irn_mode(irn, mode_T);
            edges_reroute(irn, res_proj, irg);
            set_Proj_pred(res_proj, irn);

            set_Proj_pred(mem_proj, irn);
            set_Proj_proj(mem_proj, 1);

            if (sched_is_scheduled(irn)) {
                sched_add_after(irn, res_proj);
                sched_add_after(irn, mem_proj);
            }
        }

        if (get_irn_n_edges(load) == 0) {
            try_remove_from_sched(load);
        }
        need_exchange_on_fail = 0;

        DB((dbg, LEVEL_1, "merged with %+F into source AM\n", load));
    }
    /* was exchanged but optimize failed: exchange back */
    if (need_exchange_on_fail) {
        exchange_left_right(irn, &left, &right, 3, 2);
    }
}
/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg) {
    /* if we are supposed to do AM or LEA optimization: recalculate edges */
    if (! (cg->opt & (IA32_OPT_DOAM | IA32_OPT_LEA))) {
        /* no optimizations at all */
        return;
    }

    /* beware: we cannot optimize LEA and AM in one run because */
    /*         LEA optimization adds new nodes to the irg which */
    /*         invalidates the phase data                       */

    if (cg->opt & IA32_OPT_LEA) {
        irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);
    }

    if (cg->dump)
        be_dump(cg->irg, "-lea", dump_ir_block_graph_sched);

    /* hack for now, so these don't get created during optimize, because then
     * they will be unknown to the heights module
     */
    ia32_new_NoReg_gp(cg);
    ia32_new_NoReg_fp(cg);
    ia32_new_NoReg_vfp(cg);

    if (cg->opt & IA32_OPT_DOAM) {
        /* we need height information for am optimization */
        heights_t *h = heights_new(cg->irg);
        ia32_am_opt_env_t env;

        env.cg = cg;
        env.h  = h;

        irg_walk_blkwise_graph(cg->irg, NULL, optimize_am, &env);

        heights_free(h);
    }
}
void ia32_init_optimize(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}