/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief   Implements several optimizations for IA32.
 * @author  Christian Wuerdig
 */
#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"

#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
typedef enum {
    IA32_AM_CAND_NONE  = 0,  /**< no address mode possible with irn inputs */
    IA32_AM_CAND_LEFT  = 1,  /**< address mode possible with left input */
    IA32_AM_CAND_RIGHT = 2,  /**< address mode possible with right input */
    IA32_AM_CAND_BOTH  = 3   /**< address mode possible with both inputs */
} ia32_am_cand_t;
typedef int is_op_func_t(const ir_node *n);
typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block,
                             ir_node *base, ir_node *index, ir_node *mem);
/**
 * Checks if a node represents the NOREG value.
 */
static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
    return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
}
/********************************************************************************************************
 *                                   Peephole Optimizations
 ********************************************************************************************************/
/*
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
 */
// only optimize up to 48 stores behind IncSPs
#define MAXPUSH_OPTIMIZE    48
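
/* Illustration (a sketch, not taken from a real dump): an IncSP that
 * reserves 8 bytes followed by Stores to both slots
 *
 *     sub esp, 8
 *     mov [esp+0], eax      ; slot 0
 *     mov [esp+4], ebx      ; slot 1
 *
 * can be rewritten by ia32_create_Pushs() below into
 *
 *     push ebx              ; slot 1, pushed first (inverse order)
 *     push eax              ; slot 0
 *
 * after which the remaining IncSP offset is reduced accordingly.
 */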
/**
 * Tries to create Pushs from IncSP, Store combinations.
 */
static void ia32_create_Pushs(ir_node *irn, ia32_code_gen_t *cg) {
    int       i;
    int       offset;
    ir_node  *node;
    ir_node  *curr_sp;
    ir_node  *stores[MAXPUSH_OPTIMIZE];
    ir_node  *block  = get_nodes_block(irn);
    ir_graph *irg    = cg->irg;
    ir_mode  *spmode = get_irn_mode(irn);

    memset(stores, 0, sizeof(stores));

    assert(be_is_IncSP(irn));

    offset = be_get_IncSP_offset(irn);
    /*
     * We first walk the schedule after the IncSP node as long as we find
     * suitable Stores that could be transformed to a Push.
     * We save them into the stores array which is sorted by the frame offset/4
     * attached to the node.
     */
    for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
        ir_node *mem;
        int      offset;
        int      storeslot;

        // it has to be a store
        if (!is_ia32_Store(node))
            break;

        // it has to use our sp value
        if (get_irn_n(node, 0) != irn)
            continue;
        // store has to be attached to NoMem
        mem = get_irn_n(node, 3);
        if (!is_NoMem(mem))
            continue;

        if ((get_ia32_am_flavour(node) & ia32_am_IS) != 0)
            break;

        offset = get_ia32_am_offs_int(node);

        storeslot = offset / 4;
        if (storeslot >= MAXPUSH_OPTIMIZE)
            continue;

        // storing into the same slot twice is bad (and shouldn't happen...)
        if (stores[storeslot] != NULL)
            break;

        // storing at half-slots is bad
        if (offset % 4 != 0)
            break;

        stores[storeslot] = node;
    }
    curr_sp = get_irn_n(irn, 0);

    // walk the stores in inverse order and create pushs for them
    i = (offset / 4) - 1;
    if (i >= MAXPUSH_OPTIMIZE) {
        i = MAXPUSH_OPTIMIZE - 1;
    }

    for ( ; i >= 0; --i) {
        const arch_register_t *spreg;
        ir_node *push;
        ir_node *val, *mem, *mem_proj;
        ir_node *store = stores[i];
        ir_node *noreg = ia32_new_NoReg_gp(cg);

        if (store == NULL || is_Bad(store))
            break;

        val   = get_irn_n(store, 2);
        mem   = get_irn_n(store, 3);
        spreg = arch_get_irn_register(cg->arch_env, curr_sp);

        push = new_rd_ia32_Push(NULL, irg, block, noreg, noreg, val, curr_sp, mem);

        set_ia32_am_support(push, ia32_am_Source, ia32_am_unary);
        copy_ia32_Immop_attr(push, store);

        sched_add_before(irn, push);

        // create stackpointer proj
        curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
        arch_set_irn_register(cg->arch_env, curr_sp, spreg);
#ifdef SCHEDULE_PROJS
        sched_add_before(irn, curr_sp);
#endif
        // create memory proj
        mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

        // use the memproj now
        exchange(store, mem_proj);

        // we can remove the store now
        sched_remove(store);

        offset -= 4;
    }
    be_set_IncSP_offset(irn, offset);

    // can we remove the IncSP now?
    if (offset == 0) {
        const ir_edge_t *edge, *next;

        foreach_out_edge_safe(irn, edge, next) {
            ir_node *arg = get_edge_src_irn(edge);
            int      pos = get_edge_src_pos(edge);

            set_irn_n(arg, pos, curr_sp);
        }

        set_irn_n(irn, 0, new_Bad());
        sched_remove(irn);
    } else {
        set_irn_n(irn, 0, curr_sp);
    }
}
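
/* Example (hypothetical offsets): two consecutive IncSPs such as
 *
 *     sp1 = IncSP sp0, 16
 *     sp2 = IncSP sp1, 8
 *
 * are folded by ia32_optimize_IncSP() below into a single
 *
 *     sp2 = IncSP sp0, 24
 *
 * provided sp1 has no other users. The special EXPAND/SHRINK stack frame
 * markers cancel each other out instead of being added numerically.
 */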
/**
 * Tries to optimize two consecutive IncSPs.
 */
static void ia32_optimize_IncSP(ir_node *node)
{
    int      pred_offs;
    int      curr_offs;
    int      offs;
    ir_node *predpred;
    ir_node *pred = be_get_IncSP_pred(node);

    if (!be_is_IncSP(pred))
        return;

    if (get_irn_n_edges(pred) > 1)
        return;

    pred_offs = be_get_IncSP_offset(pred);
    curr_offs = be_get_IncSP_offset(node);

    if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
        if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
            return;
        }
        offs = 0;
    } else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
        if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
            return;
        }
        offs = 0;
    } else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND
            || curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
        return;
    } else {
        offs = curr_offs + pred_offs;
    }

    be_set_IncSP_offset(node, offs);

    /* rewire dependency edges */
    predpred = be_get_IncSP_pred(pred);
    edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);

    be_set_IncSP_pred(node, predpred);
}
/**
 * Performs Peephole Optimizations.
 */
static void ia32_peephole_optimize_node(ir_node *node, void *env) {
    ia32_code_gen_t *cg = env;

    if (be_is_IncSP(node)) {
        ia32_optimize_IncSP(node);

        if (cg->opt & IA32_OPT_PUSHARGS)
            ia32_create_Pushs(node, cg);
    }
}

void ia32_peephole_optimization(ir_graph *irg, ia32_code_gen_t *cg) {
    irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, cg);
}
/******************************************************************
 *                   Address Mode Optimizations
 ******************************************************************/
/**
 * Returns non-zero if the given node is a commutative ia32 node.
 */
static int node_is_ia32_comm(const ir_node *irn) {
    return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}

/**
 * Counts the out edges (users) of a node.
 */
static int ia32_get_irn_n_edges(const ir_node *irn) {
    const ir_edge_t *edge;
    int cnt = 0;

    foreach_out_edge(irn, edge) {
        cnt++;
    }

    return cnt;
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
    return is_op_func(pred);
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
                                      int (*is_op_func)(const ir_node *n))
{
    if (is_Proj(pred)) {
        pred = get_Proj_pred(pred);
        if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
            return 1;
        }
    }

    return 0;
}
/**
 * Checks if irn is a candidate for address calculation. We avoid transforming
 * Adds into LEAs if they have a Load as predecessor, because then we can use
 * address mode for the operation later. irn is a candidate if:
 *
 * - neither operand is a Load within the same block, OR
 * - all such Loads have more than one user
 *
 * @param irn The irn to check
 * @return 1 if irn is a candidate, 0 otherwise
 */
static int is_addr_candidate(const ir_node *irn)
{
#ifndef AGGRESSIVE_AM
    const ir_node *block = get_nodes_block(irn);
    ir_node *left, *right;
    int      n;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) {
        n = ia32_get_irn_n_edges(left);
        /* load with only one user: don't create LEA */
        if (n == 1)
            return 0;
    }

    if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
        n = ia32_get_irn_n_edges(right);
        if (n == 1)
            return 0;
    }
#endif

    return 1;
}
/**
 * Checks if irn is a candidate for address mode. irn is a candidate if:
 *
 * - at least one operand is a Load within the same block, AND
 * - the Load has no users other than the irn, AND
 * - the irn has no frame entity set
 *
 * @param h     The height information of the irg
 * @param block The block the Loads must/must not be in
 * @param irn   The irn to check
 * @return 0 if irn is no candidate, 1 if the left load can be used, 2 if the right one, 3 for both
 */
static ia32_am_cand_t is_am_candidate(heights_t *h, const ir_node *block, ir_node *irn) {
    ir_node *in, *load, *other, *left, *right;
    int      is_cand = 0, cand;
    int      arity;

    if (is_ia32_Ld(irn) || is_ia32_St(irn) ||
            is_ia32_vfild(irn) || is_ia32_vfist(irn) ||
            is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn))
        return IA32_AM_CAND_NONE;

    if (get_ia32_frame_ent(irn) != NULL)
        return IA32_AM_CAND_NONE;

    left  = get_irn_n(irn, 2);
    arity = get_irn_arity(irn);
    if (get_ia32_am_arity(irn) == ia32_am_binary) {
        right = get_irn_n(irn, 3);
    } else {
        assert(get_ia32_am_arity(irn) == ia32_am_unary);
        right = NULL;
    }

    in = left;

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
#ifndef AGGRESSIVE_AM
        int n;
        n       = ia32_get_irn_n_edges(in);
        is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
#else
        is_cand = 1;
#endif

        load  = get_Proj_pred(in);
        other = right;

        /* 8bit Loads are not supported (for binary ops),
         * they cannot be used with every register */
        if (get_ia32_am_arity(irn) == ia32_am_binary &&
                get_mode_size_bits(get_ia32_ls_mode(load)) < 16) {
            is_cand = 0;
        }

        /* If there is a data dependency of other irn from load: cannot use AM */
        if (is_cand && other != NULL && get_nodes_block(other) == block) {
            other   = skip_Proj(other);
            is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
            /* this could happen in loops */
            is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
        }
    }

    cand    = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE;
    in      = right;
    is_cand = 0;

    if (in != NULL && pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
#ifndef AGGRESSIVE_AM
        int n;
        n       = ia32_get_irn_n_edges(in);
        is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
#else
        is_cand = 1;
#endif

        load  = get_Proj_pred(in);
        other = left;

        /* 8bit Loads are not supported, they cannot be used with every register */
        if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
            is_cand = 0;

        /* If there is a data dependency of other irn from load: cannot use load */
        if (is_cand && get_nodes_block(other) == block) {
            other   = skip_Proj(other);
            is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
            /* this could happen in loops */
            is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
        }
    }

    cand = is_cand ? (cand | IA32_AM_CAND_RIGHT) : cand;

    /* if the irn has a frame entity: we do not use address mode */
    return cand;
}
/**
 * Compares the base and index addr and the load/store entities
 * and returns 1 if they are equal.
 */
static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
                                    const ir_node *addr_b, const ir_node *addr_i)
{
    if (get_irn_n(load, 0) != addr_b)
        return 0;
    if (get_irn_n(load, 1) != addr_i)
        return 0;

    if (get_ia32_frame_ent(load) != get_ia32_frame_ent(store))
        return 0;

    if (get_ia32_am_sc(load) != get_ia32_am_sc(store))
        return 0;
    if (is_ia32_am_sc_sign(load) != is_ia32_am_sc_sign(store))
        return 0;
    if (get_ia32_am_offs_int(load) != get_ia32_am_offs_int(store))
        return 0;
    if (get_ia32_ls_mode(load) != get_ia32_ls_mode(store))
        return 0;

    return 1;
}
typedef enum _ia32_take_lea_attr {
    IA32_LEA_ATTR_NONE  = 0,
    IA32_LEA_ATTR_BASE  = (1 << 0),
    IA32_LEA_ATTR_INDEX = (1 << 1),
    IA32_LEA_ATTR_OFFS  = (1 << 2),
    IA32_LEA_ATTR_SCALE = (1 << 3),
    IA32_LEA_ATTR_AMSC  = (1 << 4),
    IA32_LEA_ATTR_FENT  = (1 << 5)
} ia32_take_lea_attr;
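
/* The flags above form a bitmask describing which attributes of an existing
 * LEA may be assimilated into a new one. A minimal usage sketch, mirroring
 * the checks done in fold_addr() further down:
 *
 *     int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg);
 *     if (take_attr & IA32_LEA_ATTR_OFFS)
 *         offs = get_ia32_am_offs_int(left);
 *     if (take_attr & IA32_LEA_ATTR_SCALE)
 *         scale = get_ia32_am_scale(left);
 */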
/**
 * Decides if we have to keep the LEA operand or if we can assimilate it.
 */
static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea,
                      int have_am_sc, ia32_code_gen_t *cg)
{
    ir_entity *irn_ent = get_ia32_frame_ent(irn);
    ir_entity *lea_ent = get_ia32_frame_ent(lea);
    int        ret_val = 0;
    int        is_noreg_base  = be_is_NoReg(cg, base);
    int        is_noreg_index = be_is_NoReg(cg, index);
    ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea);

    /* If the Add and the LEA both have a different frame entity set: keep */
    if (irn_ent && lea_ent && (irn_ent != lea_ent))
        return IA32_LEA_ATTR_NONE;
    else if (! irn_ent && lea_ent)
        ret_val |= IA32_LEA_ATTR_FENT;

    /* If the Add and the LEA both already have an address mode symconst: keep */
    if (have_am_sc && get_ia32_am_sc(lea))
        return IA32_LEA_ATTR_NONE;
    else if (get_ia32_am_sc(lea))
        ret_val |= IA32_LEA_ATTR_AMSC;

    /* Check the different base-index combinations */

    if (! is_noreg_base && ! is_noreg_index) {
        /* Assimilate if base is the lea and the LEA is just a Base + Offset calculation */
        if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) {
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (! is_noreg_base && is_noreg_index) {
        /* Base is set but index not */
        if (base == lea) {
            /* Base points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_I)
                ret_val |= IA32_LEA_ATTR_INDEX;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else if (am_flav & ia32_B ? 0 : 1) {
            /* Base is not the LEA but the LEA is an index only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (is_noreg_base && ! is_noreg_index) {
        /* Index is set but not base */
        if (index == lea) {
            /* Index points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_B)
                ret_val |= IA32_LEA_ATTR_BASE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else if (am_flav & ia32_I ? 0 : 1) {
            /* Index is not the LEA but the LEA is a base only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else {
        assert(0 && "either base or index must be set");
    }

    return ret_val;
}
/**
 * Adds res before irn into the schedule if irn was scheduled.
 * @param irn The schedule point
 * @param res The node to be scheduled
 */
static INLINE void try_add_to_sched(ir_node *irn, ir_node *res) {
    if (sched_is_scheduled(irn))
        sched_add_before(irn, res);
}
/**
 * Removes a node from the schedule if it is not used anymore. If irn is a
 * mode_T node, all its Projs are removed as well.
 * @param node The node to be removed from the schedule
 */
static INLINE void try_kill(ir_node *node)
{
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge, *next;
        foreach_out_edge_safe(node, edge, next) {
            ir_node *proj = get_edge_src_irn(edge);
            try_kill(proj);
        }
    }

    if (get_irn_n_edges(node) != 0)
        return;

    if (sched_is_scheduled(node)) {
        sched_remove(node);
    }
}
/**
 * Folds Add or Sub to LEA if possible.
 */
static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn) {
    ir_graph  *irg      = get_irn_irg(irn);
    dbg_info  *dbg_info = get_irn_dbg_info(irn);
    ir_node   *block    = get_nodes_block(irn);
    ir_node   *res      = irn;
    ir_node   *shift    = NULL;
    ir_node   *lea      = NULL;
    ir_node   *lea_o    = NULL;
    int        isadd      = 0;
    int        offs       = 0;
    int        offs_cnst  = 0;
    int        offs_lea   = 0;
    int        scale      = 0;
    int        have_am_sc = 0;
    int        am_sc_sign = 0;
    ir_entity *am_sc    = NULL;
    ir_entity *lea_ent  = NULL;
    ir_node   *noreg    = ia32_new_NoReg_gp(cg);
    ir_node   *left, *right, *temp;
    ir_node   *base, *index;
    int        consumed_left_shift;
    ia32_am_flavour_t am_flav = 0;

    if (is_ia32_Add(irn))
        isadd = 1;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    /* "normalize" arguments in case of add with two operands */
    if (isadd && ! be_is_NoReg(cg, right)) {
        /* put LEA == ia32_am_O as right operand */
        if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put LEA != ia32_am_O as left operand */
        if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }

        /* put SHL as left operand iff left is NOT a LEA */
        if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, left);
            temp  = left;
            left  = right;
            right = temp;
        }
    }

    base  = left;
    index = noreg;

    /* check for operation with immediate */
    if (is_ia32_ImmConst(irn)) {
        tarval *tv = get_ia32_Immop_tarval(irn);

        DBG((dbg, LEVEL_1, "\tfound op with imm const"));

        offs_cnst = get_tarval_long(tv);
    }
    else if (isadd && is_ia32_ImmSymConst(irn)) {
        DBG((dbg, LEVEL_1, "\tfound op with imm symconst"));

        have_am_sc = 1;
        am_sc      = get_ia32_Immop_symconst(irn);
        am_sc_sign = is_ia32_am_sc_sign(irn);
    }

    /* determine the operand which needs to be checked */
    temp = be_is_NoReg(cg, right) ? left : right;

    /* check if right operand is AMConst (LEA with ia32_am_O)  */
    /* but we can only eat it up if there is no other symconst */
    /* because the linker won't accept two symconsts           */
    if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
        DBG((dbg, LEVEL_1, "\tgot op with LEA am_O"));

        offs_lea   = get_ia32_am_offs_int(temp);
        am_sc      = get_ia32_am_sc(temp);
        am_sc_sign = is_ia32_am_sc_sign(temp);
        have_am_sc = 1;

        if (temp == base)
            base = noreg;
        else if (temp == right)
            right = noreg;

        lea_o = temp;
    }

    /* default for add -> make right operand to index */
    if (isadd) {
        index = right;

        consumed_left_shift = -1;

        DBG((dbg, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));

        /* determine the operand which needs to be checked */
        temp = left;
        if (is_ia32_Lea(left)) {
            temp = right;
            consumed_left_shift = 0;
        }

        /* check for SHL 1,2,3 */
        if (pred_is_specific_node(temp, is_ia32_Shl)) {
            ir_node *right = get_irn_n(temp, n_ia32_Shl_right);

            if (is_ia32_Immediate(right)) {
                const ia32_immediate_attr_t *attr
                    = get_ia32_immediate_attr_const(right);
                long shiftval = attr->offset;

                if (shiftval > 0 && shiftval < 4) {
                    index               = get_irn_n(temp, 2);
                    consumed_left_shift = consumed_left_shift < 0 ? 1 : 0;

                    shift = temp;
                    scale = shiftval;

                    DBG((dbg, LEVEL_1, "\tgot scaled index %+F\n", index));
                }
            }
        }
        /* fix base */
        if (! be_is_NoReg(cg, index)) {
            /* if we have index, but left == right -> no base */
            if (left == right) {
                base = noreg;
            }
            else if (consumed_left_shift == 1) {
                /* -> base is right operand */
                base = (right == lea_o) ? noreg : right;
            }
        }
    }

    /* Try to assimilate a LEA as left operand */
    if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
        /* check if we can assimilate the LEA */
        int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg);

        if (take_attr == IA32_LEA_ATTR_NONE) {
            DBG((dbg, LEVEL_1, "\tleave old LEA, creating new one\n"));
        }
        else {
            DBG((dbg, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
            lea = left; /* for statistics */

            if (take_attr & IA32_LEA_ATTR_OFFS)
                offs = get_ia32_am_offs_int(left);

            if (take_attr & IA32_LEA_ATTR_AMSC) {
                am_sc      = get_ia32_am_sc(left);
                have_am_sc = 1;
                am_sc_sign = is_ia32_am_sc_sign(left);
            }

            if (take_attr & IA32_LEA_ATTR_SCALE)
                scale = get_ia32_am_scale(left);

            if (take_attr & IA32_LEA_ATTR_BASE)
                base = get_irn_n(left, 0);

            if (take_attr & IA32_LEA_ATTR_INDEX)
                index = get_irn_n(left, 1);

            if (take_attr & IA32_LEA_ATTR_FENT)
                lea_ent = get_ia32_frame_ent(left);
        }
    }

    /* ok, we can create a new LEA */
    res = new_rd_ia32_Lea(dbg_info, irg, block, base, index);
    /* we don't want stuff before the barrier... */
    if (be_is_NoReg(cg, base) && be_is_NoReg(cg, index)) {
        add_irn_dep(res, get_irg_frame(irg));
    }

    /* add the old offset of a previous LEA */
    add_ia32_am_offs_int(res, offs);

    /* add the new offset */
    if (isadd) {
        add_ia32_am_offs_int(res, offs_cnst);
        add_ia32_am_offs_int(res, offs_lea);
    } else {
        /* either lea_O-cnst, -cnst or -lea_O */
        if (offs_cnst != 0) {
            add_ia32_am_offs_int(res, offs_lea);
            add_ia32_am_offs_int(res, -offs_cnst);
        } else {
            add_ia32_am_offs_int(res, offs_lea);
        }
    }

    /* set the address mode symconst */
    if (have_am_sc) {
        set_ia32_am_sc(res, am_sc);
        if (am_sc_sign)
            set_ia32_am_sc_sign(res);
    }

    /* copy the frame entity (could be set in case of Add */
    /* which was a FrameAddr)                             */
    if (lea_ent != NULL) {
        set_ia32_frame_ent(res, lea_ent);
        set_ia32_use_frame(res);
    } else {
        set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
        if (is_ia32_use_frame(irn))
            set_ia32_use_frame(res);
    }

    set_ia32_am_scale(res, scale);

    /* determine new am flavour */
    if (offs || offs_cnst || offs_lea || have_am_sc) {
        am_flav |= ia32_O;
    }
    if (! be_is_NoReg(cg, base)) {
        am_flav |= ia32_B;
    }
    if (! be_is_NoReg(cg, index)) {
        am_flav |= ia32_I;
    }
    if (scale > 0) {
        am_flav |= ia32_S;
    }
    set_ia32_am_flavour(res, am_flav);

    set_ia32_op_type(res, ia32_AddrModeS);

    SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));

    DBG((dbg, LEVEL_1, "\tLEA [%+F + %+F * %d + %d]\n", base, index, scale, get_ia32_am_offs_int(res)));

    assert(irn && "Couldn't find result proj");

    /* get the result Proj of the Add/Sub */
    try_add_to_sched(irn, res);

    /* exchange the old op with the new LEA */
    /* we will exchange it, report here before the Proj is created */
    if (shift && lea && lea_o) {
        DBG_OPT_LEA4(irn, lea_o, lea, shift, res);
    } else if (shift && lea) {
        DBG_OPT_LEA3(irn, lea, shift, res);
    } else if (shift && lea_o) {
        DBG_OPT_LEA3(irn, lea_o, shift, res);
    } else if (lea && lea_o) {
        DBG_OPT_LEA3(irn, lea_o, lea, res);
    } else if (shift) {
        DBG_OPT_LEA2(irn, shift, res);
    } else if (lea) {
        DBG_OPT_LEA2(irn, lea, res);
    } else if (lea_o) {
        DBG_OPT_LEA2(irn, lea_o, res);
    } else {
        DBG_OPT_LEA1(irn, res);
    }

    exchange(irn, res);

    return res;
}
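
/* Example (a sketch): with a left shift by 2 feeding an Add, i.e.
 *
 *     t = Shl(x, 2)
 *     r = Add(t, y)
 *
 * fold_addr() produces a single
 *
 *     r = Lea(y, x)  with scale = 2       ; lea r, [y + x*4]
 *
 * so no extra instruction is needed for the multiplication by 4.
 */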
/**
 * Merges a Load/Store node with a LEA.
 *
 * @param irn The Load/Store node
 * @param lea The LEA
 */
static void merge_loadstore_lea(ir_node *irn, ir_node *lea) {
    ir_entity *irn_ent = get_ia32_frame_ent(irn);
    ir_entity *lea_ent = get_ia32_frame_ent(lea);

    /* If the irn and the LEA both have a different frame entity set: do not merge */
    if (irn_ent != NULL && lea_ent != NULL && (irn_ent != lea_ent))
        return;
    else if (irn_ent == NULL && lea_ent != NULL) {
        set_ia32_frame_ent(irn, lea_ent);
        set_ia32_use_frame(irn);
    }

    /* get the AM attributes from the LEA */
    add_ia32_am_offs_int(irn, get_ia32_am_offs_int(lea));
    set_ia32_am_scale(irn, get_ia32_am_scale(lea));
    set_ia32_am_flavour(irn, get_ia32_am_flavour(lea));

    set_ia32_am_sc(irn, get_ia32_am_sc(lea));
    if (is_ia32_am_sc_sign(lea))
        set_ia32_am_sc_sign(irn);

    set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);

    /* set base and index */
    set_irn_n(irn, 0, get_irn_n(lea, 0));
    set_irn_n(irn, 1, get_irn_n(lea, 1));

    /* clear remat flag */
    set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

    if (is_ia32_Ld(irn))
        DBG_OPT_LOAD_LEA(lea, irn);
    else
        DBG_OPT_STORE_LEA(lea, irn);
}
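
/* Example (a sketch): a Load whose address is computed by a LEA, e.g.
 *
 *     lea eax, [ebx + ecx*4 + 8]
 *     mov edx, [eax]
 *
 * is merged by merge_loadstore_lea() into
 *
 *     mov edx, [ebx + ecx*4 + 8]
 *
 * which makes the LEA dead if it has no other users.
 */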
/**
 * Sets new_right index of irn to right and new_left index to left.
 * Also exchanges left and right.
 */
static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right,
                                int new_left, int new_right)
{
    ir_node *temp;

    assert(is_ia32_commutative(irn));

    set_irn_n(irn, new_right, *right);
    set_irn_n(irn, new_left, *left);

    temp   = *left;
    *left  = *right;
    *right = temp;

    /* this is only needed for Compares, but currently ALL nodes
     * have this attribute :-) */
    set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
}
/**
 * Performs address calculation optimization (creates LEAs if possible).
 */
static void optimize_lea(ia32_code_gen_t *cg, ir_node *irn) {
    if (! is_ia32_irn(irn))
        return;

    /* Following cases can occur:                                  */
    /* - Sub (l, imm) -> LEA [base - offset]                       */
    /* - Sub (l, r == LEA with ia32_am_O)  -> LEA [base - offset]  */
    /* - Add (l, imm) -> LEA [base + offset]                       */
    /* - Add (l, r == LEA with ia32_am_O)  -> LEA [base + offset]  */
    /* - Add (l == LEA with ia32_am_O, r)  -> LEA [base + offset]  */
    /* - Add (l, r) -> LEA [base + index * scale]                  */
    /*   with scale > 1 iff l/r == shl (1,2,3)                     */
    if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
        ir_node *res;

        if (!is_addr_candidate(irn))
            return;

        DBG((dbg, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
        res = fold_addr(cg, irn);

        if (res != irn)
            DB((dbg, LEVEL_1, "transformed into %+F\n", res));
        else
            DB((dbg, LEVEL_1, "not transformed\n"));
    } else if (is_ia32_Ld(irn) || is_ia32_St(irn)) {
        /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
        /* - Store -> LEA into Store }       it might be better to keep the LEA             */
        ir_node *left = get_irn_n(irn, 0);

        if (is_ia32_Lea(left)) {
            const ir_edge_t *edge, *ne;
            ir_node *src;

            /* merge all Loads/Stores connected to this LEA with the LEA */
            foreach_out_edge_safe(left, edge, ne) {
                src = get_edge_src_irn(edge);

                if (src && (get_edge_src_pos(edge) == 0) && (is_ia32_Ld(src) || is_ia32_St(src))) {
                    DBG((dbg, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
                    if (! is_ia32_got_lea(src))
                        merge_loadstore_lea(src, left);
                    set_ia32_got_lea(src);
                }
            }
        }
    }
}
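
/* Example (a sketch): a widening Conv in front of a narrow Store is
 * superfluous, because the Store only reads the lower bits anyway:
 *
 *     v16 = Conv_I2I(v8)        ; 8 -> 16 bit
 *     Store [p], v16  (8bit)
 *
 * optimize_conv_store() below lets the Store read v8 directly; only a
 * Conv that shrinks the mode has to be kept.
 */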
static void optimize_conv_store(ir_node *node)
{
    ir_node *pred;
    ir_mode *conv_mode;
    ir_mode *store_mode;

    if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
        return;

    pred = get_irn_n(node, 2);
    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
        return;

    /* the store only stores the lower bits, so we only need the conv
     * if it shrinks the mode */
    conv_mode  = get_ia32_ls_mode(pred);
    store_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
        return;

    set_irn_n(node, 2, get_irn_n(pred, 2));
    if (get_irn_n_edges(pred) == 0) {
        try_kill(pred);
    }
}
static void optimize_load_conv(ir_node *node)
{
    ir_node *pred, *predpred;
    ir_mode *load_mode;
    ir_mode *conv_mode;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
        return;

    pred = get_irn_n(node, 2);
    if (!is_Proj(pred))
        return;

    predpred = get_Proj_pred(pred);
    if (!is_ia32_Load(predpred))
        return;

    /* the load is sign extending the upper bits, so we only need the conv
     * if it shrinks the mode */
    load_mode = get_ia32_ls_mode(predpred);
    conv_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
        return;

    if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
        /* change the load if it has only 1 user */
        if (get_irn_n_edges(pred) == 1) {
            ir_mode *newmode;
            if (get_mode_sign(conv_mode)) {
                newmode = find_signed_mode(load_mode);
            } else {
                newmode = find_unsigned_mode(load_mode);
            }
            assert(newmode != NULL);
            set_ia32_ls_mode(predpred, newmode);
        } else {
            /* otherwise we have to keep the conv */
            return;
        }
    }

    /* kill the conv */
    exchange(node, pred);
}
static void optimize_conv_conv(ir_node *node)
{
    ir_node *pred;
    ir_mode *pred_mode;
    ir_mode *conv_mode;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
        return;

    assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
    pred = get_irn_n(node, n_ia32_Conv_I2I_val);
    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
        return;

    /* we know that after a conv, the upper bits are sign extended
     * so we only need the 2nd conv if it shrinks the mode */
    conv_mode = get_ia32_ls_mode(node);
    pred_mode = get_ia32_ls_mode(pred);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(pred_mode))
        return;

    /* we can't eliminate an upconv signed->unsigned */
    if (get_mode_size_bits(conv_mode) != get_mode_size_bits(pred_mode) &&
            !get_mode_sign(conv_mode) && get_mode_sign(pred_mode))
        return;

    /* kill the conv */
    exchange(node, pred);
}
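
/* Example (a sketch): two Convs in a row where the second does not
 * shrink the mode, e.g.
 *
 *     a = Conv_I2I(x)   ; 32 -> 8  bit
 *     b = Conv_I2I(a)   ; 8  -> 32 bit, same signedness
 *
 * The second Conv is redundant because the first one already sign/zero
 * extended the upper bits, so optimize_conv_conv() replaces b by a.
 * The exception is an unsigned upconv of a signed value, which must stay.
 */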
static void optimize_node(ir_node *node, void *env)
{
    ia32_code_gen_t *cg = env;

    optimize_load_conv(node);
    optimize_conv_store(node);
    optimize_conv_conv(node);
    optimize_lea(cg, node);
}
/**
 * Checks for address mode patterns and performs the
 * necessary transformations.
 * This function is called by a walker.
 */
static void optimize_am(ir_node *irn, void *env) {
    ia32_am_opt_env_t *am_opt_env = env;
    ia32_code_gen_t   *cg         = am_opt_env->cg;
    ir_graph          *irg        = get_irn_irg(irn);
    heights_t         *h          = am_opt_env->h;
    ir_node           *block, *left, *right;
    ir_node           *store, *load, *mem_proj;
    ir_node           *addr_b, *addr_i;
    int                need_exchange_on_fail = 0;
    ia32_am_type_t     am_support;
    ia32_am_arity_t    am_arity;
    ia32_am_cand_t     cand;
    ia32_am_cand_t     orig_cand;
    int                dest_possible;
    int                source_possible;

    static const arch_register_req_t dest_out_reg_req_0 = {
        arch_register_req_type_none,
        NULL,   /* regclass */
        NULL,   /* limit bitset */
        -1,     /* same pos */
        -1      /* different pos */
    };
    static const arch_register_req_t *dest_am_out_reqs[] = {
        &dest_out_reg_req_0
    };

    if (!is_ia32_irn(irn) || is_ia32_Ld(irn) || is_ia32_St(irn))
        return;
    if (is_ia32_Lea(irn))
        return;

    am_support = get_ia32_am_support(irn);
    am_arity   = get_ia32_am_arity(irn);
    block      = get_nodes_block(irn);

    /* fold following patterns:
     * - op -> Load into AMop with am_Source
     *   conditions:
     *   - op is am_Source capable AND
     *   - the Load is only used by this op AND
     *   - the Load is in the same block
     * - Store -> op -> Load into AMop with am_Dest
     *   conditions:
     *   - op is am_Dest capable AND
     *   - the Store uses the same address as the Load AND
     *   - the Load is only used by this op AND
     *   - the Load and Store are in the same block AND
     *   - nobody else uses the result of the op
     * (a concrete example follows this comment)
     */
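    /* Example for the Source AM case (a sketch, see also the list above):
     *
     *     l = Load [p]          ; only user is the Add, same block
     *     r = Add(x, l)
     *
     * becomes a single instruction reading its right operand from memory:
     *
     *     add x, [p]            ; r = Add(x, [p])
     *
     * For Dest AM, a matching Store to the same address is folded in as
     * well, e.g. Store [p], Add(Load [p], x) becomes "add [p], x".
     */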
    if (am_support == ia32_am_None)
        return;

    assert(am_arity == ia32_am_unary || am_arity == ia32_am_binary);

    cand = is_am_candidate(h, block, irn);
    if (cand == IA32_AM_CAND_NONE)
        return;

    orig_cand = cand;
    DBG((dbg, LEVEL_1, "\tfound address mode candidate %+F (candleft %d candright %d)... \n", irn,
         cand & IA32_AM_CAND_LEFT, cand & IA32_AM_CAND_RIGHT));

    left = get_irn_n(irn, 2);
    if (am_arity == ia32_am_unary) {
        assert(get_irn_arity(irn) >= 4);
        right = left;
        assert(cand == IA32_AM_CAND_BOTH);
    } else {
        assert(get_irn_arity(irn) >= 5);
        right = get_irn_n(irn, 3);
    }

    dest_possible   = am_support & ia32_am_Dest ? 1 : 0;
    source_possible = am_support & ia32_am_Source ? 1 : 0;

    DBG((dbg, LEVEL_2, "\tdest_possible %d source_possible %d ... \n", dest_possible, source_possible));

    if (dest_possible) {
        addr_b = NULL;
        addr_i = NULL;
        store  = NULL;

        /* we should only have 1 user which is a store */
        if (ia32_get_irn_n_edges(irn) == 1) {
            ir_node *succ = get_edge_src_irn(get_irn_out_edge_first(irn));

            if (is_ia32_xStore(succ) || is_ia32_Store(succ)) {
                store  = succ;
                addr_b = get_irn_n(store, 0);
                addr_i = get_irn_n(store, 1);
            }
        }

        if (store == NULL) {
            DBG((dbg, LEVEL_2, "\tno store found, not using dest_mode\n"));
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* normalize nodes, we need the interesting load on the left side */
        if (cand & IA32_AM_CAND_RIGHT) {
            load = get_Proj_pred(right);
            if (load_store_addr_is_equal(load, store, addr_b, addr_i)
                    && node_is_ia32_comm(irn)) {
                DBG((dbg, LEVEL_2, "\texchanging left/right\n"));
                exchange_left_right(irn, &left, &right, 3, 2);
                need_exchange_on_fail ^= 1;
                if (cand == IA32_AM_CAND_RIGHT)
                    cand = IA32_AM_CAND_LEFT;
            }
        }
    }

    if (dest_possible) {
        if (cand & IA32_AM_CAND_LEFT && is_Proj(left)) {
            load = get_Proj_pred(left);

#ifndef AGGRESSIVE_AM
            /* we have to be the only user of the load */
            if (get_irn_n_edges(left) > 1) {
                DBG((dbg, LEVEL_2, "\tmatching load has too many users, not using dest_mode\n"));
                dest_possible = 0;
            }
#endif
        } else {
            DBG((dbg, LEVEL_2, "\tno matching load found, not using dest_mode\n"));
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* the store has to use the loads memory or the same memory
         * as the load */
        ir_node *loadmem  = get_irn_n(load, 2);
        ir_node *storemem = get_irn_n(store, 3);
        assert(get_irn_mode(loadmem) == mode_M);
        assert(get_irn_mode(storemem) == mode_M);
        /* TODO there could be a sync between store and load... */
        if (storemem != loadmem && (!is_Proj(storemem) || get_Proj_pred(storemem) != load)) {
            DBG((dbg, LEVEL_2, "\tload/store using different memories, not using dest_mode\n"));
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* Compare Load and Store address */
        if (!load_store_addr_is_equal(load, store, addr_b, addr_i)) {
            DBG((dbg, LEVEL_2, "\taddresses not equal, not using dest_mode\n"));
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        ir_mode *lsmode = get_ia32_ls_mode(load);
        if (get_mode_size_bits(lsmode) != 32) {
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* all conditions fulfilled, do the transformation */
        assert(cand & IA32_AM_CAND_LEFT);

        /* set new base, index and attributes */
        set_irn_n(irn, 0, addr_b);
        set_irn_n(irn, 1, addr_i);
        add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
        set_ia32_am_scale(irn, get_ia32_am_scale(load));
        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
        set_ia32_op_type(irn, ia32_AddrModeD);
        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

        set_ia32_am_sc(irn, get_ia32_am_sc(load));
        if (is_ia32_am_sc_sign(load))
            set_ia32_am_sc_sign(irn);

        /* connect to Load memory and disconnect Load */
        if (am_arity == ia32_am_binary) {
            /* binary AMop */
            set_irn_n(irn, 4, get_irn_n(load, 2));
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
        } else {
            /* unary AMop */
            set_irn_n(irn, 3, get_irn_n(load, 2));
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
        }

        /* change node mode and out register requirements */
        set_irn_mode(irn, mode_M);
        set_ia32_out_req_all(irn, dest_am_out_reqs);

        /* connect the memory Proj of the Store to the op */
        edges_reroute(store, irn, irg);

        /* clear remat flag */
        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

        try_kill(store);
        try_kill(load);
        DBG_OPT_AM_D(load, store, irn);

        DB((dbg, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));

        need_exchange_on_fail = 0;
        source_possible       = 0;
    }

    if (source_possible) {
        /* normalize ops, we need the load on the right */
        if (cand == IA32_AM_CAND_LEFT) {
            if (node_is_ia32_comm(irn)) {
                exchange_left_right(irn, &left, &right, 3, 2);
                need_exchange_on_fail ^= 1;
                cand = IA32_AM_CAND_RIGHT;
            } else {
                source_possible = 0;
            }
        }
    }

    if (source_possible) {
        /* all conditions fulfilled, do transform */
        assert(cand & IA32_AM_CAND_RIGHT);
        load = get_Proj_pred(right);

        if (get_irn_n_edges(load) > 1) {
            source_possible = 0;
        }
    }

    if (source_possible) {
        ir_mode *ls_mode = get_ia32_ls_mode(load);
        if (get_mode_size_bits(ls_mode) != 32)
            source_possible = 0;
    }

    if (source_possible) {
        addr_b = get_irn_n(load, 0);
        addr_i = get_irn_n(load, 1);

        /* set new base, index and attributes */
        set_irn_n(irn, 0, addr_b);
        set_irn_n(irn, 1, addr_i);
        add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
        set_ia32_am_scale(irn, get_ia32_am_scale(load));
        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
        set_ia32_op_type(irn, ia32_AddrModeS);
        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

        set_ia32_am_sc(irn, get_ia32_am_sc(load));
        if (is_ia32_am_sc_sign(load))
            set_ia32_am_sc_sign(irn);

        /* clear remat flag */
        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

        if (is_ia32_use_frame(load)) {
            if (get_ia32_frame_ent(load) == NULL) {
                set_ia32_need_stackent(irn);
            }
            set_ia32_use_frame(irn);
        }

        /* connect to Load memory and disconnect Load */
        if (am_arity == ia32_am_binary) {
            /* binary AMop */
            right = ia32_get_admissible_noreg(cg, irn, 3);
            set_irn_n(irn, 3, right);
            set_irn_n(irn, 4, get_irn_n(load, n_ia32_Load_mem));
        } else {
            /* unary AMop */
            right = ia32_get_admissible_noreg(cg, irn, 2);
            set_irn_n(irn, 2, right);
            set_irn_n(irn, 3, get_irn_n(load, n_ia32_Load_mem));
        }

        DBG_OPT_AM_S(load, irn);

        /* If Load has a memory Proj, connect it to the op */
        mem_proj = ia32_get_proj_for_mode(load, mode_M);
        if (mem_proj != NULL) {
            ir_node *res_proj;
            ir_mode *mode = get_irn_mode(irn);

            assert(mode != mode_T);

            res_proj = new_rd_Proj(get_irn_dbg_info(irn), irg,
                                   get_nodes_block(irn), new_Unknown(mode_T),
                                   mode, 0);
            set_irn_mode(irn, mode_T);
            edges_reroute(irn, res_proj, irg);
            set_Proj_pred(res_proj, irn);
            set_Proj_proj(res_proj, 0);

            set_Proj_pred(mem_proj, irn);
            set_Proj_proj(mem_proj, 1);

#ifdef SCHEDULE_PROJS
            if (sched_is_scheduled(irn)) {
                sched_add_after(irn, res_proj);
                sched_add_after(irn, mem_proj);
            }
#endif
        }

        need_exchange_on_fail = 0;

        /* immediates are only allowed on the right side */
        if (is_ia32_Immediate(left)) {
            exchange_left_right(irn, &left, &right, 3, 2);
        }

        DB((dbg, LEVEL_1, "merged with %+F into source AM\n", load));
    }

    /* was exchanged but optimize failed: exchange back */
    if (need_exchange_on_fail) {
        exchange_left_right(irn, &left, &right, 3, 2);
    }
}
/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg) {
    /* if we are supposed to do AM or LEA optimization: recalculate edges */
    if (! (cg->opt & (IA32_OPT_DOAM | IA32_OPT_LEA))) {
        /* no optimizations at all */
        return;
    }

    /* beware: we cannot optimize LEA and AM in one run because  */
    /*         LEA optimization adds new nodes to the irg which  */
    /*         invalidates the phase data                        */

    if (cg->opt & IA32_OPT_LEA) {
        irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);
    }

    if (cg->dump)
        be_dump(cg->irg, "-lea", dump_ir_block_graph_sched);

    /* hack for now, so these don't get created during optimize, because then
     * they will be unknown to the heights module
     */
    ia32_new_NoReg_gp(cg);
    ia32_new_NoReg_fp(cg);
    ia32_new_NoReg_vfp(cg);

    if (cg->opt & IA32_OPT_DOAM) {
        /* we need height information for am optimization */
        heights_t *h = heights_new(cg->irg);
        ia32_am_opt_env_t env;

        env.cg = cg;
        env.h  = h;

        irg_walk_blkwise_graph(cg->irg, NULL, optimize_am, &env);

        heights_free(h);
    }
}
void ia32_init_optimize(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}