/*
 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/*
 * File name: ir/be/ia32/ia32_optimize.c
 * Purpose:   Implements several optimizations for IA32
 * Author:    Christian Wuerdig
 * Copyright: (c) 2006 Universitaet Karlsruhe
 * Licence:   This file is protected by the GPL - GNU GENERAL PUBLIC LICENSE.
 */
#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"

#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h" /* the generated interface (register type and class definitions) */
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

typedef enum {
    IA32_AM_CAND_NONE  = 0,  /**< no address mode possible with irn inputs */
    IA32_AM_CAND_LEFT  = 1,  /**< address mode possible with left input */
    IA32_AM_CAND_RIGHT = 2,  /**< address mode possible with right input */
    IA32_AM_CAND_BOTH  = 3   /**< address mode possible with both inputs */
} ia32_am_cand_t;

typedef int is_op_func_t(const ir_node *n);
typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem);
/**
 * Checks if a node represents the NOREG value.
 */
static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
    return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
}
void ia32_pre_transform_phase(ia32_code_gen_t *cg) {
    /*
        We need to transform the consts twice:
        - the psi condition tree transformer needs existing constants to be ia32 constants
        - the psi condition tree transformer inserts new firm constants which need to be transformed
    */
    //ia32_transform_all_firm_consts(cg);
    irg_walk_graph(cg->irg, NULL, ia32_transform_psi_cond_tree, cg);
    //ia32_transform_all_firm_consts(cg);
}
/********************************************************************************************************
 *                                  Peephole Optimization
 ********************************************************************************************************/
/**
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
 */
static int ia32_const_equal(const ir_node *n1, const ir_node *n2) {
    if (get_ia32_immop_type(n1) != get_ia32_immop_type(n2))
        return 0;

    if (get_ia32_immop_type(n1) == ia32_ImmConst) {
        return get_ia32_Immop_tarval(n1) == get_ia32_Immop_tarval(n2);
    } else if (get_ia32_immop_type(n1) == ia32_ImmSymConst) {
        return get_ia32_Immop_symconst(n1) == get_ia32_Immop_symconst(n2);
    }

    assert(get_ia32_immop_type(n1) == ia32_ImmNone);
    return 1;
}
/**
 * Checks for potential CJmp/CJmpAM optimization candidates.
 */
static ir_node *ia32_determine_cjmp_cand(ir_node *irn, is_op_func_t *is_op_func) {
    ir_node *cand = NULL;
    ir_node *prev = sched_prev(irn);

    if (is_Block(prev)) {
        if (get_Block_n_cfgpreds(prev) == 1)
            prev = get_Block_cfgpred(prev, 0);
        else
            return NULL;
    }

    /* The predecessor must be a ProjX. */
    if (prev && is_Proj(prev) && get_irn_mode(prev) == mode_X) {
        prev = get_Proj_pred(prev);

        if (is_op_func(prev))
            cand = prev;
    }

    return cand;
}
static int is_TestJmp_cand(const ir_node *irn) {
    return is_ia32_TestJmp(irn) || is_ia32_And(irn);
}
/**
 * Checks if two consecutive arguments of cand match
 * the two arguments of irn (TestJmp).
 */
static int is_TestJmp_replacement(ir_node *cand, ir_node *irn) {
    ir_node *in1 = get_irn_n(irn, 0);
    ir_node *in2 = get_irn_n(irn, 1);
    int i, n = get_irn_arity(cand);
    int same_args = 0;

    for (i = 0; i < n - 1; i++) {
        if (get_irn_n(cand, i) == in1 &&
            get_irn_n(cand, i + 1) == in2)
        {
            same_args = 1;
            break;
        }
    }

    if (!same_args)
        return 0;

    return ia32_const_equal(cand, irn);
}
/**
 * Tries to replace a TestJmp by a CJmp or CJmpAM (in case of an And).
 */
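/*
 * Illustrative sketch (assumed semantics, not taken from this file): if an
 * And with the same two operands is scheduled right before the TestJmp, the
 * And already computes the flags the jump needs:
 *
 *     t = And(a, b)            t = And(a, b)   ; flags set here
 *     TestJmp(a, b)      -->   CJmpAM          ; reuses the And's flags
 */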
static void ia32_optimize_TestJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand = ia32_determine_cjmp_cand(irn, is_TestJmp_cand);
    int replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_TestJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((dbg, LEVEL_1, "replacing %+F by ", irn));

        if (is_ia32_And(cand))
            set_irn_op(irn, op_ia32_CJmpAM);
        else
            set_irn_op(irn, op_ia32_CJmp);

        DB((dbg, LEVEL_1, "%+F\n", irn));
    }
}
static int is_CondJmp_cand(const ir_node *irn) {
    return is_ia32_CondJmp(irn) || is_ia32_Sub(irn);
}
/**
 * Checks if the arguments of cand are the same as those of irn.
 */
static int is_CondJmp_replacement(ir_node *cand, ir_node *irn) {
    int i, arity;

    arity = get_irn_arity(cand);
    for (i = 0; i < arity; i++) {
        if (get_irn_n(cand, i) != get_irn_n(irn, i)) {
            return 0;
        }
    }

    return ia32_const_equal(cand, irn);
}
/**
 * Tries to replace a CondJmp by a CJmpAM.
 */
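/*
 * Illustrative sketch (assumed semantics, not taken from this file): a Sub
 * with the same operands directly before the compare-and-jump already sets
 * the flags, so the separate compare can be dropped:
 *
 *     t = Sub(a, b)            t = Sub(a, b)   ; flags set here
 *     CondJmp(a, b)      -->   CJmpAM          ; reuses the Sub's flags
 */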
static void ia32_optimize_CondJmp(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *cand = ia32_determine_cjmp_cand(irn, is_CondJmp_cand);
    int replace = 0;

    /* we found a possible candidate */
    replace = cand ? is_CondJmp_replacement(cand, irn) : 0;

    if (replace) {
        DBG((dbg, LEVEL_1, "replacing %+F by ", irn));

        set_irn_op(irn, op_ia32_CJmpAM);

        DB((dbg, LEVEL_1, "%+F\n", irn));
    }
}
// only optimize up to 48 stores behind IncSPs
#define MAXPUSH_OPTIMIZE 48
/**
 * Tries to create Pushes from IncSP, Store combinations.
 */
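/*
 * Illustrative sketch (assumed x86 semantics, not taken from this file):
 * stores into the slots just reserved by the IncSP become pushes, e.g. for
 * an IncSP of 8 bytes:
 *
 *     sub esp, 8                  push eax    ; fills the old [esp+4] slot
 *     mov [esp+4], eax     -->    push ebx    ; fills the old [esp] slot
 *     mov [esp],   ebx
 */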
static void ia32_create_Pushs(ir_node *irn, ia32_code_gen_t *cg) {
    int i;
    int offset;
    ir_node *node;
    ir_node *stores[MAXPUSH_OPTIMIZE];
    ir_node *block = get_nodes_block(irn);
    ir_graph *irg = cg->irg;
    ir_node *curr_sp;
    ir_mode *spmode = get_irn_mode(irn);

    memset(stores, 0, sizeof(stores));

    assert(be_is_IncSP(irn));

    offset = be_get_IncSP_offset(irn);
    if (offset < 4)
        return;

    /*
     * We first walk the schedule after the IncSP node as long as we find
     * suitable stores that could be transformed to a push.
     * We save them into the stores array which is sorted by the frame offset/4
     * attached to the node.
     */
    for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
        ir_node *mem;
        int offset;
        int storeslot;

        // it has to be a store
        if (!is_ia32_Store(node))
            break;

        // it has to use our sp value
        if (get_irn_n(node, 0) != irn)
            continue;
        // store has to be attached to NoMem
        mem = get_irn_n(node, 3);
        if (!is_NoMem(mem))
            continue;

        // unfortunately we can't support the full AM format
        if ((get_ia32_am_flavour(node) & ia32_am_IS) != 0)
            break;

        offset = get_ia32_am_offs_int(node);

        storeslot = offset / 4;
        if (storeslot >= MAXPUSH_OPTIMIZE)
            continue;

        // storing into the same slot twice is bad (and shouldn't happen...)
        if (stores[storeslot] != NULL)
            break;

        // storing at half-slots is bad
        if (offset % 4 != 0)
            break;

        stores[storeslot] = node;
    }
    curr_sp = get_irn_n(irn, 0);

    // walk the stores in inverse order and create pushes for them
    i = (offset / 4) - 1;
    if (i >= MAXPUSH_OPTIMIZE) {
        i = MAXPUSH_OPTIMIZE - 1;
    }
    for ( ; i >= 0; --i) {
        const arch_register_t *spreg;
        ir_node *push;
        ir_node *val, *mem, *mem_proj;
        ir_node *store = stores[i];
        ir_node *noreg = ia32_new_NoReg_gp(cg);

        if (store == NULL || is_Bad(store))
            break;

        val = get_irn_n(store, 2);
        mem = get_irn_n(store, 3);
        spreg = arch_get_irn_register(cg->arch_env, curr_sp);

        // create a push
        push = new_rd_ia32_Push(NULL, irg, block, noreg, noreg, val, curr_sp, mem);

        set_ia32_am_support(push, ia32_am_Source);
        copy_ia32_Immop_attr(push, store);

        sched_add_before(irn, push);

        // create stackpointer proj
        curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
        arch_set_irn_register(cg->arch_env, curr_sp, spreg);
        sched_add_before(irn, curr_sp);

        // create memory proj
        mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);
        sched_add_before(irn, mem_proj);

        // use the memproj now
        exchange(store, mem_proj);

        // we can remove the store now
        sched_remove(store);

        offset -= 4;
    }
    be_set_IncSP_offset(irn, offset);

    // can we remove the IncSP now?
    if (offset == 0) {
        const ir_edge_t *edge, *next;

        foreach_out_edge_safe(irn, edge, next) {
            ir_node *arg = get_edge_src_irn(edge);
            int pos = get_edge_src_pos(edge);

            set_irn_n(arg, pos, curr_sp);
        }

        set_irn_n(irn, 0, new_Bad());
        sched_remove(irn);
    } else {
        set_irn_n(irn, 0, curr_sp);
    }
}
/**
 * Tries to optimize two consecutive IncSPs.
 */
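/*
 * Illustrative sketch (assumed semantics, not taken from this file): two
 * adjacent stack pointer adjustments collapse into a single one:
 *
 *     IncSP +4
 *     IncSP +8       -->       IncSP +12
 */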
static void ia32_optimize_IncSP(ir_node *irn, ia32_code_gen_t *cg) {
    ir_node *prev = be_get_IncSP_pred(irn);
    int real_uses = get_irn_n_edges(prev);

    if (be_is_IncSP(prev) && real_uses == 1) {
        /* first IncSP has only one IncSP user, kill the first one */
        int prev_offs = be_get_IncSP_offset(prev);
        int curr_offs = be_get_IncSP_offset(irn);

        /* the surviving IncSP gets the combined offset (prev is removed below) */
        be_set_IncSP_offset(irn, prev_offs + curr_offs);

        /* Omit the optimized IncSP */
        be_set_IncSP_pred(irn, be_get_IncSP_pred(prev));

        set_irn_n(prev, 0, new_Bad());
        sched_remove(prev);
    }
}
/**
 * Performs Peephole Optimizations.
 */
static void ia32_peephole_optimize_node(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;

    /* AMD CPUs want explicit compare before conditional jump */
    if (! ARCH_AMD(cg->opt_arch)) {
        if (is_ia32_TestJmp(irn))
            ia32_optimize_TestJmp(irn, cg);
        else if (is_ia32_CondJmp(irn))
            ia32_optimize_CondJmp(irn, cg);
    }

    if (be_is_IncSP(irn)) {
        // optimize_IncSP doesn't respect dependency edges yet...
        //ia32_optimize_IncSP(irn, cg);

        if (cg->opt & IA32_OPT_PUSHARGS)
            ia32_create_Pushs(irn, cg);
    }
}

void ia32_peephole_optimization(ir_graph *irg, ia32_code_gen_t *cg) {
    irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, cg);
}
/******************************************************************
 *                          AddressMode
 ******************************************************************/
static int node_is_ia32_comm(const ir_node *irn) {
    return is_ia32_irn(irn) ? is_ia32_commutative(irn) : 0;
}

static int ia32_get_irn_n_edges(const ir_node *irn) {
    const ir_edge_t *edge;
    int cnt = 0;

    foreach_out_edge(irn, edge) {
        cnt++;
    }

    return cnt;
}
/**
 * Determines if is_op_func returns true for pred.
 *
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_node(const ir_node *pred, is_op_func_t *is_op_func) {
    return is_op_func(pred);
}
/**
 * Determines if pred is a Proj and if is_op_func returns true for its predecessor
 * and if the predecessor is in block bl.
 *
 * @param bl         The block
 * @param pred       The node to be checked
 * @param is_op_func The check-function
 * @return 1 if conditions are fulfilled, 0 otherwise
 */
static int pred_is_specific_nodeblock(const ir_node *bl, const ir_node *pred,
                                      int (*is_op_func)(const ir_node *n))
{
    if (is_Proj(pred)) {
        pred = get_Proj_pred(pred);
        if ((bl == get_nodes_block(pred)) && is_op_func(pred)) {
            return 1;
        }
    }

    return 0;
}
/**
 * Checks if irn is a candidate for address calculation. We avoid transforming
 * adds into leas if they have a Load as predecessor, because then we can use
 * address mode for the add later on.
 *
 * A candidate fulfills:
 * - none of the operands is a Load within the same block OR
 * - all such Loads have more than one user
 *
 * @param irn The irn to check
 * @return 1 if irn is a candidate, 0 otherwise
 */
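/*
 * Illustrative sketch (assumed, not taken from this file): turning the Add
 * into a LEA here would keep the Load alive, while leaving the Add alone
 * lets the later AM pass fold the Load into it:
 *
 *     t = Load(p)                                  add ebx, [p]
 *     r = Add(t, b)        is better folded as
 */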
static int is_addr_candidate(const ir_node *irn) {
#ifndef AGGRESSIVE_AM
    const ir_node *block = get_nodes_block(irn);
    ir_node *left, *right;
    int n;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);

    if (pred_is_specific_nodeblock(block, left, is_ia32_Ld)) {
        n = ia32_get_irn_n_edges(left);
        /* load with only one user: don't create LEA */
        if (n == 1)
            return 0;
    }

    if (pred_is_specific_nodeblock(block, right, is_ia32_Ld)) {
        n = ia32_get_irn_n_edges(right);
        if (n == 1)
            return 0;
    }
#endif

    return 1;
}
/**
 * Checks if irn is a candidate for address mode.
 *
 * A candidate fulfills:
 * - at least one operand has to be a Load within the same block AND
 * - the Load must not have other users than the irn AND
 * - the irn must not have a frame entity set
 *
 * @param cg    The ia32 code generator
 * @param h     The height information of the irg
 * @param block The block the Loads must/must not be in
 * @param irn   The irn to check
 * @return 0 if irn is no candidate, 1 if left load can be used, 2 if right one, 3 for both
 */
static ia32_am_cand_t is_am_candidate(ia32_code_gen_t *cg, heights_t *h, const ir_node *block, ir_node *irn) {
    ir_node *in, *load, *other, *left, *right;
    int is_cand = 0, cand;
    int arity;

    if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn) || is_ia32_vfild(irn) || is_ia32_vfist(irn) ||
        is_ia32_GetST0(irn) || is_ia32_SetST0(irn) || is_ia32_xStoreSimple(irn))
        return 0;

    left  = get_irn_n(irn, 2);
    arity = get_irn_arity(irn);
    assert(arity == 5 || arity == 4);
    if (arity == 5) {
        /* binary op */
        right = get_irn_n(irn, 3);
    } else {
        /* unary op */
        right = left;
    }

    in = left;
    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
#ifndef AGGRESSIVE_AM
        int n;
        n = ia32_get_irn_n_edges(in);
        is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
#else
        is_cand = 1;
#endif

        load  = get_Proj_pred(in);
        other = right;

        /* 8bit Loads are not supported (for binary ops),
         * they cannot be used with every register */
        if (get_irn_arity(irn) != 4 && get_mode_size_bits(get_ia32_ls_mode(load)) < 16) {
            assert(get_irn_arity(irn) == 5);
            is_cand = 0;
        }

        /* If there is a data dependency of other irn from load: cannot use AM */
        if (is_cand && get_nodes_block(other) == block) {
            other   = skip_Proj(other);
            is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
            /* this could happen in loops */
            is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
        }
    }

    cand = is_cand ? IA32_AM_CAND_LEFT : IA32_AM_CAND_NONE;
    /* check the right operand */
    is_cand = 0;
    in      = right;

    if (pred_is_specific_nodeblock(block, in, is_ia32_Ld)) {
#ifndef AGGRESSIVE_AM
        int n;
        n = ia32_get_irn_n_edges(in);
        is_cand = (n == 1) ? 1 : is_cand;  /* load with more than one user: no AM */
#else
        is_cand = 1;
#endif

        load  = get_Proj_pred(in);
        other = left;

        /* 8bit Loads are not supported, they cannot be used with every register */
        if (get_mode_size_bits(get_ia32_ls_mode(load)) < 16)
            is_cand = 0;

        /* If there is a data dependency of other irn from load: cannot use load */
        if (is_cand && get_nodes_block(other) == block) {
            other   = skip_Proj(other);
            is_cand = heights_reachable_in_block(h, other, load) ? 0 : is_cand;
            /* this could happen in loops */
            is_cand = heights_reachable_in_block(h, load, irn) ? 0 : is_cand;
        }
    }

    cand = is_cand ? (cand | IA32_AM_CAND_RIGHT) : cand;

    /* if the irn has a frame entity: we do not use address mode */
    return get_ia32_frame_ent(irn) ? IA32_AM_CAND_NONE : cand;
}
/**
 * Compares the base and index addr and the load/store entities
 * and returns 1 if they are equal.
 */
static int load_store_addr_is_equal(const ir_node *load, const ir_node *store,
                                    const ir_node *addr_b, const ir_node *addr_i)
{
    if (get_irn_n(load, 0) != addr_b)
        return 0;
    if (get_irn_n(load, 1) != addr_i)
        return 0;

    if (get_ia32_frame_ent(load) != get_ia32_frame_ent(store))
        return 0;

    if (get_ia32_am_sc(load) != get_ia32_am_sc(store))
        return 0;
    if (is_ia32_am_sc_sign(load) != is_ia32_am_sc_sign(store))
        return 0;
    if (get_ia32_am_offs_int(load) != get_ia32_am_offs_int(store))
        return 0;
    if (get_ia32_ls_mode(load) != get_ia32_ls_mode(store))
        return 0;

    return 1;
}
typedef enum _ia32_take_lea_attr {
    IA32_LEA_ATTR_NONE  = 0,
    IA32_LEA_ATTR_BASE  = (1 << 0),
    IA32_LEA_ATTR_INDEX = (1 << 1),
    IA32_LEA_ATTR_OFFS  = (1 << 2),
    IA32_LEA_ATTR_SCALE = (1 << 3),
    IA32_LEA_ATTR_AMSC  = (1 << 4),
    IA32_LEA_ATTR_FENT  = (1 << 5)
} ia32_take_lea_attr;
/**
 * Decides if we have to keep the LEA operand or if we can assimilate it.
 */
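/*
 * Illustrative sketch (assumed x86 semantics, not taken from this file):
 * assimilating a LEA operand folds its address parts into the new LEA:
 *
 *     lea eax, [ebx + 8]               lea eax, [ebx + ecx + 8]
 *     add eax, ecx             -->
 */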
static int do_new_lea(ir_node *irn, ir_node *base, ir_node *index, ir_node *lea,
                      int have_am_sc, ia32_code_gen_t *cg)
{
    ir_entity *irn_ent = get_ia32_frame_ent(irn);
    ir_entity *lea_ent = get_ia32_frame_ent(lea);
    int ret_val = 0;
    int is_noreg_base  = be_is_NoReg(cg, base);
    int is_noreg_index = be_is_NoReg(cg, index);
    ia32_am_flavour_t am_flav = get_ia32_am_flavour(lea);

    /* If the Add and the LEA both have a different frame entity set: keep */
    if (irn_ent && lea_ent && (irn_ent != lea_ent))
        return IA32_LEA_ATTR_NONE;
    else if (! irn_ent && lea_ent)
        ret_val |= IA32_LEA_ATTR_FENT;

    /* If the Add and the LEA both have already an address mode symconst: keep */
    if (have_am_sc && get_ia32_am_sc(lea))
        return IA32_LEA_ATTR_NONE;
    else if (get_ia32_am_sc(lea))
        ret_val |= IA32_LEA_ATTR_AMSC;

    /* Check the different base-index combinations */

    if (! is_noreg_base && ! is_noreg_index) {
        /* Assimilate if base is the lea and the LEA is just a Base + Offset calculation */
        if ((base == lea) && ! (am_flav & ia32_I ? 1 : 0)) {
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (! is_noreg_base && is_noreg_index) {
        /* Base is set but index not */
        if (base == lea) {
            /* Base points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_I)
                ret_val |= IA32_LEA_ATTR_INDEX;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else if (am_flav & ia32_B ? 0 : 1) {
            /* Base is not the LEA but the LEA is an index only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else if (is_noreg_base && ! is_noreg_index) {
        /* Index is set but not base */
        if (index == lea) {
            /* Index points to LEA: assimilate everything */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;
            if (am_flav & ia32_B)
                ret_val |= IA32_LEA_ATTR_BASE;

            ret_val |= IA32_LEA_ATTR_INDEX;
        }
        else if (am_flav & ia32_I ? 0 : 1) {
            /* Index is not the LEA but the LEA is a base only calculation: assimilate */
            if (am_flav & ia32_O)
                ret_val |= IA32_LEA_ATTR_OFFS;
            if (am_flav & ia32_S)
                ret_val |= IA32_LEA_ATTR_SCALE;

            ret_val |= IA32_LEA_ATTR_BASE;
        }
        else
            return IA32_LEA_ATTR_NONE;
    }
    else {
        assert(0 && "There must have been set base or index");
    }

    return ret_val;
}
/**
 * Adds res before irn into schedule if irn was scheduled.
 * @param irn The schedule point
 * @param res The node to be scheduled
 */
static INLINE void try_add_to_sched(ir_node *irn, ir_node *res) {
    if (sched_is_scheduled(irn))
        sched_add_before(irn, res);
}
/**
 * Removes node from schedule if it is not used anymore. If irn is a mode_T node
 * all its Projs are removed as well.
 * @param irn The irn to be removed from schedule
 */
static INLINE void try_remove_from_sched(ir_node *node) {
    int i, arity;

    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;
        foreach_out_edge(node, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            try_remove_from_sched(proj);
        }
    }

    if (get_irn_n_edges(node) != 0)
        return;

    if (sched_is_scheduled(node)) {
        sched_remove(node);
    }

    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        set_irn_n(node, i, new_Bad());
    }
}
/**
 * Folds Add or Sub to LEA if possible.
 */
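/*
 * Illustrative sketch (assumed semantics, not taken from this file):
 *
 *     Add(x, Const 8)          -->   lea res, [x + 8]
 *     Add(x, Shl(y, Const 2))  -->   lea res, [x + y*4]
 *     Sub(x, Const 8)          -->   lea res, [x - 8]
 */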
static ir_node *fold_addr(ia32_code_gen_t *cg, ir_node *irn) {
    ir_graph *irg      = get_irn_irg(irn);
    dbg_info *dbg_info = get_irn_dbg_info(irn);
    ir_node *block = get_nodes_block(irn);
    ir_node *res;
    ir_node *shift = NULL;
    ir_node *lea_o = NULL;
    ir_node *lea   = NULL;
    int isadd      = 0;
    int offs       = 0;
    int offs_cnst  = 0;
    int offs_lea   = 0;
    int scale      = 0;
    int have_am_sc = 0;
    int am_sc_sign = 0;
    ir_entity *am_sc   = NULL;
    ir_entity *lea_ent = NULL;
    ir_node *noreg = ia32_new_NoReg_gp(cg);
    ir_node *left, *right, *temp;
    ir_node *base, *index;
    int consumed_left_shift;
    ia32_am_flavour_t am_flav;

    if (is_ia32_Add(irn))
        isadd = 1;

    left  = get_irn_n(irn, 2);
    right = get_irn_n(irn, 3);
824 /* "normalize" arguments in case of add with two operands */
825 if (isadd && ! be_is_NoReg(cg, right)) {
826 /* put LEA == ia32_am_O as right operand */
827 if (is_ia32_Lea(left) && get_ia32_am_flavour(left) == ia32_am_O) {
828 set_irn_n(irn, 2, right);
829 set_irn_n(irn, 3, left);
835 /* put LEA != ia32_am_O as left operand */
836 if (is_ia32_Lea(right) && get_ia32_am_flavour(right) != ia32_am_O) {
837 set_irn_n(irn, 2, right);
838 set_irn_n(irn, 3, left);
844 /* put SHL as left operand iff left is NOT a LEA */
845 if (! is_ia32_Lea(left) && pred_is_specific_node(right, is_ia32_Shl)) {
846 set_irn_n(irn, 2, right);
847 set_irn_n(irn, 3, left);
    /* check for operation with immediate */
    if (is_ia32_ImmConst(irn)) {
        tarval *tv = get_ia32_Immop_tarval(irn);

        DBG((dbg, LEVEL_1, "\tfound op with imm const"));

        offs_cnst = get_tarval_long(tv);
    }
    else if (isadd && is_ia32_ImmSymConst(irn)) {
        DBG((dbg, LEVEL_1, "\tfound op with imm symconst"));

        have_am_sc = 1;
        am_sc      = get_ia32_Immop_symconst(irn);
        am_sc_sign = is_ia32_am_sc_sign(irn);
    }
    /* determine the operand which needs to be checked */
    temp = be_is_NoReg(cg, right) ? left : right;

    /* check if right operand is AMConst (LEA with ia32_am_O)  */
    /* but we can only eat it up if there is no other symconst */
    /* because the linker won't accept two symconsts           */
    if (! have_am_sc && is_ia32_Lea(temp) && get_ia32_am_flavour(temp) == ia32_am_O) {
        DBG((dbg, LEVEL_1, "\tgot op with LEA am_O"));

        offs_lea   = get_ia32_am_offs_int(temp);
        am_sc      = get_ia32_am_sc(temp);
        am_sc_sign = is_ia32_am_sc_sign(temp);
        have_am_sc = 1;

        lea_o = temp;

        if (temp == base)
            base = noreg;
        else if (temp == right)
            right = noreg;
    }
    /* default for add -> make right operand to index */
    if (isadd) {
        index = right;
        consumed_left_shift = -1;

        DBG((dbg, LEVEL_1, "\tgot LEA candidate with index %+F\n", index));

        /* determine the operand which needs to be checked */
        temp = left;
        if (is_ia32_Lea(left)) {
            temp = right;
            consumed_left_shift = 0;
        }

        /* check for SHL 1,2,3 */
        if (pred_is_specific_node(temp, is_ia32_Shl)) {
            shift = temp;

            if (is_ia32_ImmConst(temp)) {
                long shiftval = get_tarval_long(get_ia32_Immop_tarval(temp));

                if (shiftval <= 3) {
                    index = get_irn_n(temp, 2);
                    consumed_left_shift = consumed_left_shift < 0 ? 1 : 0;

                    scale = shiftval;

                    DBG((dbg, LEVEL_1, "\tgot scaled index %+F\n", index));
                }
            }
        }
    }
    if (! be_is_NoReg(cg, index)) {
        /* if we have index, but left == right -> no base */
        if (left == right) {
            base = noreg;
        }
        else if (consumed_left_shift == 1) {
            /* -> base is right operand */
            base = (right == lea_o) ? noreg : right;
        }
    }
    /* Try to assimilate a LEA as left operand */
    if (is_ia32_Lea(left) && (get_ia32_am_flavour(left) != ia32_am_O)) {
        /* check if we can assimilate the LEA */
        int take_attr = do_new_lea(irn, base, index, left, have_am_sc, cg);

        if (take_attr == IA32_LEA_ATTR_NONE) {
            DBG((dbg, LEVEL_1, "\tleave old LEA, creating new one\n"));
        }
        else {
            DBG((dbg, LEVEL_1, "\tgot LEA as left operand ... assimilating\n"));
            lea = left; /* for statistics */

            if (take_attr & IA32_LEA_ATTR_OFFS)
                offs = get_ia32_am_offs_int(left);

            if (take_attr & IA32_LEA_ATTR_AMSC) {
                am_sc      = get_ia32_am_sc(left);
                have_am_sc = 1;
                am_sc_sign = is_ia32_am_sc_sign(left);
            }

            if (take_attr & IA32_LEA_ATTR_SCALE)
                scale = get_ia32_am_scale(left);

            if (take_attr & IA32_LEA_ATTR_BASE)
                base = get_irn_n(left, 0);

            if (take_attr & IA32_LEA_ATTR_INDEX)
                index = get_irn_n(left, 1);

            if (take_attr & IA32_LEA_ATTR_FENT)
                lea_ent = get_ia32_frame_ent(left);
        }
    }
    /* ok, we can create a new LEA */
    res = new_rd_ia32_Lea(dbg_info, irg, block, base, index);

    /* add the old offset of a previous LEA */
    add_ia32_am_offs_int(res, offs);

    /* add the new offset */
    if (isadd) {
        add_ia32_am_offs_int(res, offs_cnst);
        add_ia32_am_offs_int(res, offs_lea);
    } else {
        /* either lea_O-cnst, -cnst or -lea_O */
        if (offs_cnst != 0) {
            add_ia32_am_offs_int(res, offs_lea);
            add_ia32_am_offs_int(res, -offs_cnst);
        } else {
            add_ia32_am_offs_int(res, offs_lea);
        }
    }
    /* set the address mode symconst */
    if (have_am_sc) {
        set_ia32_am_sc(res, am_sc);
        if (am_sc_sign)
            set_ia32_am_sc_sign(res);
    }

    /* copy the frame entity (could be set in case of Add */
    /* which was a FrameAddr)                             */
    if (lea_ent != NULL) {
        set_ia32_frame_ent(res, lea_ent);
        set_ia32_use_frame(res);
    } else {
        set_ia32_frame_ent(res, get_ia32_frame_ent(irn));
        if (is_ia32_use_frame(irn))
            set_ia32_use_frame(res);
    }
    set_ia32_am_scale(res, scale);

    am_flav = ia32_am_N;
    /* determine new am flavour */
    if (offs || offs_cnst || offs_lea || have_am_sc) {
        am_flav |= ia32_O;
    }
    if (! be_is_NoReg(cg, base)) {
        am_flav |= ia32_B;
    }
    if (! be_is_NoReg(cg, index)) {
        am_flav |= ia32_I;
    }
    if (scale > 0) {
        am_flav |= ia32_S;
    }
    set_ia32_am_flavour(res, am_flav);

    set_ia32_op_type(res, ia32_AddrModeS);

    SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));

    DBG((dbg, LEVEL_1, "\tLEA [%+F + %+F * %d + %d]\n", base, index, scale, get_ia32_am_offs_int(res)));
    assert(irn && "Couldn't find result proj");

    /* get the result Proj of the Add/Sub */
    try_add_to_sched(irn, res);

    /* exchange the old op with the new LEA */
    try_remove_from_sched(irn);
    /* we will exchange it, report here before the Proj is created */
    if (shift && lea && lea_o) {
        try_remove_from_sched(shift);
        try_remove_from_sched(lea);
        try_remove_from_sched(lea_o);
        DBG_OPT_LEA4(irn, lea_o, lea, shift, res);
    } else if (shift && lea) {
        try_remove_from_sched(shift);
        try_remove_from_sched(lea);
        DBG_OPT_LEA3(irn, lea, shift, res);
    } else if (shift && lea_o) {
        try_remove_from_sched(shift);
        try_remove_from_sched(lea_o);
        DBG_OPT_LEA3(irn, lea_o, shift, res);
    } else if (lea && lea_o) {
        try_remove_from_sched(lea);
        try_remove_from_sched(lea_o);
        DBG_OPT_LEA3(irn, lea_o, lea, res);
    } else if (shift) {
        try_remove_from_sched(shift);
        DBG_OPT_LEA2(irn, shift, res);
    } else if (lea) {
        try_remove_from_sched(lea);
        DBG_OPT_LEA2(irn, lea, res);
    } else if (lea_o) {
        try_remove_from_sched(lea_o);
        DBG_OPT_LEA2(irn, lea_o, res);
    } else {
        DBG_OPT_LEA1(irn, res);
    }

    /* now exchange the old op with the new LEA */
    exchange(irn, res);

    return res;
}
/**
 * Merges a Load/Store node with a LEA.
 *
 * @param irn The Load/Store node
 * @param lea The LEA
 */
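/*
 * Illustrative sketch (assumed x86 semantics, not taken from this file):
 * the address computed by the LEA moves into the Load/Store itself:
 *
 *     lea eax, [ebx + ecx*4 + 8]            mov edx, [ebx + ecx*4 + 8]
 *     mov edx, [eax]                 -->
 */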
static void merge_loadstore_lea(ir_node *irn, ir_node *lea) {
    ir_entity *irn_ent = get_ia32_frame_ent(irn);
    ir_entity *lea_ent = get_ia32_frame_ent(lea);

    /* If the irn and the LEA both have a different frame entity set: do not merge */
    if (irn_ent != NULL && lea_ent != NULL && (irn_ent != lea_ent))
        return;
    else if (irn_ent == NULL && lea_ent != NULL) {
        set_ia32_frame_ent(irn, lea_ent);
        set_ia32_use_frame(irn);
    }

    /* get the AM attributes from the LEA */
    add_ia32_am_offs_int(irn, get_ia32_am_offs_int(lea));
    set_ia32_am_scale(irn, get_ia32_am_scale(lea));
    set_ia32_am_flavour(irn, get_ia32_am_flavour(lea));

    set_ia32_am_sc(irn, get_ia32_am_sc(lea));
    if (is_ia32_am_sc_sign(lea))
        set_ia32_am_sc_sign(irn);

    set_ia32_op_type(irn, is_ia32_Ld(irn) ? ia32_AddrModeS : ia32_AddrModeD);

    /* set base and index */
    set_irn_n(irn, 0, get_irn_n(lea, 0));
    set_irn_n(irn, 1, get_irn_n(lea, 1));

    try_remove_from_sched(lea);

    /* clear remat flag */
    set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

    if (is_ia32_Ld(irn))
        DBG_OPT_LOAD_LEA(lea, irn);
    else
        DBG_OPT_STORE_LEA(lea, irn);
}
/**
 * Sets new_right index of irn to right and new_left index to left.
 * Also exchanges left and right.
 */
static void exchange_left_right(ir_node *irn, ir_node **left, ir_node **right, int new_left, int new_right) {
    ir_node *temp;

    set_irn_n(irn, new_right, *right);
    set_irn_n(irn, new_left, *left);

    temp   = *left;
    *left  = *right;
    *right = temp;

    /* this is only needed for Compares, but currently ALL nodes
     * have this attribute :-) */
    set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
}
/**
 * Performs address calculation optimization (creates LEAs if possible).
 */
static void optimize_lea(ir_node *irn, void *env) {
    ia32_code_gen_t *cg = env;

    if (! is_ia32_irn(irn))
        return;
    /* Following cases can occur:                                        */
    /* - Sub (l, imm)                  -> LEA [base - offset]            */
    /* - Sub (l, r == LEA with ia32_am_O) -> LEA [base - offset]         */
    /* - Add (l, imm)                  -> LEA [base + offset]            */
    /* - Add (l, r == LEA with ia32_am_O) -> LEA [base + offset]         */
    /* - Add (l == LEA with ia32_am_O, r) -> LEA [base + offset]         */
    /* - Add (l, r)                    -> LEA [base + index * scale]     */
    /*   with scale > 1 iff l/r == shl (1,2,3)                           */
    if (is_ia32_Sub(irn) || is_ia32_Add(irn)) {
        ir_node *res;

        if (!is_addr_candidate(irn))
            return;

        DBG((dbg, LEVEL_1, "\tfound address calculation candidate %+F ... ", irn));
        res = fold_addr(cg, irn);

        if (res != irn)
            DB((dbg, LEVEL_1, "transformed into %+F\n", res));
        else
            DB((dbg, LEVEL_1, "not transformed\n"));
    } else if (is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn)) {
        /* - Load  -> LEA into Load  } TODO: If the LEA is used by more than one Load/Store */
        /* - Store -> LEA into Store }       it might be better to keep the LEA             */
        ir_node *left = get_irn_n(irn, 0);

        if (is_ia32_Lea(left)) {
            const ir_edge_t *edge, *ne;
            ir_node *src;

            /* merge all Loads/Stores connected to this LEA with the LEA */
            foreach_out_edge_safe(left, edge, ne) {
                src = get_edge_src_irn(edge);

                if (src && (get_edge_src_pos(edge) == 0) && (is_ia32_Ld(src) || is_ia32_St(src) || is_ia32_Store8Bit(src))) {
                    DBG((dbg, LEVEL_1, "\nmerging %+F into %+F\n", left, irn));
                    if (! is_ia32_got_lea(src))
                        merge_loadstore_lea(src, left);
                    set_ia32_got_lea(src);
                }
            }
        }
    }
}
/**
 * Checks for address mode patterns and performs the
 * necessary transformations.
 * This function is called by a walker.
 */
static void optimize_am(ir_node *irn, void *env) {
    ia32_am_opt_env_t *am_opt_env = env;
    ia32_code_gen_t *cg = am_opt_env->cg;
    ir_graph *irg = get_irn_irg(irn);
    heights_t *h = am_opt_env->h;
    ir_node *block, *left, *right;
    ir_node *store, *load, *mem_proj;
    ir_node *addr_b, *addr_i;
    int need_exchange_on_fail = 0;
    ia32_am_type_t am_support;
    ia32_am_cand_t cand;
    ia32_am_cand_t orig_cand;
    int dest_possible;
    int source_possible;

    if (!is_ia32_irn(irn) || is_ia32_Ld(irn) || is_ia32_St(irn) || is_ia32_Store8Bit(irn))
        return;
    if (is_ia32_Lea(irn))
        return;

    am_support = get_ia32_am_support(irn);
    block = get_nodes_block(irn);

    DBG((dbg, LEVEL_1, "checking for AM\n"));
    /* fold following patterns:                                     */
    /* - op -> Load into AMop with am_Source                        */
    /*   conditions:                                                */
    /*   - op is am_Source capable AND                              */
    /*   - the Load is only used by this op AND                     */
    /*   - the Load is in the same block                            */
    /* - Store -> op -> Load into AMop with am_Dest                 */
    /*   conditions:                                                */
    /*   - op is am_Dest capable AND                                */
    /*   - the Store uses the same address as the Load AND          */
    /*   - the Load is only used by this op AND                     */
    /*   - the Load and Store are in the same block AND             */
    /*   - nobody else uses the result of the op                    */
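    /*
     * Illustrative sketch (assumed x86 semantics, not taken from this file):
     *
     *   am_Source:   mov eax, [mem]                    add ebx, [mem]
     *                add ebx, eax              -->
     *
     *   am_Dest:     mov eax, [mem]
     *                add eax, ebx                      add [mem], ebx
     *                mov [mem], eax            -->
     */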
    if (get_ia32_am_support(irn) == ia32_am_None)
        return;

    cand = is_am_candidate(cg, h, block, irn);
    if (cand == IA32_AM_CAND_NONE)
        return;

    orig_cand = cand;
    DBG((dbg, LEVEL_1, "\tfound address mode candidate %+F ... ", irn));

    left = get_irn_n(irn, 2);
    if (get_irn_arity(irn) == 4) {
        /* it's a "unary" operation */
        right = left;
        assert(cand == IA32_AM_CAND_BOTH);
    } else {
        right = get_irn_n(irn, 3);
    }

    dest_possible = am_support & ia32_am_Dest ? 1 : 0;
    source_possible = am_support & ia32_am_Source ? 1 : 0;
    if (dest_possible) {
        addr_b = NULL;
        addr_i = NULL;
        store  = NULL;

        /* we should only have 1 user which is a store */
        if (ia32_get_irn_n_edges(irn) == 1) {
            ir_node *succ = get_edge_src_irn(get_irn_out_edge_first(irn));

            if (is_ia32_xStore(succ) || is_ia32_Store(succ)) {
                store  = succ;
                addr_b = get_irn_n(store, 0);
                addr_i = get_irn_n(store, 1);
            }
        }

        if (store == NULL) {
            dest_possible = 0;
        }
    }
    if (dest_possible) {
        /* normalize nodes, we need the interesting load on the left side */
        if (cand & IA32_AM_CAND_RIGHT) {
            load = get_Proj_pred(right);
            if (load_store_addr_is_equal(load, store, addr_b, addr_i)) {
                exchange_left_right(irn, &left, &right, 3, 2);
                need_exchange_on_fail ^= 1;
                if (cand == IA32_AM_CAND_RIGHT)
                    cand = IA32_AM_CAND_LEFT;
            }
        }
    }
    if (dest_possible) {
        if (cand & IA32_AM_CAND_LEFT && is_Proj(left)) {
            load = get_Proj_pred(left);

#ifndef AGGRESSIVE_AM
            /* we have to be the only user of the load */
            if (get_irn_n_edges(left) > 1) {
                dest_possible = 0;
            }
#endif
        } else {
            dest_possible = 0;
        }
    }
    if (dest_possible) {
        /* the store has to use the loads memory or the same memory
         * as the load */
        ir_node *loadmem = get_irn_n(load, 2);
        ir_node *storemem = get_irn_n(store, 3);
        assert(get_irn_mode(loadmem) == mode_M);
        assert(get_irn_mode(storemem) == mode_M);
        if (storemem != loadmem || !is_Proj(storemem)
                || get_Proj_pred(storemem) != load) {
            dest_possible = 0;
        }
    }

    if (dest_possible) {
        /* Compare Load and Store address */
        if (!load_store_addr_is_equal(load, store, addr_b, addr_i))
            dest_possible = 0;
    }
    if (dest_possible) {
        /* all conditions fulfilled, do the transformation */
        assert(cand & IA32_AM_CAND_LEFT);

        /* set new base, index and attributes */
        set_irn_n(irn, 0, addr_b);
        set_irn_n(irn, 1, addr_i);
        add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
        set_ia32_am_scale(irn, get_ia32_am_scale(load));
        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
        set_ia32_op_type(irn, ia32_AddrModeD);
        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

        set_ia32_am_sc(irn, get_ia32_am_sc(load));
        if (is_ia32_am_sc_sign(load))
            set_ia32_am_sc_sign(irn);

        /* connect to Load memory and disconnect Load */
        if (get_irn_arity(irn) == 5) {
            /* binary AMop */
            set_irn_n(irn, 4, get_irn_n(load, 2));
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
        } else {
            /* unary AMop */
            set_irn_n(irn, 3, get_irn_n(load, 2));
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
        }

        set_irn_mode(irn, mode_M);

        /* connect the memory Proj of the Store to the op */
        mem_proj = ia32_get_proj_for_mode(store, mode_M);
        edges_reroute(mem_proj, irn, irg);

        /* clear remat flag */
        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

        try_remove_from_sched(load);
        try_remove_from_sched(store);
        DBG_OPT_AM_D(load, store, irn);

        DB((dbg, LEVEL_1, "merged with %+F and %+F into dest AM\n", load, store));
        need_exchange_on_fail = 0;
        source_possible = 0;
    }
    if (source_possible) {
        /* normalize ops, we need the load on the right */
        if (cand == IA32_AM_CAND_LEFT) {
            if (node_is_ia32_comm(irn)) {
                exchange_left_right(irn, &left, &right, 3, 2);
                need_exchange_on_fail ^= 1;
                cand = IA32_AM_CAND_RIGHT;
            } else {
                source_possible = 0;
            }
        }
    }
    if (source_possible) {
        /* all conditions fulfilled, do transform */
        assert(cand & IA32_AM_CAND_RIGHT);
        load = get_Proj_pred(right);

        if (get_irn_n_edges(load) > 1) {
            source_possible = 0;
        }
    }
    if (source_possible) {
        addr_b = get_irn_n(load, 0);
        addr_i = get_irn_n(load, 1);

        /* set new base, index and attributes */
        set_irn_n(irn, 0, addr_b);
        set_irn_n(irn, 1, addr_i);
        add_ia32_am_offs_int(irn, get_ia32_am_offs_int(load));
        set_ia32_am_scale(irn, get_ia32_am_scale(load));
        set_ia32_am_flavour(irn, get_ia32_am_flavour(load));
        set_ia32_op_type(irn, ia32_AddrModeS);
        set_ia32_frame_ent(irn, get_ia32_frame_ent(load));
        set_ia32_ls_mode(irn, get_ia32_ls_mode(load));

        set_ia32_am_sc(irn, get_ia32_am_sc(load));
        if (is_ia32_am_sc_sign(load))
            set_ia32_am_sc_sign(irn);

        /* clear remat flag */
        set_ia32_flags(irn, get_ia32_flags(irn) & ~arch_irn_flags_rematerializable);

        if (is_ia32_use_frame(load)) {
            if (get_ia32_frame_ent(load) == NULL) {
                set_ia32_need_stackent(irn);
            }
            set_ia32_use_frame(irn);
        }

        /* connect to Load memory and disconnect Load */
        if (get_irn_arity(irn) == 5) {
            /* binary AMop */
            set_irn_n(irn, 3, ia32_get_admissible_noreg(cg, irn, 3));
            set_irn_n(irn, 4, get_irn_n(load, 2));
        } else {
            assert(get_irn_arity(irn) == 4);
            /* unary AMop */
            set_irn_n(irn, 2, ia32_get_admissible_noreg(cg, irn, 2));
            set_irn_n(irn, 3, get_irn_n(load, 2));
        }

        DBG_OPT_AM_S(load, irn);
        /* If Load has a memory Proj, connect it to the op */
        mem_proj = ia32_get_proj_for_mode(load, mode_M);
        if (mem_proj != NULL) {
            ir_node *res_proj;
            ir_mode *mode = get_irn_mode(irn);

            res_proj = new_rd_Proj(get_irn_dbg_info(irn), irg,
                                   get_nodes_block(irn), new_Unknown(mode_T),
                                   mode, 0);
            set_irn_mode(irn, mode_T);
            edges_reroute(irn, res_proj, irg);
            set_Proj_pred(res_proj, irn);

            set_Proj_pred(mem_proj, irn);
            set_Proj_proj(mem_proj, 1);

            if (sched_is_scheduled(irn)) {
                sched_add_after(irn, res_proj);
                sched_add_after(irn, mem_proj);
            }
        }

        if (get_irn_n_edges(load) == 0) {
            try_remove_from_sched(load);
        }

        need_exchange_on_fail = 0;

        DB((dbg, LEVEL_1, "merged with %+F into source AM\n", load));
    }
    /* was exchanged but optimize failed: exchange back */
    if (need_exchange_on_fail) {
        exchange_left_right(irn, &left, &right, 3, 2);
    }
}
/**
 * Performs address mode optimization.
 */
void ia32_optimize_addressmode(ia32_code_gen_t *cg) {
    /* if we are supposed to do AM or LEA optimization: recalculate edges */
    if (cg->opt & (IA32_OPT_DOAM | IA32_OPT_LEA)) {
        edges_deactivate(cg->irg);
        edges_activate(cg->irg);
    }
    else {
        /* no optimizations at all */
        return;
    }

    /* beware: we cannot optimize LEA and AM in one run because    */
    /*         LEA optimization adds new nodes to the irg which    */
    /*         invalidates the phase data                          */

    if (cg->opt & IA32_OPT_LEA) {
        irg_walk_blkwise_graph(cg->irg, NULL, optimize_lea, cg);
    }

    if (cg->dump)
        be_dump(cg->irg, "-lea", dump_ir_block_graph_sched);

    /* hack for now, so these don't get created during optimize, because then
     * they will be unknown to the heights module
     */
    ia32_new_NoReg_gp(cg);
    ia32_new_NoReg_fp(cg);
    ia32_new_NoReg_vfp(cg);

    if (cg->opt & IA32_OPT_DOAM) {
        /* we need height information for am optimization */
        heights_t *h = heights_new(cg->irg);
        ia32_am_opt_env_t env;

        env.cg = cg;
        env.h  = h;

        irg_walk_blkwise_graph(cg->irg, NULL, optimize_am, &env);

        heights_free(h);
    }
}
void ia32_init_optimize(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}