 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
 * This file is part of libFirm.
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 * @brief Implements several optimizations for IA32.
 * @author Matthias Braun, Christian Wuerdig
#include "firm_types.h"
#include "../benode.h"
#include "../besched.h"
#include "../bepeephole.h"
#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
#include "ia32_architecture.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static ia32_code_gen_t *cg;

static void copy_mark(const ir_node *old, ir_node *new)
    if (is_ia32_is_reload(old))
        set_ia32_is_reload(new);
    if (is_ia32_is_spill(old))
        set_ia32_is_spill(new);
    if (is_ia32_is_remat(old))
        set_ia32_is_remat(new);
typedef enum produces_flag_t {

 * Return which usable flag the given node produces
 *
 * @param node  the node to check
 * @param pn    the projection number of the used result
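 *
 * Illustrative idea (a sketch, not meant as verbatim behaviour): an ia32_And
 * already sets the zero flag according to its result, so a later test of that
 * result against zero can reuse the flag (produces_flag_zero) and the Test
 * node can be dropped.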
static produces_flag_t produces_test_flag(ir_node *node, int pn)
    const ia32_immediate_attr_t *imm_attr;

    if (!is_ia32_irn(node))
        return produces_no_flag;

    switch (get_ia32_irn_opcode(node)) {
        assert(n_ia32_ShlD_count == n_ia32_ShrD_count);
        count = get_irn_n(node, n_ia32_ShlD_count);
        goto check_shift_amount;

        assert(n_ia32_Shl_count == n_ia32_Shr_count
               && n_ia32_Shl_count == n_ia32_Sar_count);
        count = get_irn_n(node, n_ia32_Shl_count);

        /* when the shift count is zero the flags are not affected, so we can
         * only do this for constants != 0 */
        if (!is_ia32_Immediate(count))
            return produces_no_flag;

        imm_attr = get_ia32_immediate_attr_const(count);
        if (imm_attr->symconst != NULL)
            return produces_no_flag;
        if ((imm_attr->offset & 0x1f) == 0)
            return produces_no_flag;

        return pn == pn_ia32_Mul_res_high ?
            produces_flag_carry : produces_no_flag;

        return produces_no_flag;

    return pn == pn_ia32_res ?
        produces_flag_zero : produces_no_flag;
 * Replace Cmp(x, 0) by a Test(x, x)
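 *
 * Illustrative effect (assembly sketch; eax is an arbitrary example):
 *     cmp eax, 0    ->    test eax, eax
 * The test form needs no immediate and sets ZF/SF the same way for this case.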
static void peephole_ia32_Cmp(ir_node *const node)
    ia32_immediate_attr_t const *imm;
    ia32_attr_t const           *attr;
    arch_register_t const       *reg;
    ir_edge_t const             *edge;
    ir_edge_t const             *tmp;

    if (get_ia32_op_type(node) != ia32_Normal)

    right = get_irn_n(node, n_ia32_Cmp_right);
    if (!is_ia32_Immediate(right))

    imm = get_ia32_immediate_attr_const(right);
    if (imm->symconst != NULL || imm->offset != 0)

    dbgi         = get_irn_dbg_info(node);
    block        = get_nodes_block(node);
    noreg        = ia32_new_NoReg_gp(cg);
    nomem        = get_irg_no_mem(current_ir_graph);
    op           = get_irn_n(node, n_ia32_Cmp_left);
    attr         = get_irn_generic_attr(node);
    ins_permuted = attr->data.ins_permuted;
    cmp_unsigned = attr->data.cmp_unsigned;

    if (is_ia32_Cmp(node)) {
        test = new_bd_ia32_Test(dbgi, block, noreg, noreg, nomem,
                                op, op, ins_permuted, cmp_unsigned);
        test = new_bd_ia32_Test8Bit(dbgi, block, noreg, noreg, nomem,
                                    op, op, ins_permuted, cmp_unsigned);

    set_ia32_ls_mode(test, get_ia32_ls_mode(node));

    reg = arch_irn_get_register(node, pn_ia32_Cmp_eflags);
    arch_irn_set_register(test, pn_ia32_Test_eflags, reg);

    foreach_out_edge_safe(node, edge, tmp) {
        ir_node *const user = get_edge_src_irn(edge);

        exchange(user, test);

    sched_add_before(node, test);
    copy_mark(node, test);
    be_peephole_exchange(node, test);
 * Peephole optimization for Test instructions.
 * - Remove the Test, if an appropriate flag was produced which is still live
 * - Change a Test(x, c) into an 8-bit Test, if 0 <= c < 256 (the opcode is
 *   3 bytes shorter)
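 *
 * Illustrative effect of the second case (assembly sketch; the register is an
 * arbitrary example):
 *     test eax, 0x20   (imm32 encoding)   ->   test al, 0x20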
static void peephole_ia32_Test(ir_node *node)
    ir_node *left  = get_irn_n(node, n_ia32_Test_left);
    ir_node *right = get_irn_n(node, n_ia32_Test_right);

    assert(n_ia32_Test_left == n_ia32_Test8Bit_left
           && n_ia32_Test_right == n_ia32_Test8Bit_right);

    if (left == right) { /* we need a test for 0 */
        ir_node *block = get_nodes_block(node);
        int      pn    = pn_ia32_res;
        const ir_edge_t *edge;

        if (get_nodes_block(left) != block)

        pn   = get_Proj_proj(left);
        left = get_Proj_pred(left);

        /* happens rarely, but if it does the code will panic */
        if (is_ia32_Unknown_GP(left))

        /* walk the schedule up and abort when we find left or some other
         * node that destroys the flags */
            schedpoint = sched_prev(schedpoint);
            if (schedpoint == left)
            if (arch_irn_is(schedpoint, modify_flags))
            if (schedpoint == block)
                panic("couldn't find left");

        /* make sure only Lg/Eq tests are used */
        foreach_out_edge(node, edge) {
            ir_node *user = get_edge_src_irn(edge);
            int      pnc  = get_ia32_condcode(user);

            if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {

        switch (produces_test_flag(left, pn)) {
        case produces_flag_zero:

        case produces_flag_carry:
            foreach_out_edge(node, edge) {
                ir_node *user = get_edge_src_irn(edge);
                int      pnc  = get_ia32_condcode(user);

                case pn_Cmp_Eq: pnc = pn_Cmp_Ge | ia32_pn_Cmp_unsigned; break;
                case pn_Cmp_Lg: pnc = pn_Cmp_Lt | ia32_pn_Cmp_unsigned; break;
                default: panic("unexpected pn");

                set_ia32_condcode(user, pnc);

        if (get_irn_mode(left) != mode_T) {
            set_irn_mode(left, mode_T);

            /* If there are other users, reroute them to the result Proj */
            if (get_irn_n_edges(left) != 2) {
                ir_node *res = new_r_Proj(block, left, mode_Iu, pn_ia32_res);

                edges_reroute(left, res, current_ir_graph);
                /* Reattach the result Proj to left */
                set_Proj_pred(res, left);

        flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
        flags_proj = new_r_Proj(block, left, flags_mode, pn_ia32_flags);
        arch_set_irn_register(flags_proj, &ia32_flags_regs[REG_EFLAGS]);

        assert(get_irn_mode(node) != mode_T);

        be_peephole_exchange(node, flags_proj);
    } else if (is_ia32_Immediate(right)) {
        ia32_immediate_attr_t const *const imm = get_ia32_immediate_attr_const(right);

        /* A test with a symconst is rather strange, but better safe than sorry */
        if (imm->symconst != NULL)

        offset = imm->offset;
        if (get_ia32_op_type(node) == ia32_AddrModeS) {
            ia32_attr_t *const attr = get_irn_generic_attr(node);

            if ((offset & 0xFFFFFF00) == 0) {
                /* attr->am_offs += 0; */
            } else if ((offset & 0xFFFF00FF) == 0) {
                ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 8);
                set_irn_n(node, n_ia32_Test_right, imm);
            } else if ((offset & 0xFF00FFFF) == 0) {
                ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 16);
                set_irn_n(node, n_ia32_Test_right, imm);
            } else if ((offset & 0x00FFFFFF) == 0) {
                ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 24);
                set_irn_n(node, n_ia32_Test_right, imm);

        } else if (offset < 256) {
            arch_register_t const* const reg = arch_get_irn_register(left);

            if (reg != &ia32_gp_regs[REG_EAX] &&
                reg != &ia32_gp_regs[REG_EBX] &&
                reg != &ia32_gp_regs[REG_ECX] &&
                reg != &ia32_gp_regs[REG_EDX]) {

        /* Technically we should build a Test8Bit because of the register
         * constraints, but nobody changes registers at this point anymore. */
        set_ia32_ls_mode(node, mode_Bu);
 * The AMD Athlon works faster when a RET is not the destination of a
 * conditional jump and is not directly preceded by another jump instruction.
 * This can be avoided by placing a Rep prefix before the Return.
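 *
 * Illustrative idea (a sketch): the return gets padded, e.g. by emitting it
 * with a Rep prefix (f3 c3) instead of a bare ret (c3).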
static void peephole_ia32_Return(ir_node *node) {
    ir_node *block, *irn;

    if (!ia32_cg_config.use_pad_return)

    block = get_nodes_block(node);

    /* check if this return is the first on the block */
    sched_foreach_reverse_from(node, irn) {
        switch (get_irn_opcode(irn)) {
            /* the return node itself, ignore */

            /* ignore no code generated */

            /* arg, IncSP 0 nodes might occur, ignore these */
            if (be_get_IncSP_offset(irn) == 0)

    /* ensure that the 3 byte return is generated */
    be_Return_set_emit_pop(node, 1);
/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE 48
 * Tries to create Pushes from IncSP, Store combinations.
 * The Stores are replaced by Pushes, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
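 *
 * Illustrative effect (assembly sketch; registers are arbitrary examples):
 *     sub esp, 8                 push ebx
 *     mov [esp+4], ebx    ->     push eax
 *     mov [esp], eax             (the IncSP becomes IncSP 0)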
static void peephole_IncSP_Store_to_push(ir_node *irn)
    ir_node *stores[MAXPUSH_OPTIMIZE];
    ir_node *first_push = NULL;
    ir_edge_t const *edge;
    ir_edge_t const *next;

    memset(stores, 0, sizeof(stores));

    assert(be_is_IncSP(irn));

    inc_ofs = be_get_IncSP_offset(irn);

    /*
     * We first walk the schedule after the IncSP node as long as we find
     * suitable Stores that could be transformed into a Push.
     * We save them into the stores array, which is indexed by the frame
     * offset/4 attached to the node.
     */
    for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
        /* it has to be a Store */
        if (!is_ia32_Store(node))

        /* it has to use our sp value */
        if (get_irn_n(node, n_ia32_base) != irn)

        /* the Store has to be attached to NoMem */
        mem = get_irn_n(node, n_ia32_mem);

        /* unfortunately we can't support the full AMs possible for push at
         * the moment. TODO: fix this */
        if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))

        offset = get_ia32_am_offs_int(node);
        /* we should NEVER access uninitialized stack BELOW the current SP */

        /* storing at half-slots is bad */
        if ((offset & 3) != 0)

        if (inc_ofs - 4 < offset || offset >= MAXPUSH_OPTIMIZE * 4)

        storeslot = offset >> 2;

        /* storing into the same slot twice is bad (and shouldn't happen...) */
        if (stores[storeslot] != NULL)

        stores[storeslot] = node;
        if (storeslot > maxslot)

    for (i = -1; i < maxslot; ++i) {
        if (stores[i + 1] == NULL)

    /* walk through the Stores and create Pushes for them */
    block  = get_nodes_block(irn);
    spmode = get_irn_mode(irn);

    for (; i >= 0; --i) {
        const arch_register_t *spreg;
        ir_node *val, *mem, *mem_proj;
        ir_node *store = stores[i];
        ir_node *noreg = ia32_new_NoReg_gp(cg);

        val   = get_irn_n(store, n_ia32_unary_op);
        mem   = get_irn_n(store, n_ia32_mem);
        spreg = arch_get_irn_register(curr_sp);

        push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg, mem, val, curr_sp);
        copy_mark(store, push);

        if (first_push == NULL)

        sched_add_after(skip_Proj(curr_sp), push);

        /* create stackpointer Proj */
        curr_sp = new_r_Proj(block, push, spmode, pn_ia32_Push_stack);
        arch_set_irn_register(curr_sp, spreg);

        /* create memory Proj */
        mem_proj = new_r_Proj(block, push, mode_M, pn_ia32_Push_M);

        /* use the memproj now */
        be_peephole_exchange(store, mem_proj);

    foreach_out_edge_safe(irn, edge, next) {
        ir_node *const src = get_edge_src_irn(edge);
        int      const pos = get_edge_src_pos(edge);

        if (src == first_push)

        set_irn_n(src, pos, curr_sp);

    be_set_IncSP_offset(irn, inc_ofs);
static void peephole_store_incsp(ir_node *store)
    ir_node *am_base = get_irn_n(store, n_ia32_Store_base);
    if (!be_is_IncSP(am_base)
            || get_nodes_block(am_base) != get_nodes_block(store))

    mem = get_irn_n(store, n_ia32_Store_mem);
    if (!is_ia32_NoReg_GP(get_irn_n(store, n_ia32_Store_index))

    int incsp_offset = be_get_IncSP_offset(am_base);
    if (incsp_offset <= 0)

    /* we have to be at offset 0 */
    int my_offset = get_ia32_am_offs_int(store);
    if (my_offset != 0) {
        /* TODO here: find out whether there is a store with offset 0 before
         * us and whether we can move it down to our place */

    ir_mode *ls_mode       = get_ia32_ls_mode(store);
    int      my_store_size = get_mode_size_bytes(ls_mode);

    if (my_offset + my_store_size > incsp_offset)

    /* correctness checking:
       - no one else may write to that stack slot
         (because after the transformation the IncSP won't allocate it anymore)
     */
    sched_foreach_reverse_from(store, node) {
        /* make sure no one else can use the space on the stack */
        arity = get_irn_arity(node);
        for (i = 0; i < arity; ++i) {
            ir_node *pred = get_irn_n(node, i);

            if (i == n_ia32_base &&
                    (get_ia32_op_type(node) == ia32_AddrModeS
                     || get_ia32_op_type(node) == ia32_AddrModeD)) {
                int      node_offset  = get_ia32_am_offs_int(node);
                ir_mode *node_ls_mode = get_ia32_ls_mode(node);
                int      node_size    = get_mode_size_bytes(node_ls_mode);
                /* overlapping with our position? abort */
                if (node_offset < my_offset + my_store_size
                        && node_offset + node_size >= my_offset)
                /* otherwise it's fine */

            /* strange use of esp: abort */

    /* all ok, change to push */
    dbgi  = get_irn_dbg_info(store);
    block = get_nodes_block(store);
    noreg = ia32_new_NoReg_gp(cg);
    val   = get_irn_n(store, n_ia32_Store_val);

    push = new_bd_ia32_Push(dbgi, block, noreg, noreg, mem,
    create_push(dbgi, current_ir_graph, block, am_base, store);
 * Return true if a mode can be stored in the GP register set
static inline int mode_needs_gp_reg(ir_mode *mode) {
    if (mode == mode_fpcw)
    if (get_mode_size_bits(mode) > 32)
    return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
 * Tries to create Pops from Load, IncSP combinations.
 * The Loads are replaced by Pops, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
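 *
 * Illustrative effect (assembly sketch; registers are arbitrary examples):
 *     mov eax, [esp]             pop eax
 *     mov ebx, [esp+4]    ->     pop ebx
 *     add esp, 8                 (the IncSP becomes IncSP 0)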
static void peephole_Load_IncSP_to_pop(ir_node *irn)
    const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
    int      i, maxslot, inc_ofs, ofs;
    ir_node *node, *pred_sp, *block;
    ir_node *loads[MAXPUSH_OPTIMIZE];
    unsigned regmask  = 0;
    unsigned copymask = ~0;

    memset(loads, 0, sizeof(loads));
    assert(be_is_IncSP(irn));

    inc_ofs = -be_get_IncSP_offset(irn);

    /*
     * We first walk the schedule before the IncSP node as long as we find
     * suitable Loads that could be transformed into a Pop.
     * We save them into the loads array, which is indexed by the frame
     * offset/4 attached to the node.
     */
    pred_sp = be_get_IncSP_pred(irn);
    for (node = sched_prev(irn); !sched_is_end(node); node = sched_prev(node)) {
        const arch_register_t *sreg, *dreg;

        /* it has to be a Load */
        if (!is_ia32_Load(node)) {
            if (be_is_Copy(node)) {
                if (!mode_needs_gp_reg(get_irn_mode(node))) {
                    /* not a GP copy, ignore */

                dreg = arch_get_irn_register(node);
                sreg = arch_get_irn_register(be_get_Copy_op(node));
                if (regmask & copymask & (1 << sreg->index)) {
                if (regmask & copymask & (1 << dreg->index)) {

                /* we CAN skip Copies if neither the destination nor the
                 * source is in our regmask, i.e. none of our future Pops
                 * will overwrite it */
                regmask  |= (1 << dreg->index) | (1 << sreg->index);
                copymask &= ~((1 << dreg->index) | (1 << sreg->index));

        /* we can handle only GP loads */
        if (!mode_needs_gp_reg(get_ia32_ls_mode(node)))

        /* it has to use our predecessor sp value */
        if (get_irn_n(node, n_ia32_base) != pred_sp) {
            /* it would be ok if this load did not use a Pop result,
             * but we do not check this */

        /* it should have NO index */
        if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))

        offset = get_ia32_am_offs_int(node);
        /* we should NEVER access uninitialized stack BELOW the current SP */

        /* loading from half-slots is bad */
        if ((offset & 3) != 0)

        if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)

        /* ignore those outside the possible window */
        if (offset > inc_ofs - 4)

        loadslot = offset >> 2;

        /* loading from the same slot twice is bad (and shouldn't happen...) */
        if (loads[loadslot] != NULL)

        dreg = arch_irn_get_register(node, pn_ia32_Load_res);
        if (regmask & (1 << dreg->index)) {
            /* this register is already used */

        regmask |= 1 << dreg->index;

        loads[loadslot] = node;
        if (loadslot > maxslot)

    /* find the first slot */
    for (i = maxslot; i >= 0; --i) {
        ir_node *load = loads[i];

    ofs = inc_ofs - (maxslot + 1) * 4;

    /* create a new IncSP if needed */
    block = get_nodes_block(irn);
        pred_sp = be_new_IncSP(esp, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
        sched_add_before(irn, pred_sp);

    /* walk through the Loads and create Pops for them */
    for (++i; i <= maxslot; ++i) {
        ir_node *load = loads[i];
        const ir_edge_t       *edge, *tmp;
        const arch_register_t *reg;

        mem = get_irn_n(load, n_ia32_mem);
        reg = arch_irn_get_register(load, pn_ia32_Load_res);

        pop = new_bd_ia32_Pop(get_irn_dbg_info(load), block, mem, pred_sp);
        arch_irn_set_register(pop, pn_ia32_Load_res, reg);

        copy_mark(load, pop);

        /* create stackpointer Proj */
        pred_sp = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_stack);
        arch_set_irn_register(pred_sp, esp);

        sched_add_before(irn, pop);

        foreach_out_edge_safe(load, edge, tmp) {
            ir_node *proj = get_edge_src_irn(edge);

            set_Proj_pred(proj, pop);

        /* we can remove the Load now */

    be_set_IncSP_offset(irn, -ofs);
    be_set_IncSP_pred(irn, pred_sp);
 * Find a free GP register if possible, else return NULL.
static const arch_register_t *get_free_gp_reg(void)
    for (i = 0; i < N_ia32_gp_REGS; ++i) {
        const arch_register_t *reg = &ia32_gp_regs[i];
        if (arch_register_type_is(reg, ignore))

        if (be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
            return &ia32_gp_regs[i];
 * Creates a Pop instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to pop
 *
 * @return the new stack value
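 *
 * Illustrative emitted code (a sketch): a plain "pop reg"; a Keep is added
 * behind it so the otherwise unused popped value does not get removed.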
static ir_node *create_pop(dbg_info *dbgi, ir_node *block,
                           ir_node *stack, ir_node *schedpoint,
                           const arch_register_t *reg)
    const arch_register_t *esp = &ia32_gp_regs[REG_ESP];

    pop = new_bd_ia32_Pop(dbgi, block, new_NoMem(), stack);

    stack = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_stack);
    arch_set_irn_register(stack, esp);
    val = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_res);
    arch_set_irn_register(val, reg);

    sched_add_before(schedpoint, pop);

    keep = be_new_Keep(block, 1, in);
    sched_add_before(schedpoint, keep);
 * Creates a Push instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 *
 * @return the new stack value
static ir_node *create_push(dbg_info *dbgi, ir_node *block,
                            ir_node *stack, ir_node *schedpoint)
    const arch_register_t *esp = &ia32_gp_regs[REG_ESP];

    ir_node *val   = ia32_new_Unknown_gp(cg);
    ir_node *noreg = ia32_new_NoReg_gp(cg);
    ir_node *nomem = new_NoMem();
    ir_node *push  = new_bd_ia32_Push(dbgi, block, noreg, noreg, nomem, val, stack);
    sched_add_before(schedpoint, push);

    stack = new_r_Proj(block, push, mode_Iu, pn_ia32_Push_stack);
    arch_set_irn_register(stack, esp);
 * Optimize an IncSP by replacing it with Push/Pop.
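 *
 * Illustrative effect (assembly sketch; the register comes from
 * get_free_gp_reg, ecx is just an example):
 *     add esp, 4    ->    pop ecx               (result is dead)
 *     sub esp, 8    ->    push eax; push eax    (dummy values)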
static void peephole_be_IncSP(ir_node *node)
    const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
    const arch_register_t *reg;

    /* first optimize incsp->incsp combinations */
    node = be_peephole_IncSP_IncSP(node);

    /* transform IncSP->Store combinations to Push where possible */
    peephole_IncSP_Store_to_push(node);

    /* transform Load->IncSP combinations to Pop where possible */
    peephole_Load_IncSP_to_pop(node);

    if (arch_get_irn_register(node) != esp)

    /* replace IncSP -4 by Pop freereg when possible */
    offset = be_get_IncSP_offset(node);
    if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
        (offset != -4 || ia32_cg_config.use_add_esp_4) &&
        (offset != +4 || ia32_cg_config.use_sub_esp_4) &&
        (offset != +8 || ia32_cg_config.use_sub_esp_8))

    /* we need a free register for pop */
    reg = get_free_gp_reg();

    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    stack = be_get_IncSP_pred(node);

    stack = create_pop(dbgi, block, stack, node, reg);

    stack = create_pop(dbgi, block, stack, node, reg);

    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    stack = be_get_IncSP_pred(node);
    stack = create_push(dbgi, block, stack, node);

    stack = create_push(dbgi, block, stack, node);

    be_peephole_exchange(node, stack);
 * Peephole optimisation for ia32_Consts.
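 *
 * Illustrative effect (assembly sketch; eax is an arbitrary example):
 *     mov eax, 0    ->    xor eax, eax
 * (two bytes instead of five, but it clobbers the flags)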
static void peephole_ia32_Const(ir_node *node)
    const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
    const arch_register_t       *reg;

    /* try to transform a mov 0, reg into a xor reg, reg */
    if (attr->offset != 0 || attr->symconst != NULL)

    if (ia32_cg_config.use_mov_0)

    /* xor destroys the flags, so no one may be using them */
    if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)

    reg = arch_get_irn_register(node);
    assert(be_peephole_get_reg_value(reg) == NULL);

    /* create xor(produceval, produceval) */
    block = get_nodes_block(node);
    dbgi  = get_irn_dbg_info(node);
    xor   = new_bd_ia32_Xor0(dbgi, block);
    arch_set_irn_register(xor, reg);

    sched_add_before(node, xor);

    copy_mark(node, xor);
    be_peephole_exchange(node, xor);
static inline int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
    return node == cg->noreg_gp;

static ir_node *create_immediate_from_int(int val)
    ir_graph *irg         = current_ir_graph;
    ir_node  *start_block = get_irg_start_block(irg);
        = new_bd_ia32_Immediate(NULL, start_block, NULL, 0, 0, val);
    arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);

static ir_node *create_immediate_from_am(const ir_node *node)
    ir_node           *block            = get_nodes_block(node);
    int                offset           = get_ia32_am_offs_int(node);
    int                sc_sign          = is_ia32_am_sc_sign(node);
    const ia32_attr_t *attr             = get_ia32_attr_const(node);
    int                sc_no_pic_adjust = attr->data.am_sc_no_pic_adjust;
    ir_entity         *entity           = get_ia32_am_sc(node);

    res = new_bd_ia32_Immediate(NULL, block, entity, sc_sign, sc_no_pic_adjust,
    arch_set_irn_register(res, &ia32_gp_regs[REG_GP_NOREG]);
static int is_am_one(const ir_node *node)
    int        offset = get_ia32_am_offs_int(node);
    ir_entity *entity = get_ia32_am_sc(node);

    return offset == 1 && entity == NULL;

static int is_am_minus_one(const ir_node *node)
    int        offset = get_ia32_am_offs_int(node);
    ir_entity *entity = get_ia32_am_sc(node);

    return offset == -1 && entity == NULL;
 * Transforms a LEA into an Add or SHL if possible.
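 *
 * Illustrative effect (assembly sketch; registers are arbitrary examples):
 *     lea eax, [eax+4]    ->    add eax, 4    (out register == base)
 *     lea eax, [eax*8]    ->    shl eax, 3    (out register == index)
 *     lea eax, [eax+1]    ->    inc eax       (if ia32_cg_config.use_incdec)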
static void peephole_ia32_Lea(ir_node *node)
    const arch_register_t *base_reg;
    const arch_register_t *index_reg;
    const arch_register_t *out_reg;

    assert(is_ia32_Lea(node));

    /* we can only do this if it is allowed to clobber the flags */
    if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)

    base  = get_irn_n(node, n_ia32_Lea_base);
    index = get_irn_n(node, n_ia32_Lea_index);

    if (is_noreg(cg, base)) {

    base_reg = arch_get_irn_register(base);

    if (is_noreg(cg, index)) {

    index_reg = arch_get_irn_register(index);

    if (base == NULL && index == NULL) {
        /* we shouldn't construct these in the first place... */
#ifdef DEBUG_libfirm
        ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");

    out_reg = arch_get_irn_register(node);
    scale   = get_ia32_am_scale(node);
    assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
    /* check if we have immediate values (frame entities should already be
     * expressed in the offsets) */
    if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {

    /* we can transform leas where the out register is the same as either the
     * base or index register back to an Add or Shl */
    if (out_reg == base_reg) {
#ifdef DEBUG_libfirm
        if (!has_immediates) {
            ir_fprintf(stderr, "Optimisation warning: found lea which is "

        goto make_add_immediate;

        if (scale == 0 && !has_immediates) {

        /* can't create an add */
    } else if (out_reg == index_reg) {
        if (has_immediates && scale == 0) {
            goto make_add_immediate;
        } else if (!has_immediates && scale > 0) {
            op2 = create_immediate_from_int(scale);
        } else if (!has_immediates) {
#ifdef DEBUG_libfirm
            ir_fprintf(stderr, "Optimisation warning: found lea which is "
        } else if (scale == 0 && !has_immediates) {

        /* can't create an add */

    /* can't create an add */

    if (ia32_cg_config.use_incdec) {
        if (is_am_one(node)) {
            dbgi  = get_irn_dbg_info(node);
            block = get_nodes_block(node);
            res   = new_bd_ia32_Inc(dbgi, block, op1);
            arch_set_irn_register(res, out_reg);

        if (is_am_minus_one(node)) {
            dbgi  = get_irn_dbg_info(node);
            block = get_nodes_block(node);
            res   = new_bd_ia32_Dec(dbgi, block, op1);
            arch_set_irn_register(res, out_reg);

    op2 = create_immediate_from_am(node);

    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    noreg = ia32_new_NoReg_gp(cg);
    nomem = new_NoMem();
    res   = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, op1, op2);
    arch_set_irn_register(res, out_reg);
    set_ia32_commutative(res);

    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    noreg = ia32_new_NoReg_gp(cg);
    nomem = new_NoMem();
    res   = new_bd_ia32_Shl(dbgi, block, op1, op2);
    arch_set_irn_register(res, out_reg);

    SET_IA32_ORIG_NODE(res, node);

    /* add the new ADD/SHL to the schedule */
    DBG_OPT_LEA2ADD(node, res);

    /* exchange the Add and the LEA */
    sched_add_before(node, res);
    copy_mark(node, res);
    be_peephole_exchange(node, res);
 * Split an Imul mem, imm into a Load mem and Imul reg, imm if possible.
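 *
 * Illustrative effect (assembly sketch; ecx stands for the free register
 * found by get_free_gp_reg):
 *     imul eax, [ebp+8], 42    ->    mov  ecx, [ebp+8]
 *                                    imul eax, ecx, 42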
static void peephole_ia32_Imul_split(ir_node *imul)
    const ir_node         *right = get_irn_n(imul, n_ia32_IMul_right);
    const arch_register_t *reg;

    if (!is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
        /* not the mem, imm form: ignore */

    /* we need a free register */
    reg = get_free_gp_reg();

    /* fine, we can rebuild it */
    res = turn_back_am(imul);
    arch_set_irn_register(res, reg);
 * Replace xorps r,r and xorpd r,r by pxor r,r
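 *
 * Illustrative effect (assembly sketch; xmm0 is an arbitrary example):
 *     xorps xmm0, xmm0    ->    pxor xmm0, xmm0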
static void peephole_ia32_xZero(ir_node *xor)
    set_irn_op(xor, op_ia32_xPzero);
 * Replace a 16-bit sign extension from ax to eax by the shorter cwtl
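 *
 * Illustrative effect (assembly sketch):
 *     movsx eax, ax    ->    cwtl    (one byte instead of three)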
static void peephole_ia32_Conv_I2I(ir_node *node)
    const arch_register_t *eax          = &ia32_gp_regs[REG_EAX];
    ir_mode               *smaller_mode = get_ia32_ls_mode(node);
    ir_node               *val          = get_irn_n(node, n_ia32_Conv_I2I_val);

    if (get_mode_size_bits(smaller_mode) != 16 ||
        !mode_is_signed(smaller_mode)          ||
        eax != arch_get_irn_register(val)      ||
        eax != arch_irn_get_register(node, pn_ia32_Conv_I2I_res))

    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    cwtl  = new_bd_ia32_Cwtl(dbgi, block, val);
    arch_set_irn_register(cwtl, eax);
    sched_add_before(node, cwtl);
    be_peephole_exchange(node, cwtl);
 * Register a peephole optimisation function.
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
    assert(op->ops.generic == NULL);
    op->ops.generic = (op_func)func;

/* Perform peephole-optimizations. */
void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
    /* register peephole optimisations */
    clear_irp_opcodes_generic_func();
    register_peephole_optimisation(op_ia32_Const,    peephole_ia32_Const);
    register_peephole_optimisation(op_be_IncSP,      peephole_be_IncSP);
    register_peephole_optimisation(op_ia32_Lea,      peephole_ia32_Lea);
    register_peephole_optimisation(op_ia32_Cmp,      peephole_ia32_Cmp);
    register_peephole_optimisation(op_ia32_Cmp8Bit,  peephole_ia32_Cmp);
    register_peephole_optimisation(op_ia32_Test,     peephole_ia32_Test);
    register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
    register_peephole_optimisation(op_be_Return,     peephole_ia32_Return);
    if (!ia32_cg_config.use_imul_mem_imm32)
        register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
    if (ia32_cg_config.use_pxor)
        register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);
    if (ia32_cg_config.use_short_sex_eax)
        register_peephole_optimisation(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I);

    be_peephole_opt(cg->birg);
 * Removes the node from the schedule if it is not used anymore. If the node
 * is a mode_T node, all its Projs are removed as well.
 *
 * @param node  the node to be removed from the schedule
static inline void try_kill(ir_node *node)
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge, *next;
        foreach_out_edge_safe(node, edge, next) {
            ir_node *proj = get_edge_src_irn(edge);

    if (get_irn_n_edges(node) != 0)

    if (sched_is_scheduled(node)) {
static void optimize_conv_store(ir_node *node)
    ir_mode *store_mode;

    if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))

    assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
    pred_proj = get_irn_n(node, n_ia32_Store_val);
    if (is_Proj(pred_proj)) {
        pred = get_Proj_pred(pred_proj);

    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))

    if (get_ia32_op_type(pred) != ia32_Normal)

    /* the store only stores the lower bits, so we only need the conv
     * if it shrinks the mode */
    conv_mode  = get_ia32_ls_mode(pred);
    store_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
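    /* e.g. a Conv widening mode_Hs to mode_Is in front of a 16-bit Store can
     * be bypassed: the stored low 16 bits are the same either way (sketch) */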
    set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
    if (get_irn_n_edges(pred_proj) == 0) {
        kill_node(pred_proj);
        if (pred != pred_proj)
static void optimize_load_conv(ir_node *node)
    ir_node *pred, *predpred;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))

    assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
    pred = get_irn_n(node, n_ia32_Conv_I2I_val);

    predpred = get_Proj_pred(pred);
    if (!is_ia32_Load(predpred))

    /* the load is sign extending the upper bits, so we only need the conv
     * if it shrinks the mode */
    load_mode = get_ia32_ls_mode(predpred);
    conv_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))

    if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
        /* change the load if it has only 1 user */
        if (get_irn_n_edges(pred) == 1) {
            if (get_mode_sign(conv_mode)) {
                newmode = find_signed_mode(load_mode);
                newmode = find_unsigned_mode(load_mode);

            assert(newmode != NULL);
            set_ia32_ls_mode(predpred, newmode);

            /* otherwise we have to keep the conv */

    exchange(node, pred);
static void optimize_conv_conv(ir_node *node)
    ir_node *pred_proj, *pred, *result_conv;
    ir_mode *pred_mode, *conv_mode;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))

    assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
    pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
    if (is_Proj(pred_proj))
        pred = get_Proj_pred(pred_proj);

    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))

    /* we know that after a conv, the upper bits are sign extended
     * so we only need the 2nd conv if it shrinks the mode */
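    /* e.g. a Conv to mode_Hs followed by a Conv to mode_Bs: the first conv
     * can be changed to convert to mode_Bs directly (sketch of the idea) */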
    conv_mode      = get_ia32_ls_mode(node);
    conv_mode_bits = get_mode_size_bits(conv_mode);
    pred_mode      = get_ia32_ls_mode(pred);
    pred_mode_bits = get_mode_size_bits(pred_mode);

    if (conv_mode_bits == pred_mode_bits
            && get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
        result_conv = pred_proj;
    } else if (conv_mode_bits <= pred_mode_bits) {
        /* if the 2nd conv is smaller than the first conv, then we can always
         * take the 2nd one */
        if (get_irn_n_edges(pred_proj) == 1) {
            result_conv = pred_proj;
            set_ia32_ls_mode(pred, conv_mode);

            /* Argh: we must change the opcode to 8bit AND copy the register
             * constraints */
            if (get_mode_size_bits(conv_mode) == 8) {
                set_irn_op(pred, op_ia32_Conv_I2I8Bit);
                set_ia32_in_req_all(pred, get_ia32_in_req_all(node));

            /* we don't want to end up with 2 loads, so we better do nothing */
            if (get_irn_mode(pred) == mode_T) {

            result_conv = exact_copy(pred);
            set_ia32_ls_mode(result_conv, conv_mode);

            /* Argh: we must change the opcode to 8bit AND copy the register
             * constraints */
            if (get_mode_size_bits(conv_mode) == 8) {
                set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
                set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));

        /* if both convs have the same sign, then we can take the smaller one */
        if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
            result_conv = pred_proj;

        /* no optimisation possible if the smaller conv is a sign extension */
        if (mode_is_signed(pred_mode)) {

        /* we can take the smaller conv if it is unsigned */
        result_conv = pred_proj;

    /* Some users (like Phis) won't be happy if we change the mode. */
    set_irn_mode(result_conv, get_irn_mode(node));

    exchange(node, result_conv);

    if (get_irn_n_edges(pred_proj) == 0) {
        kill_node(pred_proj);
        if (pred != pred_proj)

    optimize_conv_conv(result_conv);
static void optimize_node(ir_node *node, void *env)
    optimize_load_conv(node);
    optimize_conv_store(node);
    optimize_conv_conv(node);

 * Performs conv and address mode optimization.
void ia32_optimize_graph(ia32_code_gen_t *cg)
    irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

    be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);

void ia32_init_optimize(void)
    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");