/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Implements several optimizations for IA32.
 * @author      Matthias Braun, Christian Wuerdig
 */

#include "firm_types.h"

#include "bepeephole.h"

#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_architecture.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static void copy_mark(const ir_node *old, ir_node *newn)
{
	if (is_ia32_is_reload(old))
		set_ia32_is_reload(newn);
	if (is_ia32_is_spill(old))
		set_ia32_is_spill(newn);
	if (is_ia32_is_remat(old))
		set_ia32_is_remat(newn);
}

typedef enum produces_flag_t {
	produces_no_flag,
	produces_flag_zero,
	produces_flag_carry
} produces_flag_t;

/**
 * Return which usable flag the given node produces.
 *
 * @param node  the node to check
 * @param pn    the projection number of the used result
 */
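/* Example (illustrative): an "andl %ebx, %eax" already sets the zero flag
 * according to its result, so a later "testl %eax, %eax" is redundant; this
 * query is what lets peephole_ia32_Test below detect such cases. */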
static produces_flag_t produces_test_flag(ir_node *node, int pn)
{
	ir_node                     *count;
	const ia32_immediate_attr_t *imm_attr;

	if (!is_ia32_irn(node))
		return produces_no_flag;

	switch (get_ia32_irn_opcode(node)) {
		case iro_ia32_Add:
		case iro_ia32_Adc:
		case iro_ia32_And:
		case iro_ia32_Or:
		case iro_ia32_Xor:
		case iro_ia32_Sub:
		case iro_ia32_Sbb:
		case iro_ia32_Neg:
		case iro_ia32_Inc:
		case iro_ia32_Dec:
			break;

		case iro_ia32_ShlD:
		case iro_ia32_ShrD:
			assert((int)n_ia32_ShlD_count == (int)n_ia32_ShrD_count);
			count = get_irn_n(node, n_ia32_ShlD_count);
			goto check_shift_amount;

		case iro_ia32_Shl:
		case iro_ia32_Shr:
		case iro_ia32_Sar:
			assert((int)n_ia32_Shl_count == (int)n_ia32_Shr_count
					&& (int)n_ia32_Shl_count == (int)n_ia32_Sar_count);
			count = get_irn_n(node, n_ia32_Shl_count);

check_shift_amount:
			/* when the shift count is zero the flags are not affected, so we
			 * can only do this for constants != 0 */
			if (!is_ia32_Immediate(count))
				return produces_no_flag;

			imm_attr = get_ia32_immediate_attr_const(count);
			if (imm_attr->symconst != NULL)
				return produces_no_flag;
			if ((imm_attr->offset & 0x1f) == 0)
				return produces_no_flag;
			break;

		case iro_ia32_Mul:
			return pn == pn_ia32_Mul_res_high ?
				produces_flag_carry : produces_no_flag;

		default:
			return produces_no_flag;
	}

	return pn == pn_ia32_res ?
		produces_flag_zero : produces_no_flag;
}

/**
 * Replace Cmp(x, 0) by a Test(x, x).
 */
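/* Illustrative effect on the emitted code:
 *     cmpl $0, %eax   ->   testl %eax, %eax
 * Both set ZF/SF the same way for a comparison against zero, but the Test
 * form needs no immediate bytes in its encoding. */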
static void peephole_ia32_Cmp(ir_node *const node)
{
	ir_node                     *right;
	ia32_immediate_attr_t const *imm;
	dbg_info                    *dbgi;
	ir_graph                    *irg;
	ir_node                     *block;
	ir_node                     *noreg;
	ir_node                     *nomem;
	ir_node                     *op;
	ia32_attr_t           const *attr;
	int                          ins_permuted;
	ir_node                     *test;
	arch_register_t       const *reg;
	ir_edge_t             const *edge;
	ir_edge_t             const *tmp;

	if (get_ia32_op_type(node) != ia32_Normal)
		return;

	right = get_irn_n(node, n_ia32_Cmp_right);
	if (!is_ia32_Immediate(right))
		return;

	imm = get_ia32_immediate_attr_const(right);
	if (imm->symconst != NULL || imm->offset != 0)
		return;

	dbgi         = get_irn_dbg_info(node);
	irg          = get_irn_irg(node);
	block        = get_nodes_block(node);
	noreg        = ia32_new_NoReg_gp(irg);
	nomem        = get_irg_no_mem(current_ir_graph);
	op           = get_irn_n(node, n_ia32_Cmp_left);
	attr         = get_ia32_attr(node);
	ins_permuted = attr->data.ins_permuted;

	if (is_ia32_Cmp(node)) {
		test = new_bd_ia32_Test(dbgi, block, noreg, noreg, nomem,
		                        op, op, ins_permuted);
	} else {
		test = new_bd_ia32_Test8Bit(dbgi, block, noreg, noreg, nomem,
		                            op, op, ins_permuted);
	}
	set_ia32_ls_mode(test, get_ia32_ls_mode(node));

	reg = arch_get_irn_register_out(node, pn_ia32_Cmp_eflags);
	arch_set_irn_register_out(test, pn_ia32_Test_eflags, reg);

	foreach_out_edge_safe(node, edge, tmp) {
		ir_node *const user = get_edge_src_irn(edge);

		if (is_Proj(user))
			exchange(user, test);
	}

	sched_add_before(node, test);
	copy_mark(node, test);
	be_peephole_exchange(node, test);
}

/**
 * Peephole optimization for Test instructions.
 * - Remove the Test, if an appropriate flag was produced which is still live.
 * - Change a Test(x, c) to 8Bit, if 0 <= c < 256 (3 bytes shorter opcode).
 */
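/* Illustrative encodings for the second point: "testl $42, %eax" is
 * A9 2A 00 00 00 (5 bytes), while the 8-bit form "testb $42, %al" is
 * A8 2A (2 bytes). */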
static void peephole_ia32_Test(ir_node *node)
{
	ir_node *left  = get_irn_n(node, n_ia32_Test_left);
	ir_node *right = get_irn_n(node, n_ia32_Test_right);

	assert((int)n_ia32_Test_left == (int)n_ia32_Test8Bit_left
			&& (int)n_ia32_Test_right == (int)n_ia32_Test8Bit_right);

	if (left == right) { /* we need a test for 0 */
		ir_node         *block = get_nodes_block(node);
		int              pn    = pn_ia32_res;
		ir_node         *op    = left;
		ir_node         *flags_proj;
		ir_mode         *flags_mode;
		ir_mode         *op_mode;
		ir_node         *schedpoint;
		const ir_edge_t *edge;

		if (get_nodes_block(left) != block)
			return;

		if (is_Proj(op)) {
			pn = get_Proj_proj(op);
			op = get_Proj_pred(op);
		}

		/* walk the schedule upwards and abort when we find left or some other
		 * node that destroys the flags */
		schedpoint = node;
		for (;;) {
			schedpoint = sched_prev(schedpoint);
			if (schedpoint == op)
				break;
			if (arch_irn_is(schedpoint, modify_flags))
				return;
			if (schedpoint == block)
				panic("couldn't find left");
		}

		/* make sure only Lg/Eq tests are used */
		foreach_out_edge(node, edge) {
			ir_node              *user = get_edge_src_irn(edge);
			ia32_condition_code_t cc   = get_ia32_condcode(user);

			if (cc != ia32_cc_equal && cc != ia32_cc_not_equal)
				return;
		}

		switch (produces_test_flag(op, pn)) {
			case produces_flag_zero:
				break;

			case produces_flag_carry:
				foreach_out_edge(node, edge) {
					ir_node              *user = get_edge_src_irn(edge);
					ia32_condition_code_t cc   = get_ia32_condcode(user);

					switch (cc) {
						case ia32_cc_equal:     cc = ia32_cc_above_equal; break; /* CF = 0 */
						case ia32_cc_not_equal: cc = ia32_cc_below;       break; /* CF = 1 */
						default: panic("unexpected pn");
					}
					set_ia32_condcode(user, cc);
				}
				break;

			default:
				return;
		}

		if (is_ia32_irn(op))
			op_mode = get_ia32_ls_mode(op);
		else
			op_mode = get_irn_mode(op);

		/* Make sure we operate on the same bit size */
		if (get_mode_size_bits(op_mode) != get_mode_size_bits(get_ia32_ls_mode(node)))
			return;

		if (get_irn_mode(op) != mode_T) {
			set_irn_mode(op, mode_T);

			/* If there are other users, reroute them to the result proj */
			if (get_irn_n_edges(op) != 2) {
				ir_node *res = new_r_Proj(op, mode_Iu, pn_ia32_res);

				edges_reroute(op, res);
				/* Reattach the result proj to left */
				set_Proj_pred(res, op);
			}
		} else {
			if (get_irn_n_edges(left) == 2)
				kill_node(left);
		}

		flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
		flags_proj = new_r_Proj(op, flags_mode, pn_ia32_flags);
		arch_set_irn_register(flags_proj, &ia32_registers[REG_EFLAGS]);

		assert(get_irn_mode(node) != mode_T);

		be_peephole_exchange(node, flags_proj);
	} else if (is_ia32_Immediate(right)) {
		ia32_immediate_attr_t const *const imm = get_ia32_immediate_attr_const(right);
		unsigned                           offset;

		/* A test with a symconst is rather strange, but better safe than sorry */
		if (imm->symconst != NULL)
			return;

		offset = imm->offset;
		if (get_ia32_op_type(node) == ia32_AddrModeS) {
			ia32_attr_t *const attr = get_ia32_attr(node);

			if ((offset & 0xFFFFFF00) == 0) {
				/* attr->am_offs += 0; */
			} else if ((offset & 0xFFFF00FF) == 0) {
				ir_node *imm_node = ia32_create_Immediate(NULL, 0, offset >> 8);
				set_irn_n(node, n_ia32_Test_right, imm_node);
				attr->am_offs += 1;
			} else if ((offset & 0xFF00FFFF) == 0) {
				ir_node *imm_node = ia32_create_Immediate(NULL, 0, offset >> 16);
				set_irn_n(node, n_ia32_Test_right, imm_node);
				attr->am_offs += 2;
			} else if ((offset & 0x00FFFFFF) == 0) {
				ir_node *imm_node = ia32_create_Immediate(NULL, 0, offset >> 24);
				set_irn_n(node, n_ia32_Test_right, imm_node);
				attr->am_offs += 3;
			} else {
				return;
			}
		} else if (offset < 256) {
			arch_register_t const *const reg = arch_get_irn_register(left);

			if (reg != &ia32_registers[REG_EAX] &&
					reg != &ia32_registers[REG_EBX] &&
					reg != &ia32_registers[REG_ECX] &&
					reg != &ia32_registers[REG_EDX]) {
				return;
			}
		} else {
			return;
		}

		/* Technically we should build a Test8Bit because of the register
		 * constraints, but nobody changes registers at this point anymore. */
		set_ia32_ls_mode(node, mode_Bu);
	}
}

/**
 * AMD Athlon works faster when RET is not the destination of a conditional
 * jump or directly preceded by another jump instruction.
 * This can be avoided by placing a Rep prefix before the return.
 */
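/* The classic idiom for this is "rep ret" (illustrative): the REP prefix is
 * ignored by RET, so the instruction behaves like a plain return while
 * sidestepping the branch-predictor penalty on the affected AMD cores. */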
static void peephole_ia32_Return(ir_node *node)
{
	ir_node *irn;

	if (!ia32_cg_config.use_pad_return)
		return;

	/* check if this return is the first on the block */
	sched_foreach_reverse_from(node, irn) {
		switch (get_irn_opcode(irn)) {
			case beo_Return:
				/* the return node itself, ignore */
				continue;
			case iro_Start:
			case beo_Start:
				/* ignore: no code is generated for these */
				continue;
			case beo_IncSP:
				/* arg, IncSP 0 nodes might occur, ignore these */
				if (be_get_IncSP_offset(irn) == 0)
					continue;
				return;
			case iro_Phi:
				continue;
			default:
				return;
		}
	}

	/* ensure that the 3 byte return is generated */
	be_Return_set_emit_pop(node, 1);
}

/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE	48

/**
 * Tries to create Pushes from IncSP, Store combinations.
 * The Stores are replaced by Pushes, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
 */
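/* Illustrative effect on the emitted code (two stores into fresh stack space):
 *     subl $8, %esp                  pushl %eax
 *     movl %eax, 4(%esp)     ->      pushl %ebx
 *     movl %ebx, (%esp)              (the IncSP becomes an IncSP 0)
 */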
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
	int       i;
	int       maxslot;
	int       inc_ofs;
	ir_node  *node;
	ir_node  *stores[MAXPUSH_OPTIMIZE];
	ir_node  *block;
	ir_graph *irg;
	ir_node  *curr_sp;
	ir_mode  *spmode;
	ir_node  *first_push = NULL;
	ir_edge_t const *edge;
	ir_edge_t const *next;

	memset(stores, 0, sizeof(stores));

	assert(be_is_IncSP(irn));

	inc_ofs = be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule after the IncSP node as long as we find
	 * suitable Stores that could be transformed to a Push.
	 * We save them into the stores array, which is sorted by the frame
	 * offset/4 attached to the node.
	 */
	maxslot = -1;
	for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
		ir_node *mem;
		int      offset;
		int      storeslot;

		/* it has to be a Store */
		if (!is_ia32_Store(node))
			break;

		/* it has to use our sp value */
		if (get_irn_n(node, n_ia32_base) != irn)
			continue;
		/* Store has to be attached to NoMem */
		mem = get_irn_n(node, n_ia32_mem);
		if (!is_NoMem(mem))
			continue;

		/* unfortunately we can't support the full AMs possible for push at the
		 * moment. TODO: fix this */
		if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* storing at half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (inc_ofs - 4 < offset || offset >= MAXPUSH_OPTIMIZE * 4)
			continue;
		storeslot = offset >> 2;

		/* storing into the same slot twice is bad (and shouldn't happen...) */
		if (stores[storeslot] != NULL)
			break;

		stores[storeslot] = node;
		if (storeslot > maxslot)
			maxslot = storeslot;
	}

	curr_sp = irn;

	for (i = -1; i < maxslot; ++i) {
		if (stores[i + 1] == NULL)
			break;
	}

	/* walk through the Stores and create Pushes for them */
	block  = get_nodes_block(irn);
	spmode = get_irn_mode(irn);
	irg    = get_irn_irg(irn);
	for (; i >= 0; --i) {
		const arch_register_t *spreg;
		ir_node *push;
		ir_node *val, *mem, *mem_proj;
		ir_node *store = stores[i];
		ir_node *noreg = ia32_new_NoReg_gp(irg);

		val   = get_irn_n(store, n_ia32_unary_op);
		mem   = get_irn_n(store, n_ia32_mem);
		spreg = arch_get_irn_register(curr_sp);

		push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg,
		                        mem, val, curr_sp);
		copy_mark(store, push);

		if (first_push == NULL)
			first_push = push;

		sched_add_after(skip_Proj(curr_sp), push);

		/* create stackpointer Proj */
		curr_sp = new_r_Proj(push, spmode, pn_ia32_Push_stack);
		arch_set_irn_register(curr_sp, spreg);

		/* create memory Proj */
		mem_proj = new_r_Proj(push, mode_M, pn_ia32_Push_M);

		/* rewire Store Projs */
		foreach_out_edge_safe(store, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);

			switch (get_Proj_proj(proj)) {
				case pn_ia32_Store_M:
					exchange(proj, mem_proj);
					break;
				default:
					panic("unexpected Proj on Store->IncSp");
			}
		}

		/* use the memproj now */
		be_peephole_exchange(store, push);

		inc_ofs -= 4;
	}

	foreach_out_edge_safe(irn, edge, next) {
		ir_node *const src = get_edge_src_irn(edge);
		int      const pos = get_edge_src_pos(edge);

		if (src == first_push)
			continue;

		set_irn_n(src, pos, curr_sp);
	}

	be_set_IncSP_offset(irn, inc_ofs);
}

/**
 * Creates a Push instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 *
 * @return the new stack value
 */
static ir_node *create_push(dbg_info *dbgi, ir_node *block,
                            ir_node *stack, ir_node *schedpoint)
{
	const arch_register_t *esp = &ia32_registers[REG_ESP];

	ir_graph *irg   = get_irn_irg(block);
	ir_node  *val   = ia32_new_NoReg_gp(irg);
	ir_node  *noreg = ia32_new_NoReg_gp(irg);
	ir_node  *nomem = get_irg_no_mem(irg);
	ir_node  *push  = new_bd_ia32_Push(dbgi, block, noreg, noreg, nomem, val, stack);
	sched_add_before(schedpoint, push);

	stack = new_r_Proj(push, mode_Iu, pn_ia32_Push_stack);
	arch_set_irn_register(stack, esp);

	return stack;
}

static void peephole_store_incsp(ir_node *store)
{
	dbg_info *dbgi;
	ir_node  *block;
	ir_node  *noreg;
	ir_node  *mem;
	ir_node  *push;
	ir_node  *val;
	ir_node  *am_base = get_irn_n(store, n_ia32_Store_base);

	if (!be_is_IncSP(am_base)
			|| get_nodes_block(am_base) != get_nodes_block(store))
		return;
	mem = get_irn_n(store, n_ia32_Store_mem);
	if (!is_ia32_NoReg_GP(get_irn_n(store, n_ia32_Store_index))
			|| !is_NoMem(mem))
		return;

	int incsp_offset = be_get_IncSP_offset(am_base);
	if (incsp_offset <= 0)
		return;

	/* we have to be at offset 0 */
	int my_offset = get_ia32_am_offs_int(store);
	if (my_offset != 0) {
		/* TODO here: find out whether there is a store with offset 0 before
		 * us and whether we can move it down to our place */
		return;
	}
	ir_mode *ls_mode = get_ia32_ls_mode(store);
	int my_store_size = get_mode_size_bytes(ls_mode);

	if (my_offset + my_store_size > incsp_offset)
		return;

	/* correctness checking:
	 * - no one else must write to that stackslot
	 *   (because after the translation the incsp won't allocate it anymore)
	 */
	sched_foreach_reverse_from(store, node) {
		int i, arity;

		if (node == am_base)
			break;

		/* make sure no one else can use the space on the stack */
		arity = get_irn_arity(node);
		for (i = 0; i < arity; ++i) {
			ir_node *pred = get_irn_n(node, i);
			if (pred != am_base)
				continue;

			if (i == n_ia32_base &&
					(get_ia32_op_type(node) == ia32_AddrModeS
					 || get_ia32_op_type(node) == ia32_AddrModeD)) {
				int      node_offset  = get_ia32_am_offs_int(node);
				ir_mode *node_ls_mode = get_ia32_ls_mode(node);
				int      node_size    = get_mode_size_bytes(node_ls_mode);
				/* overlapping with our position? abort */
				if (node_offset < my_offset + my_store_size
						&& node_offset + node_size >= my_offset)
					return;
				/* otherwise it's fine */
				continue;
			}

			/* strange use of esp: abort */
			return;
		}
	}

	/* all ok, change to push */
	dbgi  = get_irn_dbg_info(store);
	block = get_nodes_block(store);
	noreg = ia32_new_NoReg_gp(get_irn_irg(store));
	val   = get_irn_n(store, n_ia32_Store_val);

	push = new_bd_ia32_Push(dbgi, block, noreg, noreg, mem, val, am_base);

	create_push(dbgi, block, am_base, store);
}

/**
 * Return true if a mode can be stored in the GP register set.
 */
static inline int mode_needs_gp_reg(ir_mode *mode)
{
	if (mode == ia32_mode_fpcw)
		return 0;
	if (get_mode_size_bits(mode) > 32)
		return 0;
	return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
}

/**
 * Tries to create Pops from Load, IncSP combinations.
 * The Loads are replaced by Pops, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
 */
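/* Illustrative effect on the emitted code (two reloads from the stack top):
 *     movl (%esp), %eax              popl %eax
 *     movl 4(%esp), %ebx     ->      popl %ebx
 *     addl $8, %esp                  (the IncSP becomes an IncSP 0)
 */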
static void peephole_Load_IncSP_to_pop(ir_node *irn)
{
	const arch_register_t *esp = &ia32_registers[REG_ESP];
	int      i, maxslot, inc_ofs, ofs;
	ir_node  *node, *pred_sp, *block;
	ir_node  *loads[MAXPUSH_OPTIMIZE];
	unsigned  regmask  = 0;
	unsigned  copymask = ~0;

	memset(loads, 0, sizeof(loads));
	assert(be_is_IncSP(irn));

	inc_ofs = -be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule before the IncSP node as long as we find
	 * suitable Loads that could be transformed to a Pop.
	 * We save them into the loads array, which is sorted by the frame
	 * offset/4 attached to the node.
	 */
	maxslot = -1;
	pred_sp = be_get_IncSP_pred(irn);
	for (node = sched_prev(irn); !sched_is_end(node); node = sched_prev(node)) {
		int offset;
		int loadslot;
		const arch_register_t *sreg, *dreg;

		/* it has to be a Load */
		if (!is_ia32_Load(node)) {
			if (be_is_Copy(node)) {
				if (!mode_needs_gp_reg(get_irn_mode(node))) {
					/* not a GP copy, ignore */
					continue;
				}
				dreg = arch_get_irn_register(node);
				sreg = arch_get_irn_register(be_get_Copy_op(node));
				if (regmask & copymask & (1 << sreg->index)) {
					break;
				}
				if (regmask & copymask & (1 << dreg->index)) {
					break;
				}
				/* we CAN skip Copies if neither the destination nor the source
				 * is in our regmask, i.e. none of our future Pops will
				 * overwrite it */
				regmask  |= (1 << dreg->index) | (1 << sreg->index);
				copymask &= ~((1 << dreg->index) | (1 << sreg->index));
				continue;
			}
			break;
		}

		/* we can handle only GP loads */
		if (!mode_needs_gp_reg(get_ia32_ls_mode(node)))
			break;

		/* it has to use our predecessor sp value */
		if (get_irn_n(node, n_ia32_base) != pred_sp) {
			/* it would be ok if this load does not use a Pop result,
			 * but we do not check this */
			break;
		}

		/* should have NO index */
		if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* loading from half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
			break;
		/* ignore those outside the possible window */
		if (offset > inc_ofs - 4)
			continue;
		loadslot = offset >> 2;

		/* loading from the same slot twice is bad (and shouldn't happen...) */
		if (loads[loadslot] != NULL)
			break;

		dreg = arch_get_irn_register_out(node, pn_ia32_Load_res);
		if (regmask & (1 << dreg->index)) {
			/* this register is already used */
			break;
		}
		regmask |= 1 << dreg->index;

		loads[loadslot] = node;
		if (loadslot > maxslot)
			maxslot = loadslot;
	}

	if (maxslot < 0)
		return;

	/* find the first slot */
	for (i = maxslot; i >= 0; --i) {
		ir_node *load = loads[i];
		if (load == NULL)
			break;
	}

	ofs = inc_ofs - (maxslot + 1) * 4;
	inc_ofs = (i + 1) * 4;

	/* create a new IncSP if needed */
	block = get_nodes_block(irn);
	if (inc_ofs > 0) {
		pred_sp = be_new_IncSP(esp, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
		sched_add_before(irn, pred_sp);
	}

	/* walk through the Loads and create Pops for them */
	for (++i; i <= maxslot; ++i) {
		ir_node *load = loads[i];
		ir_node *mem, *pop;
		const ir_edge_t *edge, *tmp;
		const arch_register_t *reg;

		mem = get_irn_n(load, n_ia32_mem);
		reg = arch_get_irn_register_out(load, pn_ia32_Load_res);

		pop = new_bd_ia32_Pop(get_irn_dbg_info(load), block, mem, pred_sp);
		arch_set_irn_register_out(pop, pn_ia32_Load_res, reg);

		copy_mark(load, pop);

		/* create stackpointer Proj */
		pred_sp = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_stack);
		arch_set_irn_register(pred_sp, esp);

		sched_add_before(irn, pop);

		/* rewire the Load Projs to the Pop */
		foreach_out_edge_safe(load, edge, tmp) {
			ir_node *proj = get_edge_src_irn(edge);

			set_Proj_pred(proj, pop);
		}

		/* we can remove the Load now */
		sched_remove(load);
		kill_node(load);
	}

	be_set_IncSP_offset(irn, -ofs);
	be_set_IncSP_pred(irn, pred_sp);
}

/**
 * Find a free GP register if possible, else return NULL.
 */
static const arch_register_t *get_free_gp_reg(ir_graph *irg)
{
	be_irg_t *birg = be_birg_from_irg(irg);
	int       i;

	for (i = 0; i < N_ia32_gp_REGS; ++i) {
		const arch_register_t *reg = &ia32_reg_classes[CLASS_ia32_gp].regs[i];
		if (!rbitset_is_set(birg->allocatable_regs, reg->global_index))
			continue;

		if (be_peephole_get_value(reg->global_index) == NULL)
			return reg;
	}

	return NULL;
}

/**
 * Creates a Pop instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to pop
 *
 * @return the new stack value
 */
static ir_node *create_pop(dbg_info *dbgi, ir_node *block,
                           ir_node *stack, ir_node *schedpoint,
                           const arch_register_t *reg)
{
	const arch_register_t *esp = &ia32_registers[REG_ESP];
	ir_graph *irg = get_irn_irg(block);
	ir_node  *pop;
	ir_node  *keep;
	ir_node  *val;
	ir_node  *in[1];

	pop = new_bd_ia32_Pop(dbgi, block, get_irg_no_mem(irg), stack);

	stack = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_stack);
	arch_set_irn_register(stack, esp);
	val = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_res);
	arch_set_irn_register(val, reg);

	sched_add_before(schedpoint, pop);

	in[0] = val;
	keep  = be_new_Keep(block, 1, in);
	sched_add_before(schedpoint, keep);

	return stack;
}

/**
 * Optimize an IncSP by replacing it with Push/Pop.
 */
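/* Illustrative sketch: a small "addl $4, %esp" can become "popl %ecx" when a
 * GP register is known to be dead here, and a small "subl $4, %esp" can
 * become a push; which form is cheaper is CPU dependent, hence the
 * use_add_esp_* and use_sub_esp_* flags consulted below. */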
static void peephole_be_IncSP(ir_node *node)
{
	const arch_register_t *esp = &ia32_registers[REG_ESP];
	const arch_register_t *reg;
	dbg_info *dbgi;
	ir_node  *block;
	ir_node  *stack;
	int       offset;

	/* first optimize incsp->incsp combinations */
	node = be_peephole_IncSP_IncSP(node);

	/* transform IncSP->Store combinations to Push where possible */
	peephole_IncSP_Store_to_push(node);

	/* transform Load->IncSP combinations to Pop where possible */
	peephole_Load_IncSP_to_pop(node);

	if (arch_get_irn_register(node) != esp)
		return;

	/* replace IncSP -4 by Pop freereg when possible */
	offset = be_get_IncSP_offset(node);
	if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
			(offset != -4 || ia32_cg_config.use_add_esp_4) &&
			(offset != +4 || ia32_cg_config.use_sub_esp_4) &&
			(offset != +8 || ia32_cg_config.use_sub_esp_8))
		return;

	if (offset < 0) {
		/* we need a free register for pop */
		reg = get_free_gp_reg(get_irn_irg(node));
		if (reg == NULL)
			return;

		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);

		stack = create_pop(dbgi, block, stack, node, reg);

		if (offset == -8)
			stack = create_pop(dbgi, block, stack, node, reg);
	} else {
		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);
		stack = new_bd_ia32_PushEax(dbgi, block, stack);
		arch_set_irn_register(stack, esp);
		sched_add_before(node, stack);

		if (offset == +8) {
			stack = new_bd_ia32_PushEax(dbgi, block, stack);
			arch_set_irn_register(stack, esp);
			sched_add_before(node, stack);
		}
	}

	be_peephole_exchange(node, stack);
}

/**
 * Peephole optimisation for ia32_Const's.
 */
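/* Illustrative encodings: "movl $0, %eax" is B8 00 00 00 00 (5 bytes), while
 * "xorl %eax, %eax" is 31 C0 (2 bytes). The xor clobbers EFLAGS, which is why
 * the flags-liveness check below is needed. */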
static void peephole_ia32_Const(ir_node *node)
{
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
	const arch_register_t       *reg;
	ir_node                     *block;
	dbg_info                    *dbgi;
	ir_node                     *xorn;

	/* try to transform a mov 0, reg to xor reg reg */
	if (attr->offset != 0 || attr->symconst != NULL)
		return;
	if (ia32_cg_config.use_mov_0)
		return;
	/* xor destroys the flags, so no one must be using them */
	if (be_peephole_get_value(REG_EFLAGS) != NULL)
		return;

	reg = arch_get_irn_register(node);
	assert(be_peephole_get_reg_value(reg) == NULL);

	/* create xor(produceval, produceval) */
	block = get_nodes_block(node);
	dbgi  = get_irn_dbg_info(node);
	xorn  = new_bd_ia32_Xor0(dbgi, block);
	arch_set_irn_register(xorn, reg);

	sched_add_before(node, xorn);

	copy_mark(node, xorn);
	be_peephole_exchange(node, xorn);
}

static inline int is_noreg(const ir_node *node)
{
	return is_ia32_NoReg_GP(node);
}

ir_node *ia32_immediate_from_long(long val)
{
	ir_graph *irg         = current_ir_graph;
	ir_node  *start_block = get_irg_start_block(irg);
	ir_node  *immediate
		= new_bd_ia32_Immediate(NULL, start_block, NULL, 0, 0, val);
	arch_set_irn_register(immediate, &ia32_registers[REG_GP_NOREG]);

	return immediate;
}

static ir_node *create_immediate_from_am(const ir_node *node)
{
	ir_node           *block            = get_nodes_block(node);
	int                offset           = get_ia32_am_offs_int(node);
	int                sc_sign          = is_ia32_am_sc_sign(node);
	const ia32_attr_t *attr             = get_ia32_attr_const(node);
	int                sc_no_pic_adjust = attr->data.am_sc_no_pic_adjust;
	ir_entity         *entity           = get_ia32_am_sc(node);
	ir_node           *res;

	res = new_bd_ia32_Immediate(NULL, block, entity, sc_sign, sc_no_pic_adjust,
	                            offset);
	arch_set_irn_register(res, &ia32_registers[REG_GP_NOREG]);
	return res;
}

static int is_am_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == 1 && entity == NULL;
}

static int is_am_minus_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == -1 && entity == NULL;
}

/**
 * Transforms a LEA into an Add or SHL if possible.
 */
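/* Illustrative sketches of the cases handled below:
 *     leal (%eax,%ebx), %eax   ->   addl %ebx, %eax
 *     leal 1(%eax), %eax       ->   incl %eax        (with use_incdec)
 *     leal (,%ebx,4), %ebx     ->   shll $2, %ebx
 * This is only done when the output register matches the base or index
 * register and EFLAGS is dead, since Add/Inc/Shl clobber the flags while
 * LEA does not. */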
static void peephole_ia32_Lea(ir_node *node)
{
	ir_node               *base;
	ir_node               *index;
	const arch_register_t *base_reg;
	const arch_register_t *index_reg;
	const arch_register_t *out_reg;
	int                    scale;
	int                    has_immediates;
	ir_node               *op1;
	ir_node               *op2;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *res;
	ir_node               *noreg;
	ir_node               *nomem;
	ir_graph              *irg;

	assert(is_ia32_Lea(node));

	/* we can only do this if it is allowed to clobber the flags */
	if (be_peephole_get_value(REG_EFLAGS) != NULL)
		return;

	base  = get_irn_n(node, n_ia32_Lea_base);
	index = get_irn_n(node, n_ia32_Lea_index);

	if (is_noreg(base)) {
		base     = NULL;
		base_reg = NULL;
	} else {
		base_reg = arch_get_irn_register(base);
	}
	if (is_noreg(index)) {
		index     = NULL;
		index_reg = NULL;
	} else {
		index_reg = arch_get_irn_register(index);
	}

	if (base == NULL && index == NULL) {
		/* we shouldn't construct these in the first place... */
#ifdef DEBUG_libfirm
		ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
#endif
		return;
	}

	out_reg = arch_get_irn_register(node);
	scale   = get_ia32_am_scale(node);
	assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
	/* check if we have immediate values (frame entities should already be
	 * expressed in the offsets) */
	if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
		has_immediates = 1;
	} else {
		has_immediates = 0;
	}

	/* we can transform leas where the out register is the same as either the
	 * base or index register back to an Add or Shl */
	if (out_reg == base_reg) {
		if (index == NULL) {
#ifdef DEBUG_libfirm
			if (!has_immediates) {
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
			}
#endif
			op1 = base;
			goto make_add_immediate;
		}
		if (scale == 0 && !has_immediates) {
			op1 = base;
			op2 = index;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else if (out_reg == index_reg) {
		if (base == NULL) {
			if (has_immediates && scale == 0) {
				op1 = index;
				goto make_add_immediate;
			} else if (!has_immediates && scale > 0) {
				op1 = index;
				op2 = ia32_immediate_from_long(scale);
				goto make_shl;
			} else if (!has_immediates) {
#ifdef DEBUG_libfirm
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
#endif
			}
		} else if (scale == 0 && !has_immediates) {
			op1 = index;
			op2 = base;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else {
		/* can't create an add */
		return;
	}

make_add_immediate:
	if (ia32_cg_config.use_incdec) {
		if (is_am_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_bd_ia32_Inc(dbgi, block, op1);
			arch_set_irn_register(res, out_reg);
			goto exchange;
		}
		if (is_am_minus_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_bd_ia32_Dec(dbgi, block, op1);
			arch_set_irn_register(res, out_reg);
			goto exchange;
		}
	}
	op2 = create_immediate_from_am(node);

make_add:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	irg   = get_irn_irg(node);
	noreg = ia32_new_NoReg_gp(irg);
	nomem = get_irg_no_mem(irg);
	res   = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, op1, op2);
	arch_set_irn_register(res, out_reg);
	set_ia32_commutative(res);
	goto exchange;

make_shl:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	irg   = get_irn_irg(node);
	noreg = ia32_new_NoReg_gp(irg);
	nomem = get_irg_no_mem(irg);
	res   = new_bd_ia32_Shl(dbgi, block, op1, op2);
	arch_set_irn_register(res, out_reg);

exchange:
	SET_IA32_ORIG_NODE(res, node);

	/* add the new ADD/SHL to the schedule */
	DBG_OPT_LEA2ADD(node, res);

	/* exchange the Add and the LEA */
	sched_add_before(node, res);
	copy_mark(node, res);
	be_peephole_exchange(node, res);
}

/**
 * Split an IMul mem, imm into a Load mem and IMul reg, imm if possible.
 */
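/* Illustrative sketch, assuming %ecx is free:
 *     imull $3, (%edx), %eax   ->   movl (%edx), %ecx
 *                                   imull $3, %ecx, %eax
 * This is profitable on cores where the mem, imm32 form is slow (see
 * ia32_cg_config.use_imul_mem_imm32). */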
static void peephole_ia32_Imul_split(ir_node *imul)
{
	const ir_node         *right = get_irn_n(imul, n_ia32_IMul_right);
	const arch_register_t *reg;
	ir_node               *res;

	if (!is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
		/* no memory, imm form: ignore */
		return;
	}
	/* we need a free register */
	reg = get_free_gp_reg(get_irn_irg(imul));
	if (reg == NULL)
		return;

	/* fine, we can rebuild it */
	res = ia32_turn_back_am(imul);
	arch_set_irn_register(res, reg);
}

/**
 * Replace xorps r,r and xorpd r,r by pxor r,r.
 */
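/* Illustrative sketch: "xorps %xmm0, %xmm0" -> "pxor %xmm0, %xmm0". Both zero
 * the register; pxor is preferred on the cores for which
 * ia32_cg_config.use_pxor is set. */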
static void peephole_ia32_xZero(ir_node *xorn)
{
	set_irn_op(xorn, op_ia32_xPzero);
}

/**
 * Replace a 16-bit sign extension from ax to eax by the shorter cwtl.
 */
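/* Illustrative encodings: "movswl %ax, %eax" is 0F BF C0 (3 bytes), while
 * "cwtl" is just 98 (1 byte); both sign-extend AX into EAX. */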
static void peephole_ia32_Conv_I2I(ir_node *node)
{
	const arch_register_t *eax          = &ia32_registers[REG_EAX];
	ir_mode               *smaller_mode = get_ia32_ls_mode(node);
	ir_node               *val          = get_irn_n(node, n_ia32_Conv_I2I_val);
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *cwtl;

	if (get_mode_size_bits(smaller_mode) != 16 ||
			!mode_is_signed(smaller_mode)          ||
			eax != arch_get_irn_register(val)      ||
			eax != arch_get_irn_register_out(node, pn_ia32_Conv_I2I_res))
		return;

	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	cwtl  = new_bd_ia32_Cwtl(dbgi, block, val);
	arch_set_irn_register(cwtl, eax);
	sched_add_before(node, cwtl);
	be_peephole_exchange(node, cwtl);
}

/**
 * Register a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
{
	assert(op->ops.generic == NULL);
	op->ops.generic = (op_func)func;
}

/* Perform peephole-optimizations. */
void ia32_peephole_optimization(ir_graph *irg)
{
	/* register peephole optimisations */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Const,    peephole_ia32_Const);
	register_peephole_optimisation(op_be_IncSP,      peephole_be_IncSP);
	register_peephole_optimisation(op_ia32_Lea,      peephole_ia32_Lea);
	register_peephole_optimisation(op_ia32_Cmp,      peephole_ia32_Cmp);
	register_peephole_optimisation(op_ia32_Cmp8Bit,  peephole_ia32_Cmp);
	register_peephole_optimisation(op_ia32_Test,     peephole_ia32_Test);
	register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
	register_peephole_optimisation(op_be_Return,     peephole_ia32_Return);
	if (!ia32_cg_config.use_imul_mem_imm32)
		register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
	if (ia32_cg_config.use_pxor)
		register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);
	if (ia32_cg_config.use_short_sex_eax)
		register_peephole_optimisation(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I);

	be_peephole_opt(irg);
}

/**
 * Removes the node from the schedule if it is not used anymore. If the node
 * is a mode_T node, all its Projs are removed as well.
 *
 * @param node  the node to be removed from the schedule
 */
static inline void try_kill(ir_node *node)
{
	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(node, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);
			try_kill(proj);
		}
	}

	if (get_irn_n_edges(node) != 0)
		return;

	if (sched_is_scheduled(node)) {
		sched_remove(node);
	}

	kill_node(node);
}

static void optimize_conv_store(ir_node *node)
{
	ir_node *pred;
	ir_node *pred_proj;
	ir_mode *conv_mode;
	ir_mode *store_mode;

	if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
		return;

	assert((int)n_ia32_Store_val == (int)n_ia32_Store8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Store_val);
	if (is_Proj(pred_proj)) {
		pred = get_Proj_pred(pred_proj);
	} else {
		pred = pred_proj;
	}
	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;
	if (get_ia32_op_type(pred) != ia32_Normal)
		return;

	/* the store only stores the lower bits, so we only need the conv
	 * if it shrinks the mode */
	conv_mode  = get_ia32_ls_mode(pred);
	store_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
		return;

	set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
	if (get_irn_n_edges(pred_proj) == 0) {
		kill_node(pred_proj);
		if (pred != pred_proj)
			kill_node(pred);
	}
}

static void optimize_load_conv(ir_node *node)
{
	ir_node *pred, *predpred;
	ir_mode *load_mode;
	ir_mode *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert((int)n_ia32_Conv_I2I_val == (int)n_ia32_Conv_I2I8Bit_val);
	pred = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (!is_Proj(pred))
		return;

	predpred = get_Proj_pred(pred);
	if (!is_ia32_Load(predpred))
		return;

	/* the load is sign extending the upper bits, so we only need the conv
	 * if it shrinks the mode */
	load_mode = get_ia32_ls_mode(predpred);
	conv_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
		return;

	if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
		/* change the load if it has only 1 user */
		if (get_irn_n_edges(pred) == 1) {
			ir_mode *newmode;
			if (get_mode_sign(conv_mode)) {
				newmode = find_signed_mode(load_mode);
			} else {
				newmode = find_unsigned_mode(load_mode);
			}
			assert(newmode != NULL);
			set_ia32_ls_mode(predpred, newmode);
		} else {
			/* otherwise we have to keep the conv */
			return;
		}
	}

	/* kill the conv */
	exchange(node, pred);
}

static void optimize_conv_conv(ir_node *node)
{
	ir_node *pred_proj, *pred, *result_conv;
	ir_mode *pred_mode, *conv_mode;
	int      conv_mode_bits;
	int      pred_mode_bits;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert((int)n_ia32_Conv_I2I_val == (int)n_ia32_Conv_I2I8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (is_Proj(pred_proj))
		pred = get_Proj_pred(pred_proj);
	else
		pred = pred_proj;

	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;

	/* we know that after a conv, the upper bits are sign extended,
	 * so we only need the 2nd conv if it shrinks the mode */
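	/* Worked example (illustrative): Conv(int16 -> int32) followed by
	 * Conv(int32 -> int8) keeps only the low 8 bits, so it is equivalent to a
	 * single Conv(int16 -> int8). */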
	conv_mode      = get_ia32_ls_mode(node);
	conv_mode_bits = get_mode_size_bits(conv_mode);
	pred_mode      = get_ia32_ls_mode(pred);
	pred_mode_bits = get_mode_size_bits(pred_mode);

	if (conv_mode_bits == pred_mode_bits
			&& get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
		result_conv = pred_proj;
	} else if (conv_mode_bits <= pred_mode_bits) {
		/* if the 2nd conv is smaller than the first conv, then we can always
		 * take the 2nd conv */
		if (get_irn_n_edges(pred_proj) == 1) {
			result_conv = pred_proj;
			set_ia32_ls_mode(pred, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				const arch_register_req_t **reqs = arch_get_irn_register_reqs_in(node);
				set_irn_op(pred, op_ia32_Conv_I2I8Bit);
				arch_set_irn_register_reqs_in(pred, reqs);
			}
		} else {
			/* we don't want to end up with 2 loads, so we better do nothing */
			if (get_irn_mode(pred) == mode_T) {
				return;
			}

			result_conv = exact_copy(pred);
			set_ia32_ls_mode(result_conv, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				const arch_register_req_t **reqs = arch_get_irn_register_reqs_in(node);
				set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
				arch_set_irn_register_reqs_in(result_conv, reqs);
			}
		}
	} else {
		/* if both convs have the same sign, then we can take the smaller one */
		if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
			result_conv = pred_proj;
		} else {
			/* no optimisation is possible if the smaller conv is a sign-extension */
			if (mode_is_signed(pred_mode)) {
				return;
			}
			/* we can take the smaller conv if it is unsigned */
			result_conv = pred_proj;
		}
	}

	/* Some user (like Phis) won't be happy if we change the mode. */
	set_irn_mode(result_conv, get_irn_mode(node));

	/* kill the conv */
	exchange(node, result_conv);

	if (get_irn_n_edges(pred_proj) == 0) {
		kill_node(pred_proj);
		if (pred != pred_proj)
			kill_node(pred);
	}
	optimize_conv_conv(result_conv);
}

static void optimize_node(ir_node *node, void *env)
{
	(void)env;

	optimize_load_conv(node);
	optimize_conv_store(node);
	optimize_conv_conv(node);
}

/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ir_graph *irg)
{
	irg_walk_blkwise_graph(irg, NULL, optimize_node, NULL);
}

void ia32_init_optimize(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}