/*
 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Implements several optimizations for IA32.
 * @author  Matthias Braun, Christian Wuerdig
 */
#include "firm_types.h"

#include "firmstat_t.h"

#include "bepeephole.h"

#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_architecture.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static void copy_mark(const ir_node *old, ir_node *newn)
{
	if (is_ia32_is_reload(old))
		set_ia32_is_reload(newn);
	if (is_ia32_is_spill(old))
		set_ia32_is_spill(newn);
	if (is_ia32_is_remat(old))
		set_ia32_is_remat(newn);
}
typedef enum produces_flag_t {
	produces_no_flag,
	produces_zero_sign,
	produces_zero_in_carry
} produces_flag_t;
/**
 * Return which usable flags the given node produces for its result,
 * i.e. zero (ZF) and sign (SF).
 * We do not check for carry (CF) or overflow (OF).
 *
 * @param node  the node to check
 * @param pn    the projection number of the used result
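 *
 * Example (illustrative): after `andl %ebx, %eax` the And result already
 * determines ZF and SF, so a subsequent `testl %eax, %eax` that only feeds
 * ZF/SF users is redundant.
 */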
static produces_flag_t check_produces_zero_sign(ir_node *node, int pn)
{
	ir_node                     *count;
	const ia32_immediate_attr_t *imm_attr;

	if (!is_ia32_irn(node))
		return produces_no_flag;
	switch (get_ia32_irn_opcode(node)) {
	case iro_ia32_Add:
	case iro_ia32_Adc:
	case iro_ia32_And:
	case iro_ia32_Or:
	case iro_ia32_Xor:
	case iro_ia32_Sub:
	case iro_ia32_Sbb:
	case iro_ia32_Neg:
	case iro_ia32_Inc:
	case iro_ia32_Dec:
		break;

	case iro_ia32_ShlD:
	case iro_ia32_ShrD:
		assert((int)n_ia32_ShlD_count == (int)n_ia32_ShrD_count);
		count = get_irn_n(node, n_ia32_ShlD_count);
		goto check_shift_amount;

	case iro_ia32_Shl:
	case iro_ia32_Shr:
	case iro_ia32_Sar:
		assert((int)n_ia32_Shl_count == (int)n_ia32_Shr_count
		       && (int)n_ia32_Shl_count == (int)n_ia32_Sar_count);
		count = get_irn_n(node, n_ia32_Shl_count);
check_shift_amount:
		/* when the shift count is zero the flags are not affected, so we
		 * can only do this for constants != 0 */
		if (!is_ia32_Immediate(count))
			return produces_no_flag;

		imm_attr = get_ia32_immediate_attr_const(count);
		if (imm_attr->symconst != NULL)
			return produces_no_flag;
		if ((imm_attr->offset & 0x1f) == 0)
			return produces_no_flag;
		break;

	case iro_ia32_Mul:
		return pn == pn_ia32_Mul_res_high ?
			produces_zero_in_carry : produces_no_flag;

	default:
		return produces_no_flag;
	}

	return pn == pn_ia32_res ? produces_zero_sign : produces_no_flag;
}
/**
 * Replace Cmp(x, 0) by a Test(x, x)
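 *
 * Example (illustrative): `cmpl $0, %eax` and `testl %eax, %eax` set the
 * flags identically (x - 0 and x & x have the same sign/zero behaviour and
 * both clear CF and OF), but the Test encodes shorter.
 */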
static void peephole_ia32_Cmp(ir_node *const node)
{
	if (get_ia32_op_type(node) != ia32_Normal)
		return;

	ir_node *const right = get_irn_n(node, n_ia32_Cmp_right);
	if (!is_ia32_Immediate(right))
		return;

	ia32_immediate_attr_t const *const imm = get_ia32_immediate_attr_const(right);
	if (imm->symconst != NULL || imm->offset != 0)
		return;

	dbg_info *const dbgi         = get_irn_dbg_info(node);
	ir_node  *const block        = get_nodes_block(node);
	ir_graph *const irg          = get_Block_irg(block);
	ir_node  *const noreg        = ia32_new_NoReg_gp(irg);
	ir_node  *const nomem        = get_irg_no_mem(irg);
	ir_node  *const op           = get_irn_n(node, n_ia32_Cmp_left);
	int       const ins_permuted = get_ia32_attr(node)->data.ins_permuted;

	ir_mode *const ls_mode = get_ia32_ls_mode(node);
	ir_node *const test    = get_mode_size_bits(ls_mode) == 8
		? new_bd_ia32_Test_8bit(dbgi, block, noreg, noreg, nomem, op, op, ins_permuted)
		: new_bd_ia32_Test     (dbgi, block, noreg, noreg, nomem, op, op, ins_permuted);
	set_ia32_ls_mode(test, ls_mode);

	arch_register_t const *const reg = arch_get_irn_register_out(node, pn_ia32_Cmp_eflags);
	arch_set_irn_register_out(test, pn_ia32_Test_eflags, reg);

	foreach_out_edge_safe(node, edge) {
		ir_node *const user = get_edge_src_irn(edge);

		if (is_Proj(user))
			exchange(user, test);
	}

	sched_add_before(node, test);
	copy_mark(node, test);
	be_peephole_exchange(node, test);
}
/**
 * Peephole optimization for Test instructions.
 * - Remove the Test if an appropriate flag was produced which is still live.
 * - Change a Test(x, c) to 8 bit if 0 <= c < 256 (the opcode is 3 bytes shorter).
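 *
 * Example (illustrative): `andl %ebx, %eax; testl %eax, %eax; je ...` can
 * drop the Test entirely, and `testl $0x80, %eax` can be encoded as the
 * shorter `testb $0x80, %al`.
 */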
static void peephole_ia32_Test(ir_node *node)
{
	ir_node *left  = get_irn_n(node, n_ia32_Test_left);
	ir_node *right = get_irn_n(node, n_ia32_Test_right);

	if (left == right) { /* we need a test for 0 */
		ir_node        *block = get_nodes_block(node);
		int             pn    = pn_ia32_res;
		ir_node        *op    = left;
		ir_node        *flags_proj;
		ir_mode        *flags_mode;
		ir_mode        *op_mode;
		ir_node        *schedpoint;
		produces_flag_t produced;

		if (get_nodes_block(left) != block)
			return;

		if (is_Proj(op)) {
			pn = get_Proj_proj(op);
			op = get_Proj_pred(op);
		}
		/* walk the schedule upwards and abort when we find left or some
		 * other node that destroys the flags */
		schedpoint = node;
		for (;;) {
			schedpoint = sched_prev(schedpoint);
			if (schedpoint == op)
				break;
			if (arch_irn_is(schedpoint, modify_flags))
				return;
			if (schedpoint == block)
				panic("couldn't find left");
		}
		produced = check_produces_zero_sign(op, pn);
		if (produced == produces_no_flag)
			return;

		/* make sure users only look at the sign/zero flag */
		foreach_out_edge(node, edge) {
			ir_node              *user = get_edge_src_irn(edge);
			ia32_condition_code_t cc   = get_ia32_condcode(user);

			if (cc == ia32_cc_equal || cc == ia32_cc_not_equal)
				continue;
			if (produced == produces_zero_sign
			    && (cc == ia32_cc_sign || cc == ia32_cc_not_sign)) {
				continue;
			}
			return;
		}
		if (is_ia32_irn(op))
			op_mode = get_ia32_ls_mode(op);
		else
			op_mode = get_irn_mode(op);

		/* make sure we operate on the same bit size */
		if (get_mode_size_bits(op_mode) != get_mode_size_bits(get_ia32_ls_mode(node)))
			return;
		if (produced == produces_zero_in_carry) {
			/* patch users to look at the carry flag instead of the zero flag */
			foreach_out_edge(node, edge) {
				ir_node              *user = get_edge_src_irn(edge);
				ia32_condition_code_t cc   = get_ia32_condcode(user);

				switch (cc) {
				case ia32_cc_equal:     cc = ia32_cc_above_equal; break;
				case ia32_cc_not_equal: cc = ia32_cc_below;       break;
				default: panic("unexpected pn");
				}
				set_ia32_condcode(user, cc);
			}
		}
		if (get_irn_mode(op) != mode_T) {
			set_irn_mode(op, mode_T);

			/* if there are other users, reroute them to the result Proj */
			if (get_irn_n_edges(op) != 2) {
				ir_node *res = new_r_Proj(op, mode_Iu, pn_ia32_res);
				edges_reroute_except(op, res, res);
			}
		} else {
			if (get_irn_n_edges(left) == 2)
				kill_node(left);
		}
		flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
		flags_proj = new_r_Proj(op, flags_mode, pn_ia32_flags);
		arch_set_irn_register(flags_proj, &ia32_registers[REG_EFLAGS]);

		assert(get_irn_mode(node) != mode_T);

		be_peephole_exchange(node, flags_proj);
	} else if (is_ia32_Immediate(right)) {
		ia32_immediate_attr_t const *const imm = get_ia32_immediate_attr_const(right);
		unsigned                           offset;

		/* A test with a symconst is rather strange, but better safe than sorry */
		if (imm->symconst != NULL)
			return;

		offset = imm->offset;
		if (get_ia32_op_type(node) == ia32_AddrModeS) {
			ia32_attr_t *const attr = get_ia32_attr(node);
			ir_graph    *const irg  = get_irn_irg(node);

			if ((offset & 0xFFFFFF00) == 0) {
				/* attr->am_offs += 0; */
			} else if ((offset & 0xFFFF00FF) == 0) {
				ir_node *imm_node = ia32_create_Immediate(irg, NULL, 0, offset >>  8);
				set_irn_n(node, n_ia32_Test_right, imm_node);
				attr->am_offs += 1;
			} else if ((offset & 0xFF00FFFF) == 0) {
				ir_node *imm_node = ia32_create_Immediate(irg, NULL, 0, offset >> 16);
				set_irn_n(node, n_ia32_Test_right, imm_node);
				attr->am_offs += 2;
			} else if ((offset & 0x00FFFFFF) == 0) {
				ir_node *imm_node = ia32_create_Immediate(irg, NULL, 0, offset >> 24);
				set_irn_n(node, n_ia32_Test_right, imm_node);
				attr->am_offs += 3;
			} else {
				return;
			}
		} else if (offset < 256) {
			arch_register_t const *const reg = arch_get_irn_register(left);

			if (reg != &ia32_registers[REG_EAX] &&
			    reg != &ia32_registers[REG_EBX] &&
			    reg != &ia32_registers[REG_ECX] &&
			    reg != &ia32_registers[REG_EDX]) {
				return;
			}
		} else {
			return;
		}

		/* Technically we should build a Test8Bit because of the register
		 * constraints, but nobody changes registers at this point anymore. */
		set_ia32_ls_mode(node, mode_Bu);
	}
}
/**
 * The AMD Athlon runs faster when a RET is neither the destination of a
 * conditional jump nor directly preceded by another jump instruction.
 * The penalty can be avoided by placing a Rep prefix before the return.
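 *
 * Example (illustrative): when this Return starts its block, the emitter is
 * told to produce the padded 3 byte form of the return instead of a bare
 * 1 byte `ret` (see be_Return_set_emit_pop below).
 */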
static void peephole_ia32_Return(ir_node *node)
{
	if (!ia32_cg_config.use_pad_return)
		return;

	/* check if this return is the first on the block */
	sched_foreach_reverse_from(node, irn) {
		switch (get_irn_opcode(irn)) {
		case beo_Return:
			/* the return node itself, ignore */
			continue;
		case iro_Start:
		case beo_Start:
			/* ignore, no code generated */
			continue;
		case beo_IncSP:
			/* arg, IncSP 0 nodes might occur, ignore these */
			if (be_get_IncSP_offset(irn) == 0)
				continue;
			return;
		case iro_Phi:
			continue;
		default:
			return;
		}
	}

	/* ensure that the 3 byte return is generated */
	be_Return_set_emit_pop(node, 1);
}
/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE 48
/**
 * Tries to create Pushs from IncSP, Store combinations.
 * The Stores are replaced by Pushs and the IncSP is modified
 * (possibly into IncSP 0, but not removed).
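 *
 * Example (illustrative): `subl $8, %esp; movl %eax, 4(%esp);
 * movl %ebx, (%esp)` becomes `pushl %eax; pushl %ebx`.
 */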
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
	int       i;
	int       maxslot    = -1;
	ir_node  *node;
	ir_node  *stores[MAXPUSH_OPTIMIZE];
	ir_node  *block;
	ir_graph *irg;
	ir_node  *curr_sp    = irn;
	ir_mode  *spmode;
	ir_node  *first_push = NULL;

	memset(stores, 0, sizeof(stores));

	int inc_ofs = be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule after the IncSP node as long as we find
	 * suitable Stores that could be transformed to a Push.
	 * We save them in the stores array, which is indexed by the frame
	 * offset/4 attached to the node.
	 */
	for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
		ir_node *mem;
		int      offset;
		int      storeslot;

		/* it has to be a Store */
		if (!is_ia32_Store(node))
			break;

		/* it has to use our sp value */
		if (get_irn_n(node, n_ia32_base) != irn)
			continue;
		/* the Store has to be attached to NoMem */
		mem = get_irn_n(node, n_ia32_mem);
		if (!is_NoMem(mem))
			continue;

		/* unfortunately we can't support the full AMs possible for push at
		 * the moment. TODO: fix this */
		if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* storing at half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (inc_ofs - 4 < offset || offset >= MAXPUSH_OPTIMIZE * 4)
			continue;
		storeslot = offset >> 2;

		/* storing into the same slot twice is bad (and shouldn't happen...) */
		if (stores[storeslot] != NULL)
			break;

		stores[storeslot] = node;
		if (storeslot > maxslot)
			maxslot = storeslot;
	}
	/* walk the collected slots in order and stop at the first hole */
	for (i = -1; i < maxslot; ++i) {
		if (stores[i + 1] == NULL)
			break;
	}

	/* walk through the Stores and create Pushs for them */
	block  = get_nodes_block(irn);
	spmode = get_irn_mode(irn);
	irg    = get_irn_irg(irn);
	for (; i >= 0; --i) {
		const arch_register_t *spreg;
		ir_node *push;
		ir_node *val, *mem, *mem_proj;
		ir_node *store = stores[i];
		ir_node *noreg = ia32_new_NoReg_gp(irg);

		val   = get_irn_n(store, n_ia32_unary_op);
		mem   = get_irn_n(store, n_ia32_mem);
		spreg = arch_get_irn_register(curr_sp);

		push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg,
		                        mem, val, curr_sp);
		copy_mark(store, push);

		if (first_push == NULL)
			first_push = push;

		sched_add_after(skip_Proj(curr_sp), push);

		/* create stackpointer Proj */
		curr_sp = new_r_Proj(push, spmode, pn_ia32_Push_stack);
		arch_set_irn_register(curr_sp, spreg);

		/* create memory Proj */
		mem_proj = new_r_Proj(push, mode_M, pn_ia32_Push_M);

		/* rewire Store Projs */
		foreach_out_edge_safe(store, edge) {
			ir_node *proj = get_edge_src_irn(edge);

			switch (get_Proj_proj(proj)) {
			case pn_ia32_Store_M:
				exchange(proj, mem_proj);
				break;
			default:
				panic("unexpected Proj on Store->IncSp");
			}
		}

		/* use the memproj now */
		be_peephole_exchange(store, push);

		inc_ofs -= 4;
	}
	/* all other IncSP users must now use the last stack pointer Proj; the
	 * first Push keeps consuming the original IncSP value */
	foreach_out_edge_safe(irn, edge) {
		ir_node *const src = get_edge_src_irn(edge);
		int      const pos = get_edge_src_pos(edge);

		if (src == first_push)
			continue;

		set_irn_n(src, pos, curr_sp);
	}

	be_set_IncSP_offset(irn, inc_ofs);
}
/**
 * Creates a Push instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 *
 * @return the new stack value
 */
static ir_node *create_push(dbg_info *dbgi, ir_node *block,
                            ir_node *stack, ir_node *schedpoint)
{
	const arch_register_t *esp = &ia32_registers[REG_ESP];

	ir_node  *val   = ia32_new_NoReg_gp(cg);
	ir_node  *noreg = ia32_new_NoReg_gp(cg);
	ir_graph *irg   = get_irn_irg(block);
	ir_node  *nomem = get_irg_no_mem(irg);
	ir_node  *push  = new_bd_ia32_Push(dbgi, block, noreg, noreg, nomem, val, stack);
	sched_add_before(schedpoint, push);

	stack = new_r_Proj(push, mode_Iu, pn_ia32_Push_stack);
	arch_set_irn_register(stack, esp);

	return stack;
}
static void peephole_store_incsp(ir_node *store)
{
	dbg_info *dbgi;
	ir_node  *block;
	ir_node  *noreg;
	ir_node  *mem;
	ir_node  *push;
	ir_node  *val;
	ir_node  *am_base = get_irn_n(store, n_ia32_Store_base);

	if (!be_is_IncSP(am_base)
	    || get_nodes_block(am_base) != get_nodes_block(store))
		return;
	mem = get_irn_n(store, n_ia32_Store_mem);
	if (!is_ia32_NoReg_GP(get_irn_n(store, n_ia32_Store_index))
	    || !is_NoMem(mem))
		return;

	int incsp_offset = be_get_IncSP_offset(am_base);
	if (incsp_offset <= 0)
		return;

	/* we have to be at offset 0 */
	int my_offset = get_ia32_am_offs_int(store);
	if (my_offset != 0) {
		/* TODO here: find out whether there is a store with offset 0 before
		 * us and whether we can move it down to our place */
		return;
	}
	ir_mode *ls_mode       = get_ia32_ls_mode(store);
	int      my_store_size = get_mode_size_bytes(ls_mode);

	if (my_offset + my_store_size > incsp_offset)
		return;

	/* correctness checking:
	 * - no one else must write to that stackslot
	 *   (because after translation the IncSP won't allocate it anymore)
	 */
	sched_foreach_reverse_from(store, node) {
		int i, arity;

		if (node == am_base)
			break;

		/* make sure no one else can use the space on the stack */
		arity = get_irn_arity(node);
		for (i = 0; i < arity; ++i) {
			ir_node *pred = get_irn_n(node, i);
			if (pred != am_base)
				continue;

			if (i == n_ia32_base &&
			    (get_ia32_op_type(node) == ia32_AddrModeS
			     || get_ia32_op_type(node) == ia32_AddrModeD)) {
				int      node_offset  = get_ia32_am_offs_int(node);
				ir_mode *node_ls_mode = get_ia32_ls_mode(node);
				int      node_size    = get_mode_size_bytes(node_ls_mode);
				/* overlapping with our position? abort */
				if (node_offset < my_offset + my_store_size
				    && node_offset + node_size >= my_offset)
					return;
				/* otherwise it's fine */
				continue;
			}

			/* strange use of esp: abort */
			return;
		}
	}

	/* all ok, change to push */
	dbgi  = get_irn_dbg_info(store);
	block = get_nodes_block(store);
	noreg = ia32_new_NoReg_gp(cg);
	val   = get_irn_n(store, n_ia32_Store_val);

	push = new_bd_ia32_Push(dbgi, block, noreg, noreg, mem, val, am_base);

	create_push(dbgi, block, am_base, store);
}
/**
 * Return true if a mode can be stored in the GP register set.
 */
static inline int mode_needs_gp_reg(ir_mode *mode)
{
	if (mode == ia32_mode_fpcw)
		return 0;
	if (get_mode_size_bits(mode) > 32)
		return 0;
	return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
}
/**
 * Tries to create Pops from Load, IncSP combinations.
 * The Loads are replaced by Pops and the IncSP is modified
 * (possibly into IncSP 0, but not removed).
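 *
 * Example (illustrative): `movl (%esp), %ebx; movl 4(%esp), %esi;
 * addl $8, %esp` becomes `popl %ebx; popl %esi`.
 */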
static void peephole_Load_IncSP_to_pop(ir_node *irn)
{
	const arch_register_t *esp = &ia32_registers[REG_ESP];
	int      i, maxslot = -1, ofs;
	ir_node *node, *pred_sp, *block;
	ir_node *loads[MAXPUSH_OPTIMIZE];
	unsigned regmask  = 0;
	unsigned copymask = ~0;

	memset(loads, 0, sizeof(loads));

	int inc_ofs = -be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule before the IncSP node as long as we find
	 * suitable Loads that could be transformed to a Pop.
	 * We save them in the loads array, which is indexed by the frame
	 * offset/4 attached to the node.
	 */
	pred_sp = be_get_IncSP_pred(irn);
	for (node = sched_prev(irn); !sched_is_end(node); node = sched_prev(node)) {
		int offset;
		int loadslot;
		const arch_register_t *sreg, *dreg;

		/* it has to be a Load */
		if (!is_ia32_Load(node)) {
			if (be_is_Copy(node)) {
				if (!mode_needs_gp_reg(get_irn_mode(node))) {
					/* not a GP copy, ignore */
					continue;
				}
				dreg = arch_get_irn_register(node);
				sreg = arch_get_irn_register(be_get_Copy_op(node));
				if (regmask & copymask & (1 << sreg->index)) {
					break;
				}
				if (regmask & copymask & (1 << dreg->index)) {
					break;
				}
				/* we CAN skip Copies if neither the destination nor the
				 * source is in our regmask, i.e. none of our future Pops
				 * will overwrite it */
				regmask  |= (1 << dreg->index) | (1 << sreg->index);
				copymask &= ~((1 << dreg->index) | (1 << sreg->index));
				continue;
			}
			break;
		}
		/* we can handle only GP loads */
		if (!mode_needs_gp_reg(get_ia32_ls_mode(node)))
			break;

		/* it has to use our predecessor sp value */
		if (get_irn_n(node, n_ia32_base) != pred_sp) {
			/* it would be ok if this load does not use a Pop result,
			 * but we do not check this */
			break;
		}

		/* it should have NO index */
		if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* loading from half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
			break;
		/* ignore those outside the possible window */
		if (offset > inc_ofs - 4)
			continue;
		loadslot = offset >> 2;

		/* loading from the same slot twice is bad (and shouldn't happen...) */
		if (loads[loadslot] != NULL)
			break;

		dreg = arch_get_irn_register_out(node, pn_ia32_Load_res);
		if (regmask & (1 << dreg->index)) {
			/* this register is already used */
			break;
		}
		regmask |= 1 << dreg->index;

		loads[loadslot] = node;
		if (loadslot > maxslot)
			maxslot = loadslot;
	}

	if (maxslot < 0)
		return;
	/* find the first slot */
	for (i = maxslot; i >= 0; --i) {
		ir_node *load = loads[i];
		if (load == NULL)
			break;
	}

	ofs     = inc_ofs - (maxslot + 1) * 4;
	inc_ofs = (i + 1) * 4;

	/* create a new IncSP if needed */
	block = get_nodes_block(irn);
	if (inc_ofs > 0) {
		pred_sp = be_new_IncSP(esp, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
		sched_add_before(irn, pred_sp);
	}

	/* walk through the Loads and create Pops for them */
	for (++i; i <= maxslot; ++i) {
		ir_node *load = loads[i];
		ir_node *mem, *pop;
		const arch_register_t *reg;

		mem = get_irn_n(load, n_ia32_mem);
		reg = arch_get_irn_register_out(load, pn_ia32_Load_res);

		pop = new_bd_ia32_Pop(get_irn_dbg_info(load), block, mem, pred_sp);
		arch_set_irn_register_out(pop, pn_ia32_Load_res, reg);

		copy_mark(load, pop);

		/* create stackpointer Proj */
		pred_sp = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_stack);
		arch_set_irn_register(pred_sp, esp);

		sched_add_before(irn, pop);

		/* rewire the Load Projs to the Pop */
		foreach_out_edge_safe(load, edge) {
			ir_node *proj = get_edge_src_irn(edge);

			set_Proj_pred(proj, pop);
		}

		/* we can remove the Load now */
		sched_remove(load);
		kill_node(load);
	}

	be_set_IncSP_offset(irn, -ofs);
	be_set_IncSP_pred(irn, pred_sp);
}
/**
 * Find a free GP register if possible, else return NULL.
 */
static const arch_register_t *get_free_gp_reg(ir_graph *irg)
{
	be_irg_t *birg = be_birg_from_irg(irg);
	int       i;

	for (i = 0; i < N_ia32_gp_REGS; ++i) {
		const arch_register_t *reg = &ia32_reg_classes[CLASS_ia32_gp].regs[i];
		if (!rbitset_is_set(birg->allocatable_regs, reg->global_index))
			continue;

		if (be_peephole_get_value(reg->global_index) == NULL)
			return reg;
	}

	return NULL;
}
/**
 * Creates a Pop instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to pop
 *
 * @return the new stack value
 */
static ir_node *create_pop(dbg_info *dbgi, ir_node *block,
                           ir_node *stack, ir_node *schedpoint,
                           const arch_register_t *reg)
{
	const arch_register_t *esp = &ia32_registers[REG_ESP];
	ir_graph *irg = get_irn_irg(block);
	ir_node  *pop;
	ir_node  *keep;
	ir_node  *val;
	ir_node  *in[1];

	pop = new_bd_ia32_Pop(dbgi, block, get_irg_no_mem(irg), stack);

	stack = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_stack);
	arch_set_irn_register(stack, esp);
	val = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_res);
	arch_set_irn_register(val, reg);

	sched_add_before(schedpoint, pop);

	in[0] = val;
	keep  = be_new_Keep(block, 1, in);
	sched_add_before(schedpoint, keep);

	return stack;
}
/**
 * Optimize an IncSP by replacing it with Push/Pop.
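 *
 * Example (illustrative): `addl $4, %esp` (IncSP -4) becomes `popl %ecx`
 * when some GP register is free, and `subl $4, %esp` (IncSP +4) becomes a
 * dummy `pushl %eax`; the push/pop forms encode shorter.
 */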
static void peephole_be_IncSP(ir_node *node)
{
	const arch_register_t *esp = &ia32_registers[REG_ESP];
	const arch_register_t *reg;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *stack;
	int                    offset;

	/* first optimize incsp->incsp combinations */
	node = be_peephole_IncSP_IncSP(node);

	/* transform IncSP->Store combinations to Push where possible */
	peephole_IncSP_Store_to_push(node);

	/* transform Load->IncSP combinations to Pop where possible */
	peephole_Load_IncSP_to_pop(node);

	if (arch_get_irn_register(node) != esp)
		return;

	/* replace IncSP -4 by Pop freereg when possible */
	offset = be_get_IncSP_offset(node);
	if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
	    (offset != -4 || ia32_cg_config.use_add_esp_4) &&
	    (offset != +4 || ia32_cg_config.use_sub_esp_4) &&
	    (offset != +8 || ia32_cg_config.use_sub_esp_8))
		return;

	if (offset < 0) {
		/* we need a free register for pop */
		reg = get_free_gp_reg(get_irn_irg(node));
		if (reg == NULL)
			return;

		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);

		stack = create_pop(dbgi, block, stack, node, reg);

		if (offset == -8) {
			stack = create_pop(dbgi, block, stack, node, reg);
		}
	} else {
		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);
		stack = new_bd_ia32_PushEax(dbgi, block, stack);
		arch_set_irn_register(stack, esp);
		sched_add_before(node, stack);

		if (offset == +8) {
			stack = new_bd_ia32_PushEax(dbgi, block, stack);
			arch_set_irn_register(stack, esp);
			sched_add_before(node, stack);
		}
	}

	be_peephole_exchange(node, stack);
}
/**
 * Peephole optimisation for ia32_Const's
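 *
 * Example (illustrative): `movl $0, %eax` (5 bytes) becomes
 * `xorl %eax, %eax` (2 bytes). This is only done while the flags are dead,
 * since xor clobbers them.
 */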
static void peephole_ia32_Const(ir_node *node)
{
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
	const arch_register_t       *reg;
	ir_node                     *block;
	dbg_info                    *dbgi;
	ir_node                     *xorn;

	/* try to transform a mov 0, reg to xor reg reg */
	if (attr->offset != 0 || attr->symconst != NULL)
		return;
	if (ia32_cg_config.use_mov_0)
		return;
	/* xor destroys the flags, so no one must be using them */
	if (be_peephole_get_value(REG_EFLAGS) != NULL)
		return;

	reg = arch_get_irn_register(node);
	assert(be_peephole_get_reg_value(reg) == NULL);

	/* create xor(produceval, produceval) */
	block = get_nodes_block(node);
	dbgi  = get_irn_dbg_info(node);
	xorn  = new_bd_ia32_Xor0(dbgi, block);
	arch_set_irn_register(xorn, reg);

	sched_add_before(node, xorn);

	copy_mark(node, xorn);
	be_peephole_exchange(node, xorn);
}
static inline int is_noreg(const ir_node *node)
{
	return is_ia32_NoReg_GP(node);
}
ir_node *ia32_immediate_from_long(long val)
{
	ir_graph *irg         = current_ir_graph;
	ir_node  *start_block = get_irg_start_block(irg);
	ir_node  *immediate
		= new_bd_ia32_Immediate(NULL, start_block, NULL, 0, 0, val);
	arch_set_irn_register(immediate, &ia32_registers[REG_GP_NOREG]);

	return immediate;
}
static ir_node *create_immediate_from_am(const ir_node *node)
{
	ir_node           *block            = get_nodes_block(node);
	int                offset           = get_ia32_am_offs_int(node);
	int                sc_sign          = is_ia32_am_sc_sign(node);
	const ia32_attr_t *attr             = get_ia32_attr_const(node);
	int                sc_no_pic_adjust = attr->data.am_sc_no_pic_adjust;
	ir_entity         *entity           = get_ia32_am_sc(node);
	ir_node           *res;

	res = new_bd_ia32_Immediate(NULL, block, entity, sc_sign, sc_no_pic_adjust,
	                            offset);
	arch_set_irn_register(res, &ia32_registers[REG_GP_NOREG]);
	return res;
}
static int is_am_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == 1 && entity == NULL;
}
static int is_am_minus_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == -1 && entity == NULL;
}
/**
 * Transforms a LEA into an Add or SHL if possible.
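 *
 * Example (illustrative, when the output register equals the base or index
 * register): `lea 1(%eax), %eax` becomes `incl %eax`,
 * `lea (%eax,%ebx), %eax` becomes `addl %ebx, %eax`, and
 * `lea (,%eax,4), %eax` becomes `shll $2, %eax`.
 */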
static void peephole_ia32_Lea(ir_node *node)
{
	ir_node               *base;
	ir_node               *index;
	const arch_register_t *base_reg;
	const arch_register_t *index_reg;
	const arch_register_t *out_reg;
	int                    scale;
	int                    has_immediates;
	ir_node               *op1;
	ir_node               *op2;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *res;

	assert(is_ia32_Lea(node));

	/* we can only do this if it is allowed to clobber the flags */
	if (be_peephole_get_value(REG_EFLAGS) != NULL)
		return;

	base  = get_irn_n(node, n_ia32_Lea_base);
	index = get_irn_n(node, n_ia32_Lea_index);

	if (is_noreg(base)) {
		base     = NULL;
		base_reg = NULL;
	} else {
		base_reg = arch_get_irn_register(base);
	}
	if (is_noreg(index)) {
		index     = NULL;
		index_reg = NULL;
	} else {
		index_reg = arch_get_irn_register(index);
	}

	if (base == NULL && index == NULL) {
		/* we shouldn't construct these in the first place... */
#ifdef DEBUG_libfirm
		ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
#endif
		return;
	}

	out_reg = arch_get_irn_register(node);
	scale   = get_ia32_am_scale(node);
	assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
	/* check if we have immediate values (frame entities should already be
	 * expressed in the offsets) */
	if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
		has_immediates = 1;
	} else {
		has_immediates = 0;
	}

	/* we can transform leas where the out register is the same as either the
	 * base or index register back to an Add or Shl */
	if (out_reg == base_reg) {
		if (index == NULL) {
#ifdef DEBUG_libfirm
			if (!has_immediates) {
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
			}
#endif
			op1 = base;
			goto make_add_immediate;
		}
		if (scale == 0 && !has_immediates) {
			op1 = base;
			op2 = index;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else if (out_reg == index_reg) {
		if (base == NULL) {
			if (has_immediates && scale == 0) {
				op1 = index;
				goto make_add_immediate;
			} else if (!has_immediates && scale > 0) {
				op1 = index;
				op2 = ia32_immediate_from_long(scale);
				goto make_shl;
			} else if (!has_immediates) {
#ifdef DEBUG_libfirm
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
#endif
			}
		} else if (scale == 0 && !has_immediates) {
			op1 = index;
			op2 = base;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else {
		/* can't create an add */
		return;
	}

make_add_immediate:
	if (ia32_cg_config.use_incdec) {
		if (is_am_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_bd_ia32_Inc(dbgi, block, op1);
			arch_set_irn_register(res, out_reg);
			goto exchange;
		}
		if (is_am_minus_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_bd_ia32_Dec(dbgi, block, op1);
			arch_set_irn_register(res, out_reg);
			goto exchange;
		}
	}
	op2 = create_immediate_from_am(node);

make_add:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	ir_graph *irg   = get_irn_irg(node);
	ir_node  *noreg = ia32_new_NoReg_gp(irg);
	ir_node  *nomem = get_irg_no_mem(irg);
	res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, op1, op2);
	arch_set_irn_register(res, out_reg);
	set_ia32_commutative(res);
	goto exchange;

make_shl:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	res   = new_bd_ia32_Shl(dbgi, block, op1, op2);
	arch_set_irn_register(res, out_reg);
	goto exchange;

exchange:
	SET_IA32_ORIG_NODE(res, node);

	/* add the new ADD/SHL to the schedule */
	DBG_OPT_LEA2ADD(node, res);

	/* exchange the Add and the LEA */
	sched_add_before(node, res);
	copy_mark(node, res);
	be_peephole_exchange(node, res);
}
/**
 * Split an Imul mem, imm into a Load mem and Imul reg, imm if possible.
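 *
 * Example (roughly): `imull $3, 8(%esp), %eax` becomes
 * `movl 8(%esp), %eax; imull $3, %eax, %eax` on targets where the
 * memory, immediate form is slow.
 */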
static void peephole_ia32_Imul_split(ir_node *imul)
{
	const ir_node         *right = get_irn_n(imul, n_ia32_IMul_right);
	const arch_register_t *reg;
	ir_node               *res;

	if (!is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
		/* not a memory, immediate form: ignore */
		return;
	}
	/* we need a free register */
	reg = get_free_gp_reg(get_irn_irg(imul));
	if (reg == NULL)
		return;

	/* fine, we can rebuild it */
	res = ia32_turn_back_am(imul);
	arch_set_irn_register(res, reg);
}
/**
 * Replace xorps r,r and xorpd r,r by pxor r,r
 */
static void peephole_ia32_xZero(ir_node *xorn)
{
	set_irn_op(xorn, op_ia32_xPzero);
}
/**
 * Replace the 16 bit sign extension from ax to eax by the shorter cwtl.
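 *
 * Example (illustrative): `movswl %ax, %eax` (3 bytes) becomes `cwtl`
 * (1 byte).
 */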
static void peephole_ia32_Conv_I2I(ir_node *node)
{
	const arch_register_t *eax          = &ia32_registers[REG_EAX];
	ir_mode               *smaller_mode = get_ia32_ls_mode(node);
	ir_node               *val          = get_irn_n(node, n_ia32_Conv_I2I_val);
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *cwtl;

	if (get_mode_size_bits(smaller_mode) != 16 ||
	    !mode_is_signed(smaller_mode)          ||
	    eax != arch_get_irn_register(val)      ||
	    eax != arch_get_irn_register_out(node, pn_ia32_Conv_I2I_res))
		return;

	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	cwtl  = new_bd_ia32_Cwtl(dbgi, block, val);
	arch_set_irn_register(cwtl, eax);
	sched_add_before(node, cwtl);
	be_peephole_exchange(node, cwtl);
}
/**
 * Register a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
{
	assert(op->ops.generic == NULL);
	op->ops.generic = (op_func)func;
}
/* Perform peephole-optimizations. */
void ia32_peephole_optimization(ir_graph *irg)
{
	/* we currently do it in 2 passes because:
	 *    Lea -> Add could be useful as a flag producer for Test later
	 */

	/* pass 1 */
	ir_clear_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Cmp, peephole_ia32_Cmp);
	register_peephole_optimisation(op_ia32_Lea, peephole_ia32_Lea);
	if (ia32_cg_config.use_short_sex_eax)
		register_peephole_optimisation(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I);
	if (ia32_cg_config.use_pxor)
		register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);
	if (!ia32_cg_config.use_imul_mem_imm32)
		register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
	be_peephole_opt(irg);

	/* pass 2 */
	ir_clear_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
	register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
	register_peephole_optimisation(op_ia32_Test, peephole_ia32_Test);
	register_peephole_optimisation(op_be_Return, peephole_ia32_Return);
	be_peephole_opt(irg);
}
/**
 * Removes the node from the schedule if it is not used anymore. If it is a
 * mode_T node, all its Projs are removed as well.
 *
 * @param node  the node to be removed from the schedule
 */
static inline void try_kill(ir_node *node)
{
	if (get_irn_mode(node) == mode_T) {
		foreach_out_edge_safe(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			try_kill(proj);
		}
	}

	if (get_irn_n_edges(node) != 0)
		return;

	if (sched_is_scheduled(node)) {
		sched_remove(node);
	}

	kill_node(node);
}
static void optimize_conv_store(ir_node *node)
{
	ir_node *pred;
	ir_node *pred_proj;
	ir_mode *conv_mode;
	ir_mode *store_mode;

	if (!is_ia32_Store(node))
		return;

	pred_proj = get_irn_n(node, n_ia32_Store_val);
	if (is_Proj(pred_proj)) {
		pred = get_Proj_pred(pred_proj);
	} else {
		pred = pred_proj;
	}
	if (!is_ia32_Conv_I2I(pred))
		return;
	if (get_ia32_op_type(pred) != ia32_Normal)
		return;
	/* the store only stores the lower bits, so we only need the conv
	 * if it shrinks the mode
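	 * (e.g. a Conv_I2I sign-extending a 16 bit value in front of a 16 bit
	 * or 8 bit Store is redundant, since the Store reads only the low bits
	 * anyway) */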
	conv_mode  = get_ia32_ls_mode(pred);
	store_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
		return;

	ir_fprintf(stderr, "Optimisation warning: unoptimized ia32 Store(Conv) (%+F, %+F)\n", node, pred);
	set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
	if (get_irn_n_edges(pred_proj) == 0) {
		kill_node(pred_proj);
		if (pred != pred_proj)
			kill_node(pred);
	}
}
static void optimize_load_conv(ir_node *node)
{
	ir_node *pred, *predpred;
	ir_mode *load_mode;
	ir_mode *conv_mode;

	if (!is_ia32_Conv_I2I(node))
		return;

	pred = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (!is_Proj(pred))
		return;

	predpred = get_Proj_pred(pred);
	if (!is_ia32_Load(predpred))
		return;

	/* the load is sign extending the upper bits, so we only need the conv
	 * if it shrinks the mode */
	load_mode = get_ia32_ls_mode(predpred);
	conv_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
		return;

	if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
		/* change the load if it has only 1 user */
		if (get_irn_n_edges(pred) == 1) {
			ir_mode *newmode;
			if (get_mode_sign(conv_mode)) {
				newmode = find_signed_mode(load_mode);
			} else {
				newmode = find_unsigned_mode(load_mode);
			}
			assert(newmode != NULL);
			set_ia32_ls_mode(predpred, newmode);
		} else {
			/* otherwise we have to keep the conv */
			return;
		}
	}

	ir_fprintf(stderr, "Optimisation warning: unoptimized ia32 Conv(Load) (%+F, %+F)\n", node, predpred);
	exchange(node, pred);
}
static void optimize_conv_conv(ir_node *node)
{
	ir_node *pred_proj, *pred, *result_conv;
	ir_mode *pred_mode, *conv_mode;
	int      conv_mode_bits;
	int      pred_mode_bits;

	if (!is_ia32_Conv_I2I(node))
		return;

	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (is_Proj(pred_proj))
		pred = get_Proj_pred(pred_proj);
	else
		pred = pred_proj;

	if (!is_ia32_Conv_I2I(pred))
		return;

	/* we know that after a conv, the upper bits are sign extended
	 * so we only need the 2nd conv if it shrinks the mode */
	conv_mode      = get_ia32_ls_mode(node);
	conv_mode_bits = get_mode_size_bits(conv_mode);
	pred_mode      = get_ia32_ls_mode(pred);
	pred_mode_bits = get_mode_size_bits(pred_mode);

	if (conv_mode_bits == pred_mode_bits
	    && get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
		result_conv = pred_proj;
	} else if (conv_mode_bits <= pred_mode_bits) {
		/* if the 2nd conv is smaller than the first conv, then we can always
		 * take the 2nd conv */
		if (get_irn_n_edges(pred_proj) == 1) {
			result_conv = pred_proj;
			set_ia32_ls_mode(pred, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				const arch_register_req_t **reqs = arch_get_irn_register_reqs_in(node);
				set_irn_op(pred, op_ia32_Conv_I2I);
				arch_set_irn_register_reqs_in(pred, reqs);
			}
		} else {
			/* we don't want to end up with 2 loads, so we better do nothing */
			if (get_irn_mode(pred) == mode_T) {
				return;
			}

			result_conv = exact_copy(pred);
			set_ia32_ls_mode(result_conv, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				const arch_register_req_t **reqs = arch_get_irn_register_reqs_in(node);
				set_irn_op(result_conv, op_ia32_Conv_I2I);
				arch_set_irn_register_reqs_in(result_conv, reqs);
			}
		}
	} else {
		/* if both convs have the same sign, then we can take the smaller one */
		if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
			result_conv = pred_proj;
		} else {
			/* no optimisation possible if the smaller conv is sign-extend */
			if (mode_is_signed(pred_mode)) {
				return;
			}
			/* we can take the smaller conv if it is unsigned */
			result_conv = pred_proj;
		}
	}

	ir_fprintf(stderr, "Optimisation warning: unoptimized ia32 Conv(Conv) (%+F, %+F)\n", node, pred);
	/* Some users (like Phis) won't be happy if we change the mode. */
	set_irn_mode(result_conv, get_irn_mode(node));

	exchange(node, result_conv);

	if (get_irn_n_edges(pred_proj) == 0) {
		kill_node(pred_proj);
		if (pred != pred_proj)
			kill_node(pred);
	}
	optimize_conv_conv(result_conv);
}
static void optimize_node(ir_node *node, void *env)
{
	(void)env;

	optimize_load_conv(node);
	optimize_conv_store(node);
	optimize_conv_conv(node);
}
/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ir_graph *irg)
{
	irg_walk_blkwise_graph(irg, NULL, optimize_node, NULL);
}
void ia32_init_optimize(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}