/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Implements several optimizations for IA32.
 * @author  Matthias Braun, Christian Wuerdig
 */
#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"
#include "../bepeephole.h"

#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
#include "ia32_architecture.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static ia32_code_gen_t *cg;
static void copy_mark(const ir_node *old, ir_node *new)
{
	if (is_ia32_is_reload(old))
		set_ia32_is_reload(new);
	if (is_ia32_is_spill(old))
		set_ia32_is_spill(new);
	if (is_ia32_is_remat(old))
		set_ia32_is_remat(new);
}
typedef enum produces_flag_t {
	produces_no_flag,
	produces_flag_zero,
	produces_flag_carry
} produces_flag_t;
/**
 * Returns which usable flag the given node produces.
 *
 * @param node  the node to check
 * @param pn    the projection number of the used result
 */
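/* Illustrative note (not from the original source): x86 shifts with a zero
 * count leave EFLAGS untouched, e.g.
 *     shl eax, 0    ; flags keep their old value
 * so a shift only counts as a flag producer below when its count is a
 * nonzero immediate. The check masks with 0x1f because the CPU itself only
 * uses the low 5 bits of the shift count. */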
static produces_flag_t produces_test_flag(ir_node *node, int pn)
{
	ir_node                     *count;
	const ia32_immediate_attr_t *imm_attr;

	if (!is_ia32_irn(node))
		return produces_no_flag;

	switch (get_ia32_irn_opcode(node)) {
		case iro_ia32_Add:  case iro_ia32_Adc:  case iro_ia32_And:
		case iro_ia32_Or:   case iro_ia32_Xor:  case iro_ia32_Sub:
		case iro_ia32_Sbb:  case iro_ia32_Neg:  case iro_ia32_Inc:
		case iro_ia32_Dec:
			break;

		case iro_ia32_ShlD:
		case iro_ia32_ShrD:
			assert(n_ia32_ShlD_count == n_ia32_ShrD_count);
			count = get_irn_n(node, n_ia32_ShlD_count);
			goto check_shift_amount;

		case iro_ia32_Shl:
		case iro_ia32_Shr:
		case iro_ia32_Sar:
			assert(n_ia32_Shl_count == n_ia32_Shr_count
					&& n_ia32_Shl_count == n_ia32_Sar_count);
			count = get_irn_n(node, n_ia32_Shl_count);
check_shift_amount:
			/* when the shift count is zero the flags are not affected, so we
			 * can only do this for constants != 0 */
			if (!is_ia32_Immediate(count))
				return produces_no_flag;

			imm_attr = get_ia32_immediate_attr_const(count);
			if (imm_attr->symconst != NULL)
				return produces_no_flag;
			if ((imm_attr->offset & 0x1f) == 0)
				return produces_no_flag;
			break;

		case iro_ia32_Mul:
			return pn == pn_ia32_Mul_res_high ?
				produces_flag_carry : produces_no_flag;

		default:
			return produces_no_flag;
	}

	return pn == pn_ia32_res ?
		produces_flag_zero : produces_no_flag;
}
/**
 * If the given node does not have mode_T, creates a mode_T version (with a
 * result Proj).
 *
 * @param node  the node to change
 *
 * @return the new mode_T node (if the mode was changed) or node itself
 */
static ir_node *turn_into_mode_t(ir_node *node)
{
	ir_node               *block;
	ir_node               *res_proj;
	ir_node               *new_node;
	const arch_register_t *reg;

	if (get_irn_mode(node) == mode_T)
		return node;

	assert(get_irn_mode(node) == mode_Iu);

	new_node = exact_copy(node);
	set_irn_mode(new_node, mode_T);

	block    = get_nodes_block(new_node);
	res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu,
	                      pn_ia32_res);

	reg = arch_get_irn_register(node);
	arch_set_irn_register(res_proj, reg);

	sched_add_before(node, new_node);
	be_peephole_exchange(node, res_proj);

	return new_node;
}
/**
 * Replace Cmp(x, 0) by a Test(x, x).
 */
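/* Illustrative sketch (assembly level, not from the original source):
 *     cmp eax, 0          ; 3-byte encoding
 * becomes
 *     test eax, eax       ; 2-byte encoding
 * both set the zero and sign flags identically for a comparison with 0. */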
static void peephole_ia32_Cmp(ir_node *const node)
{
	ir_node                     *right;
	ia32_immediate_attr_t const *imm;
	dbg_info                    *dbgi;
	ir_graph                    *irg;
	ir_node                     *block;
	ir_node                     *noreg;
	ir_node                     *nomem;
	ir_node                     *op;
	ia32_attr_t           const *attr;
	int                          ins_permuted;
	int                          cmp_unsigned;
	ir_node                     *test;
	arch_register_t       const *reg;
	ir_edge_t             const *edge;
	ir_edge_t             const *tmp;

	if (get_ia32_op_type(node) != ia32_Normal)
		return;

	right = get_irn_n(node, n_ia32_Cmp_right);
	if (!is_ia32_Immediate(right))
		return;

	imm = get_ia32_immediate_attr_const(right);
	if (imm->symconst != NULL || imm->offset != 0)
		return;

	dbgi         = get_irn_dbg_info(node);
	irg          = current_ir_graph;
	block        = get_nodes_block(node);
	noreg        = ia32_new_NoReg_gp(cg);
	nomem        = get_irg_no_mem(irg);
	op           = get_irn_n(node, n_ia32_Cmp_left);
	attr         = get_irn_generic_attr(node);
	ins_permuted = attr->data.ins_permuted;
	cmp_unsigned = attr->data.cmp_unsigned;

	if (is_ia32_Cmp(node)) {
		test = new_rd_ia32_Test(dbgi, irg, block, noreg, noreg, nomem,
		                        op, op, ins_permuted, cmp_unsigned);
	} else {
		test = new_rd_ia32_Test8Bit(dbgi, irg, block, noreg, noreg, nomem,
		                            op, op, ins_permuted, cmp_unsigned);
	}
	set_ia32_ls_mode(test, get_ia32_ls_mode(node));

	reg = arch_get_irn_register(node);
	arch_set_irn_register(test, reg);

	foreach_out_edge_safe(node, edge, tmp) {
		ir_node *const user = get_edge_src_irn(edge);

		if (is_Proj(user))
			exchange(user, test);
	}

	sched_add_before(node, test);
	copy_mark(node, test);
	be_peephole_exchange(node, test);
}
/**
 * Peephole optimization for Test instructions.
 * We can remove the Test, if a zero flag was produced which is still
 * live.
 */
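/* Illustrative sketch (assembly level, not from the original source):
 *     and eax, ebx        ; already sets the zero flag
 *     test eax, eax       ; redundant
 *     jz   label
 * becomes
 *     and eax, ebx
 *     jz   label
 */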
static void peephole_ia32_Test(ir_node *node)
{
	ir_node *left  = get_irn_n(node, n_ia32_Test_left);
	ir_node *right = get_irn_n(node, n_ia32_Test_right);

	ir_node         *block;
	ir_mode         *flags_mode;
	ir_node         *flags_proj;
	int              pn = pn_ia32_res;
	ir_node         *schedpoint;
	const ir_edge_t *edge;

	assert(n_ia32_Test_left == n_ia32_Test8Bit_left
			&& n_ia32_Test_right == n_ia32_Test8Bit_right);

	/* we need a test for 0 */
	if (left != right)
		return;

	block = get_nodes_block(node);
	if (get_nodes_block(left) != block)
		return;

	if (is_Proj(left)) {
		pn   = get_Proj_proj(left);
		left = get_Proj_pred(left);
	}

	/* happens rarely, but if it does, the code will panic */
	if (is_ia32_Unknown_GP(left))
		return;

	/* walk the schedule up and abort when we find left or some other node
	 * that destroys the flags */
	schedpoint = node;
	for (;;) {
		schedpoint = sched_prev(schedpoint);
		if (schedpoint == left)
			break;
		if (arch_irn_is(cg->arch_env, schedpoint, modify_flags))
			return;
		if (schedpoint == block)
			panic("couldn't find left");
	}

	/* make sure only Lg/Eq tests are used */
	foreach_out_edge(node, edge) {
		ir_node *user = get_edge_src_irn(edge);
		int      pnc  = get_ia32_condcode(user);

		if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
			return;
		}
	}

	switch (produces_test_flag(left, pn)) {
		case produces_flag_zero:
			break;

		case produces_flag_carry:
			foreach_out_edge(node, edge) {
				ir_node *user = get_edge_src_irn(edge);
				int      pnc  = get_ia32_condcode(user);

				switch (pnc) {
					case pn_Cmp_Eq: pnc = pn_Cmp_Ge | ia32_pn_Cmp_unsigned; break;
					case pn_Cmp_Lg: pnc = pn_Cmp_Lt | ia32_pn_Cmp_unsigned; break;
					default: panic("unexpected pn");
				}
				set_ia32_condcode(user, pnc);
			}
			break;

		default:
			return;
	}

	left = turn_into_mode_t(left);

	flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
	flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode,
	                        pn_ia32_flags);
	arch_set_irn_register(flags_proj, &ia32_flags_regs[REG_EFLAGS]);

	assert(get_irn_mode(node) != mode_T);

	be_peephole_exchange(node, flags_proj);
}
/**
 * AMD Athlon works faster when RET is not the destination of a conditional
 * jump and is not directly preceded by another jump instruction.
 * This can be avoided by placing a Rep prefix before the return.
 */
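/* Hedged note (the common idiom, not spelled out in this file): AMD's
 * optimization guides recommend encoding such a return as
 *     rep ret
 * or using a longer return encoding, so the branch predictor does not
 * mispredict the single-byte RET; here the backend requests the longer
 * pop-style return instead (see be_Return_set_emit_pop below). */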
static void peephole_ia32_Return(ir_node *node) {
	ir_node *block, *irn;

	if (!ia32_cg_config.use_pad_return)
		return;

	block = get_nodes_block(node);

	/* check if this return is the first on the block */
	sched_foreach_reverse_from(node, irn) {
		switch (get_irn_opcode(irn)) {
		case beo_Return:
			/* the return node itself, ignore */
			continue;
		case iro_Start:
		case beo_RegParams:
		case beo_Barrier:
			/* ignore, no code generated */
			continue;
		case beo_IncSP:
			/* arg, IncSP 0 nodes might occur, ignore these */
			if (be_get_IncSP_offset(irn) == 0)
				continue;
			return;
		case iro_Phi:
			continue;
		default:
			return;
		}
	}

	/* ensure that the 3-byte return is generated */
	be_Return_set_emit_pop(node, 1);
}
/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE	48
/**
 * Tries to create Pushes from IncSP, Store combinations.
 * The Stores are replaced by Pushes, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
 */
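/* Illustrative sketch (assembly level, not from the original source):
 *     sub esp, 8
 *     mov [esp+4], eax
 *     mov [esp], ebx
 * becomes
 *     push eax
 *     push ebx
 * each Push allocates 4 bytes itself, so the IncSP shrinks accordingly
 * (down to IncSP 0 when all slots are covered, as here). */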
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
	int       i;
	int       maxslot;
	int       inc_ofs;
	ir_node  *node;
	ir_node  *stores[MAXPUSH_OPTIMIZE];
	ir_node  *block;
	ir_graph *irg;
	ir_node  *curr_sp;
	ir_mode  *spmode;
	ir_node  *first_push = NULL;
	ir_edge_t const *edge;
	ir_edge_t const *next;

	memset(stores, 0, sizeof(stores));

	assert(be_is_IncSP(irn));

	inc_ofs = be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule after the IncSP node as long as we find
	 * suitable Stores that could be transformed to a Push.
	 * We save them into the stores array, which is sorted by the frame
	 * offset/4 attached to the node.
	 */
	maxslot = -1;
	for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
		ir_node *mem;
		int      offset;
		int      storeslot;

		/* it has to be a Store */
		if (!is_ia32_Store(node))
			break;

		/* it has to use our sp value */
		if (get_irn_n(node, n_ia32_base) != irn)
			continue;
		/* Store has to be attached to NoMem */
		mem = get_irn_n(node, n_ia32_mem);
		if (!is_NoMem(mem))
			continue;

		/* unfortunately we can't support the full AMs possible for push at the
		 * moment. TODO: fix this */
		if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* storing at half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (inc_ofs - 4 < offset || offset >= MAXPUSH_OPTIMIZE * 4)
			continue;
		storeslot = offset >> 2;

		/* storing into the same slot twice is bad (and shouldn't happen...) */
		if (stores[storeslot] != NULL)
			break;

		stores[storeslot] = node;
		if (storeslot > maxslot)
			maxslot = storeslot;
	}

	curr_sp = irn;

	for (i = -1; i < maxslot; ++i) {
		if (stores[i + 1] == NULL)
			break;
	}

	/* walk through the Stores and create Pushes for them */
	block  = get_nodes_block(irn);
	spmode = get_irn_mode(irn);
	irg    = cg->irg;
	for (; i >= 0; --i) {
		const arch_register_t *spreg;
		ir_node *push;
		ir_node *val, *mem, *mem_proj;
		ir_node *store = stores[i];
		ir_node *noreg = ia32_new_NoReg_gp(cg);

		val = get_irn_n(store, n_ia32_unary_op);
		mem = get_irn_n(store, n_ia32_mem);
		spreg = arch_get_irn_register(curr_sp);

		push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);
		copy_mark(store, push);

		if (first_push == NULL)
			first_push = push;

		sched_add_after(curr_sp, push);

		/* create stackpointer Proj */
		curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
		arch_set_irn_register(curr_sp, spreg);

		/* create memory Proj */
		mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

		/* use the memproj now */
		be_peephole_exchange(store, mem_proj);

		inc_ofs -= 4;
	}

	foreach_out_edge_safe(irn, edge, next) {
		ir_node *const src = get_edge_src_irn(edge);
		int      const pos = get_edge_src_pos(edge);

		if (src == first_push)
			continue;

		set_irn_n(src, pos, curr_sp);
	}

	be_set_IncSP_offset(irn, inc_ofs);
}
#if 0
/* unfinished Store->Push transformation; kept disabled */
static void peephole_store_incsp(ir_node *store)
{
	dbg_info *dbgi;
	ir_node  *node;
	ir_node  *block;
	ir_node  *noreg;
	ir_node  *mem;
	ir_node  *push;
	ir_node  *val;
	ir_node  *am_base = get_irn_n(store, n_ia32_Store_base);
	if (!be_is_IncSP(am_base)
			|| get_nodes_block(am_base) != get_nodes_block(store))
		return;
	mem = get_irn_n(store, n_ia32_Store_mem);
	if (!is_ia32_NoReg_GP(get_irn_n(store, n_ia32_Store_index))
			|| !is_NoMem(mem))
		return;

	int incsp_offset = be_get_IncSP_offset(am_base);
	if (incsp_offset <= 0)
		return;

	/* we have to be at offset 0 */
	int my_offset = get_ia32_am_offs_int(store);
	if (my_offset != 0) {
		/* TODO here: find out whether there is a store with offset 0 before
		 * us and whether we can move it down to our place */
		return;
	}
	ir_mode *ls_mode = get_ia32_ls_mode(store);
	int my_store_size = get_mode_size_bytes(ls_mode);

	if (my_offset + my_store_size > incsp_offset)
		return;

	/* correctness checking:
	   - no one else must write to that stack slot
	     (because after the transformation the IncSP won't allocate it anymore)
	 */
	sched_foreach_reverse_from(store, node) {
		int i, arity;

		if (node == am_base)
			break;

		/* make sure no one else can use the space on the stack */
		arity = get_irn_arity(node);
		for (i = 0; i < arity; ++i) {
			ir_node *pred = get_irn_n(node, i);
			if (pred != am_base)
				continue;

			if (i == n_ia32_base &&
					(get_ia32_op_type(node) == ia32_AddrModeS
					 || get_ia32_op_type(node) == ia32_AddrModeD)) {
				int      node_offset  = get_ia32_am_offs_int(node);
				ir_mode *node_ls_mode = get_ia32_ls_mode(node);
				int      node_size    = get_mode_size_bytes(node_ls_mode);
				/* overlapping with our position? abort */
				if (node_offset < my_offset + my_store_size
						&& node_offset + node_size >= my_offset)
					return;
				/* otherwise it's fine */
				continue;
			}

			/* strange use of esp: abort */
			return;
		}
	}

	/* all ok, change to push */
	dbgi  = get_irn_dbg_info(store);
	block = get_nodes_block(store);
	noreg = ia32_new_NoReg_gp(cg);
	val   = get_irn_n(store, n_ia32_Store_val);

	push = new_rd_ia32_Push(dbgi, current_ir_graph, block, noreg, noreg, mem,
	                        val, am_base);
	create_push(dbgi, current_ir_graph, block, am_base, store);
}
#endif
/**
 * Returns true if a mode can be stored in the GP register set.
 */
static INLINE int mode_needs_gp_reg(ir_mode *mode) {
	if (mode == mode_fpcw)
		return 0;
	if (get_mode_size_bits(mode) > 32)
		return 0;
	return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
}
/**
 * Tries to create Pops from Load, IncSP combinations.
 * The Loads are replaced by Pops, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
 */
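/* Illustrative sketch (assembly level, not from the original source):
 *     mov eax, [esp]
 *     mov ecx, [esp+4]
 *     add esp, 8
 * becomes
 *     pop eax
 *     pop ecx
 * each Pop frees 4 bytes itself, so the IncSP shrinks accordingly. */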
static void peephole_Load_IncSP_to_pop(ir_node *irn)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	int       i, maxslot, inc_ofs, ofs;
	ir_node  *node, *pred_sp, *block;
	ir_node  *loads[MAXPUSH_OPTIMIZE];
	ir_graph *irg;
	unsigned  regmask  = 0;
	unsigned  copymask = ~0;

	memset(loads, 0, sizeof(loads));
	assert(be_is_IncSP(irn));

	inc_ofs = -be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule before the IncSP node as long as we find
	 * suitable Loads that could be transformed to a Pop.
	 * We save them into the loads array, which is sorted by the frame
	 * offset/4 attached to the node.
	 */
	maxslot = -1;
	pred_sp = be_get_IncSP_pred(irn);
	for (node = sched_prev(irn); !sched_is_end(node); node = sched_prev(node)) {
		int offset;
		int loadslot;
		const arch_register_t *sreg, *dreg;

		/* it has to be a Load */
		if (!is_ia32_Load(node)) {
			if (be_is_Copy(node)) {
				if (!mode_needs_gp_reg(get_irn_mode(node))) {
					/* not a GP copy, ignore */
					continue;
				}
				dreg = arch_get_irn_register(node);
				sreg = arch_get_irn_register(be_get_Copy_op(node));
				if (regmask & copymask & (1 << sreg->index)) {
					break;
				}
				if (regmask & copymask & (1 << dreg->index)) {
					break;
				}
				/* we CAN skip Copies if neither the destination nor the source
				 * is in our regmask, i.e. none of our future Pops will
				 * overwrite it */
				regmask  |= (1 << dreg->index) | (1 << sreg->index);
				copymask &= ~((1 << dreg->index) | (1 << sreg->index));
				continue;
			}
			break;
		}

		/* we can handle only GP loads */
		if (!mode_needs_gp_reg(get_ia32_ls_mode(node)))
			continue;

		/* it has to use our predecessor sp value */
		if (get_irn_n(node, n_ia32_base) != pred_sp) {
			/* it would be ok if this load does not use a Pop result,
			 * but we do not check this */
			break;
		}

		/* should have NO index */
		if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* loading from half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
			continue;
		/* ignore those outside the possible window */
		if (offset > inc_ofs - 4)
			continue;
		loadslot = offset >> 2;

		/* loading from the same slot twice is bad (and shouldn't happen...) */
		if (loads[loadslot] != NULL)
			break;

		dreg = arch_get_irn_register(node);
		if (regmask & (1 << dreg->index)) {
			/* this register is already used */
			break;
		}
		regmask |= 1 << dreg->index;

		loads[loadslot] = node;
		if (loadslot > maxslot)
			maxslot = loadslot;
	}

	if (maxslot < 0)
		return;

	/* find the first slot */
	for (i = maxslot; i >= 0; --i) {
		ir_node *load = loads[i];
		if (load == NULL)
			break;
	}

	ofs     = inc_ofs - (maxslot + 1) * 4;
	inc_ofs = (i + 1) * 4;

	/* create a new IncSP if needed */
	block = get_nodes_block(irn);
	irg   = cg->irg;
	if (inc_ofs > 0) {
		pred_sp = be_new_IncSP(esp, irg, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
		sched_add_before(irn, pred_sp);
	}

	/* walk through the Loads and create Pops for them */
	for (++i; i <= maxslot; ++i) {
		ir_node *load = loads[i];
		ir_node *mem, *pop;
		const ir_edge_t *edge, *tmp;
		const arch_register_t *reg;

		mem = get_irn_n(load, n_ia32_mem);
		reg = arch_get_irn_register(load);

		pop = new_rd_ia32_Pop(get_irn_dbg_info(load), irg, block, mem, pred_sp);
		arch_set_irn_register(pop, reg);

		copy_mark(load, pop);

		/* create stackpointer Proj */
		pred_sp = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
		arch_set_irn_register(pred_sp, esp);

		sched_add_before(irn, pop);

		/* rewire the Load's Projs to the Pop */
		foreach_out_edge_safe(load, edge, tmp) {
			ir_node *proj = get_edge_src_irn(edge);

			set_Proj_pred(proj, pop);
		}

		/* we can remove the Load now */
		sched_remove(load);
		kill_node(load);
	}

	be_set_IncSP_offset(irn, -ofs);
	be_set_IncSP_pred(irn, pred_sp);
}
/**
 * Finds a free GP register if possible, else returns NULL.
 */
static const arch_register_t *get_free_gp_reg(void)
{
	int i;

	for (i = 0; i < N_ia32_gp_REGS; ++i) {
		const arch_register_t *reg = &ia32_gp_regs[i];
		if (arch_register_type_is(reg, ignore))
			continue;

		if (be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
			return &ia32_gp_regs[i];
	}

	return NULL;
}
/**
 * Creates a Pop instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param irg         the graph
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to pop
 *
 * @return the new stack value
 */
static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block,
                           ir_node *stack, ir_node *schedpoint,
                           const arch_register_t *reg)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	ir_node *pop;
	ir_node *keep;
	ir_node *val;
	ir_node *in[1];

	pop = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);

	stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
	arch_set_irn_register(stack, esp);
	val   = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
	arch_set_irn_register(val, reg);

	sched_add_before(schedpoint, pop);

	in[0] = val;
	keep  = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
	sched_add_before(schedpoint, keep);

	return stack;
}
/**
 * Creates a Push instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param irg         the graph
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 *
 * @return the new stack value
 */
static ir_node *create_push(dbg_info *dbgi, ir_graph *irg, ir_node *block,
                            ir_node *stack, ir_node *schedpoint)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];

	ir_node *val   = ia32_new_Unknown_gp(cg);
	ir_node *noreg = ia32_new_NoReg_gp(cg);
	ir_node *nomem = get_irg_no_mem(irg);
	ir_node *push  = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack);
	sched_add_before(schedpoint, push);

	stack = new_r_Proj(irg, block, push, mode_Iu, pn_ia32_Push_stack);
	arch_set_irn_register(stack, esp);

	return stack;
}
/**
 * Optimize an IncSP by replacing it with Push/Pop.
 */
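/* Illustrative sketch (assembly level, not from the original source): on
 * processors where the config flags below enable it,
 *     add esp, 4    becomes    pop ecx   (into any free register)
 *     sub esp, 4    becomes    push eax  (the pushed value is irrelevant)
 * trading the explicit stack arithmetic for smaller stack instructions. */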
static void peephole_be_IncSP(ir_node *node)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	const arch_register_t *reg;
	ir_graph              *irg = current_ir_graph;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *stack;
	int                    offset;

	/* first optimize incsp->incsp combinations */
	node = be_peephole_IncSP_IncSP(node);

	/* transform IncSP->Store combinations to Push where possible */
	peephole_IncSP_Store_to_push(node);

	/* transform Load->IncSP combinations to Pop where possible */
	peephole_Load_IncSP_to_pop(node);

	if (arch_get_irn_register(node) != esp)
		return;

	/* replace IncSP -4 by Pop freereg when possible */
	offset = be_get_IncSP_offset(node);
	if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
	    (offset != -4 || ia32_cg_config.use_add_esp_4) &&
	    (offset != +4 || ia32_cg_config.use_sub_esp_4) &&
	    (offset != +8 || ia32_cg_config.use_sub_esp_8))
		return;

	if (offset < 0) {
		/* we need a free register for pop */
		reg = get_free_gp_reg();
		if (reg == NULL)
			return;

		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);

		stack = create_pop(dbgi, irg, block, stack, node, reg);

		if (offset == -8) {
			stack = create_pop(dbgi, irg, block, stack, node, reg);
		}
	} else {
		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);
		stack = create_push(dbgi, irg, block, stack, node);

		if (offset == +8) {
			stack = create_push(dbgi, irg, block, stack, node);
		}
	}

	be_peephole_exchange(node, stack);
}
/**
 * Peephole optimisation for ia32_Const nodes.
 */
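/* Illustrative sketch (assembly level, not from the original source):
 *     mov eax, 0          ; 5-byte encoding
 * becomes
 *     xor eax, eax        ; 2-byte encoding
 * but xor clobbers EFLAGS, hence the liveness check on the flags below. */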
static void peephole_ia32_Const(ir_node *node)
{
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
	const arch_register_t       *reg;
	ir_graph                    *irg = current_ir_graph;
	ir_node                     *block;
	dbg_info                    *dbgi;
	ir_node                     *produceval;
	ir_node                     *xor;
	ir_node                     *noreg;

	/* try to transform a mov 0, reg to xor reg reg */
	if (attr->offset != 0 || attr->symconst != NULL)
		return;
	if (ia32_cg_config.use_mov_0)
		return;
	/* xor destroys the flags, so no one must be using them */
	if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;

	reg = arch_get_irn_register(node);
	assert(be_peephole_get_reg_value(reg) == NULL);

	/* create xor(produceval, produceval) */
	block = get_nodes_block(node);
	dbgi  = get_irn_dbg_info(node);
	produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
	arch_set_irn_register(produceval, reg);

	noreg = ia32_new_NoReg_gp(cg);
	xor   = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
	                        produceval, produceval);
	arch_set_irn_register(xor, reg);

	sched_add_before(node, produceval);
	sched_add_before(node, xor);

	copy_mark(node, xor);
	be_peephole_exchange(node, xor);
}
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
{
	return node == cg->noreg_gp;
}
static ir_node *create_immediate_from_int(int val)
{
	ir_graph *irg         = current_ir_graph;
	ir_node  *start_block = get_irg_start_block(irg);
	ir_node  *immediate   = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
	                                              0, val);
	arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);

	return immediate;
}
static ir_node *create_immediate_from_am(const ir_node *node)
{
	ir_graph  *irg     = get_irn_irg(node);
	ir_node   *block   = get_nodes_block(node);
	int        offset  = get_ia32_am_offs_int(node);
	int        sc_sign = is_ia32_am_sc_sign(node);
	ir_entity *entity  = get_ia32_am_sc(node);
	ir_node   *res;

	res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
	arch_set_irn_register(res, &ia32_gp_regs[REG_GP_NOREG]);
	return res;
}
static int is_am_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == 1 && entity == NULL;
}

static int is_am_minus_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == -1 && entity == NULL;
}
/**
 * Transforms a LEA into an Add or SHL if possible.
 */
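/* Illustrative sketches (assembly level, not from the original source),
 * assuming the LEA result lands in one of its own source registers:
 *     lea eax, [eax+4]      becomes    add eax, 4
 *     lea eax, [eax+ebx]    becomes    add eax, ebx
 *     lea eax, [eax*4]      becomes    shl eax, 2
 * lea does not touch the flags while add/shl do, hence the flags check
 * below. */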
static void peephole_ia32_Lea(ir_node *node)
{
	ir_graph              *irg = current_ir_graph;
	ir_node               *base;
	ir_node               *index;
	const arch_register_t *base_reg;
	const arch_register_t *index_reg;
	const arch_register_t *out_reg;
	int                    scale;
	int                    has_immediates;
	ir_node               *op1;
	ir_node               *op2;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *res;
	ir_node               *noreg;
	ir_node               *nomem;

	assert(is_ia32_Lea(node));

	/* we can only do this if we are allowed to clobber the flags */
	if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;

	base  = get_irn_n(node, n_ia32_Lea_base);
	index = get_irn_n(node, n_ia32_Lea_index);

	if (is_noreg(cg, base)) {
		base     = NULL;
		base_reg = NULL;
	} else {
		base_reg = arch_get_irn_register(base);
	}
	if (is_noreg(cg, index)) {
		index     = NULL;
		index_reg = NULL;
	} else {
		index_reg = arch_get_irn_register(index);
	}

	if (base == NULL && index == NULL) {
		/* we shouldn't construct these in the first place... */
#ifdef DEBUG_libfirm
		ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
#endif
		return;
	}

	out_reg = arch_get_irn_register(node);
	scale   = get_ia32_am_scale(node);
	assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
	/* check if we have immediate values (frame entities should already be
	 * expressed in the offsets) */
	if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
		has_immediates = 1;
	} else {
		has_immediates = 0;
	}

	/* we can transform leas where the out register is the same as either the
	 * base or index register back to an Add or Shl */
	if (out_reg == base_reg) {
		if (index == NULL) {
#ifdef DEBUG_libfirm
			if (!has_immediates) {
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
			}
#endif
			op1 = base;
			goto make_add_immediate;
		}
		if (scale == 0 && !has_immediates) {
			op1 = base;
			op2 = index;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else if (out_reg == index_reg) {
		if (base == NULL) {
			if (has_immediates && scale == 0) {
				op1 = index;
				goto make_add_immediate;
			} else if (!has_immediates && scale > 0) {
				op1 = index;
				op2 = create_immediate_from_int(scale);
				goto make_shl;
			} else if (!has_immediates) {
#ifdef DEBUG_libfirm
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
#endif
			}
		} else if (scale == 0 && !has_immediates) {
			op1 = index;
			op2 = base;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else {
		/* can't create an add */
		return;
	}

make_add_immediate:
	if (ia32_cg_config.use_incdec) {
		if (is_am_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_rd_ia32_Inc(dbgi, irg, block, op1);
			arch_set_irn_register(res, out_reg);
			goto exchange;
		}
		if (is_am_minus_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_rd_ia32_Dec(dbgi, irg, block, op1);
			arch_set_irn_register(res, out_reg);
			goto exchange;
		}
	}
	op2 = create_immediate_from_am(node);

make_add:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	nomem = new_NoMem();
	res   = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
	arch_set_irn_register(res, out_reg);
	set_ia32_commutative(res);
	goto exchange;

make_shl:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	nomem = new_NoMem();
	res   = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
	arch_set_irn_register(res, out_reg);
	goto exchange;

exchange:
	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));

	/* add the new ADD/SHL to the schedule */
	DBG_OPT_LEA2ADD(node, res);

	/* exchange the Add and the LEA */
	sched_add_before(node, res);
	copy_mark(node, res);
	be_peephole_exchange(node, res);
}
/**
 * Splits an Imul mem, imm into a Load mem and Imul reg, imm if possible.
 */
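/* Illustrative sketch (assembly level, not from the original source):
 *     imul eax, [mem], 3
 * becomes
 *     mov  ecx, [mem]       ; into some free register
 *     imul eax, ecx, 3
 * which helps on targets where the memory/immediate form of imul is slow
 * (see the use_imul_mem_imm32 config check at the bottom of this file). */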
static void peephole_ia32_Imul_split(ir_node *imul)
{
	const ir_node         *right = get_irn_n(imul, n_ia32_IMul_right);
	const arch_register_t *reg;
	ir_node               *res;

	if (!is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
		/* not a memory/immediate form; ignore */
		return;
	}
	/* we need a free register */
	reg = get_free_gp_reg();
	if (reg == NULL)
		return;

	/* fine, we can rebuild it */
	res = turn_back_am(imul);
	arch_set_irn_register(res, reg);
}
/**
 * Replace xorps r,r and xorpd r,r by pxor r,r
 */
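/* Hedged note: pxor, xorps and xorpd all zero an XMM register when both
 * operands are the same register; pxor is only substituted when the
 * use_pxor config flag says it is the cheaper choice on the target (see
 * ia32_peephole_optimization below). */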
static void peephole_ia32_xZero(ir_node *xor) {
	set_irn_op(xor, op_ia32_xPzero);
}
/**
 * Register a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
	assert(op->ops.generic == NULL);
	op->ops.generic = (op_func)func;
}
/* Perform peephole optimizations. */
void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
{
	cg = new_cg;

	/* register peephole optimisations */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Const,    peephole_ia32_Const);
	register_peephole_optimisation(op_be_IncSP,      peephole_be_IncSP);
	register_peephole_optimisation(op_ia32_Lea,      peephole_ia32_Lea);
	register_peephole_optimisation(op_ia32_Cmp,      peephole_ia32_Cmp);
	register_peephole_optimisation(op_ia32_Cmp8Bit,  peephole_ia32_Cmp);
	register_peephole_optimisation(op_ia32_Test,     peephole_ia32_Test);
	register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
	register_peephole_optimisation(op_be_Return,     peephole_ia32_Return);
	if (!ia32_cg_config.use_imul_mem_imm32)
		register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
	if (ia32_cg_config.use_pxor)
		register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);

	be_peephole_opt(cg->birg);
}
/**
 * Removes a node from the schedule if it is not used anymore. If the node is
 * a mode_T node, all its Projs are removed as well.
 *
 * @param node  The node to be removed from the schedule
 */
static INLINE void try_kill(ir_node *node)
{
	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(node, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);
			try_kill(proj);
		}
	}

	if (get_irn_n_edges(node) != 0)
		return;

	if (sched_is_scheduled(node)) {
		sched_remove(node);
	}

	kill_node(node);
}
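/* Illustrative sketch (not from the original source) for the following
 * function: a store only keeps the low bits anyway, so
 *     v8  = ...
 *     v32 = Conv_I2I(v8)      ; widening conversion
 *     Store8Bit(mem, v32)
 * can store v8 directly; only a conversion that shrinks the value below
 * the stored width changes the stored bits and must be kept. */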
static void optimize_conv_store(ir_node *node)
{
	ir_node *pred;
	ir_node *pred_proj;
	ir_mode *conv_mode;
	ir_mode *store_mode;

	if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
		return;

	assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Store_val);
	if (is_Proj(pred_proj)) {
		pred = get_Proj_pred(pred_proj);
	} else {
		pred = pred_proj;
	}
	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;
	if (get_ia32_op_type(pred) != ia32_Normal)
		return;

	/* the store only stores the lower bits, so we only need the conv
	 * if it shrinks the mode */
	conv_mode  = get_ia32_ls_mode(pred);
	store_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
		return;

	set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
	if (get_irn_n_edges(pred_proj) == 0) {
		kill_node(pred_proj);
		if (pred != pred_proj)
			kill_node(pred);
	}
}
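/* Illustrative sketch (not from the original source) for the following
 * function: a load already sign- or zero-extends its result, so
 *     v = Load(mem)           ; e.g. movsx eax, byte [mem]
 *     w = Conv_I2I(v)         ; widening conversion of the same signedness
 * makes the Conv redundant; if only the signedness differs and the Load
 * has a single user, the Load's mode is flipped instead (movsx <-> movzx). */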
static void optimize_load_conv(ir_node *node)
{
	ir_node *pred, *predpred;
	ir_mode *load_mode;
	ir_mode *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (!is_Proj(pred))
		return;
	predpred = get_Proj_pred(pred);
	if (!is_ia32_Load(predpred))
		return;

	/* the load is sign extending the upper bits, so we only need the conv
	 * if it shrinks the mode */
	load_mode = get_ia32_ls_mode(predpred);
	conv_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
		return;

	if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
		/* change the load if it has only 1 user */
		if (get_irn_n_edges(pred) == 1) {
			ir_mode *newmode;
			if (get_mode_sign(conv_mode)) {
				newmode = find_signed_mode(load_mode);
			} else {
				newmode = find_unsigned_mode(load_mode);
			}
			assert(newmode != NULL);
			set_ia32_ls_mode(predpred, newmode);
		} else {
			/* otherwise we have to keep the conv */
			return;
		}
	}

	/* kill the conv */
	exchange(node, pred);
}
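/* Illustrative sketch (not from the original source) for the following
 * function: chained conversions like
 *     v16 = Conv_I2I(x)       ; to 16 bit
 *     v8  = Conv_I2I(v16)     ; to 8 bit
 * can be collapsed into the single narrower conversion, since the second
 * conv discards everything the first one produced beyond 8 bits. */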
static void optimize_conv_conv(ir_node *node)
{
	ir_node *pred_proj, *pred, *result_conv;
	ir_mode *pred_mode, *conv_mode;
	int      conv_mode_bits;
	int      pred_mode_bits;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (is_Proj(pred_proj))
		pred = get_Proj_pred(pred_proj);
	else
		pred = pred_proj;

	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;

	/* we know that after a conv, the upper bits are sign extended
	 * so we only need the 2nd conv if it shrinks the mode */
	conv_mode      = get_ia32_ls_mode(node);
	conv_mode_bits = get_mode_size_bits(conv_mode);
	pred_mode      = get_ia32_ls_mode(pred);
	pred_mode_bits = get_mode_size_bits(pred_mode);

	if (conv_mode_bits == pred_mode_bits
			&& get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
		result_conv = pred_proj;
	} else if (conv_mode_bits <= pred_mode_bits) {
		/* if the 2nd conv is smaller than the first conv, then we can always
		 * take the 2nd conv */
		if (get_irn_n_edges(pred_proj) == 1) {
			result_conv = pred_proj;
			set_ia32_ls_mode(pred, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(pred, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(pred, get_ia32_in_req_all(node));
			}
		} else {
			/* we don't want to end up with 2 loads, so we better do nothing */
			if (get_irn_mode(pred) == mode_T) {
				return;
			}

			result_conv = exact_copy(pred);
			set_ia32_ls_mode(result_conv, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
			}
		}
	} else {
		/* if both convs have the same sign, then we can take the smaller one */
		if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
			result_conv = pred_proj;
		} else {
			/* no optimisation possible if the smaller conv is sign-extending */
			if (mode_is_signed(pred_mode)) {
				return;
			}
			/* we can take the smaller conv if it is unsigned */
			result_conv = pred_proj;
		}
	}

	/* kill the conv */
	exchange(node, result_conv);

	if (get_irn_n_edges(pred_proj) == 0) {
		kill_node(pred_proj);
		if (pred != pred_proj)
			kill_node(pred);
	}
	optimize_conv_conv(result_conv);
}
static void optimize_node(ir_node *node, void *env)
{
	(void) env;

	optimize_load_conv(node);
	optimize_conv_store(node);
	optimize_conv_conv(node);
}
/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg)
{
	irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

	if (cg->dump)
		be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);
}
void ia32_init_optimize(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}