/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief   Implements several optimizations for IA32.
 * @author  Matthias Braun, Christian Wuerdig
 */
#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"
#include "../bepeephole.h"

#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
#include "ia32_architecture.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static ia32_code_gen_t  *cg;
/**
 * Returns non-zero if the given node produces a zero flag.
 *
 * @param node  the node to check
 * @param pn    if >= 0, the projection number of the used result
 */
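/* For illustration (example ours, not from the original sources): an
 * "andl %ebx, %eax" already sets ZF according to its result, so a later
 * test of %eax against zero is redundant, while a plain "movl" leaves the
 * flags untouched and therefore never qualifies. */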
static int produces_zero_flag(ir_node *node, int pn)
{
	ir_node                     *count;
	const ia32_immediate_attr_t *imm_attr;

	if (!is_ia32_irn(node))
		return 0;

	if (pn >= 0 && pn != pn_ia32_res)
		return 0;

	switch (get_ia32_irn_opcode(node)) {
	case iro_ia32_Add:
	case iro_ia32_Adc:
	case iro_ia32_And:
	case iro_ia32_Or:
	case iro_ia32_Xor:
	case iro_ia32_Sub:
	case iro_ia32_Sbb:
	case iro_ia32_Neg:
		return 1;

	case iro_ia32_ShlD:
	case iro_ia32_ShrD:
	case iro_ia32_Shl:
	case iro_ia32_Shr:
	case iro_ia32_Sar:
		assert(n_ia32_ShlD_count == n_ia32_ShrD_count);
		assert(n_ia32_Shl_count == n_ia32_Shr_count
		       && n_ia32_Shl_count == n_ia32_Sar_count);
		if (is_ia32_ShlD(node) || is_ia32_ShrD(node)) {
			count = get_irn_n(node, n_ia32_ShlD_count);
		} else {
			count = get_irn_n(node, n_ia32_Shl_count);
		}
		/* when the shift count is zero the flags are not affected, so we can
		 * only do this for constants != 0 */
		if (!is_ia32_Immediate(count))
			return 0;

		imm_attr = get_ia32_immediate_attr_const(count);
		if (imm_attr->symconst != NULL)
			return 0;

		/* only the lowest 5 bits of the count are used by the hardware */
		if ((imm_attr->offset & 0x1f) == 0)
			return 0;
		return 1;

	default:
		return 0;
	}
}
/**
 * If the given node does not have mode_T, creates a mode_T version (with a
 * result Proj).
 *
 * @param node  the node to change
 *
 * @return the new mode_T node (if the mode was changed) or node itself
 */
static ir_node *turn_into_mode_t(ir_node *node)
{
	ir_node               *block;
	ir_node               *res_proj;
	ir_node               *new_node;
	const arch_register_t *reg;

	if (get_irn_mode(node) == mode_T)
		return node;

	assert(get_irn_mode(node) == mode_Iu);

	new_node = exact_copy(node);
	set_irn_mode(new_node, mode_T);

	block    = get_nodes_block(new_node);
	res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu,
	                      pn_ia32_res);

	reg = arch_get_irn_register(arch_env, node);
	arch_set_irn_register(arch_env, res_proj, reg);

	sched_add_before(node, new_node);
	be_peephole_exchange(node, res_proj);
	return new_node;
}
/**
 * Replace Cmp(x, 0) by a Test(x, x).
 */
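/* The effect at the assembly level, illustrated:
 *
 *     cmpl $0, %eax    ->    testl %eax, %eax
 *
 * test computes x & x, so ZF and SF are set exactly as by the compare
 * against zero (CF and OF are cleared by both), but the test encoding is
 * shorter because it needs no immediate. */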
static void peephole_ia32_Cmp(ir_node *const node)
{
	ir_node                     *right;
	ia32_immediate_attr_t const *imm;
	dbg_info                    *dbgi;
	ir_graph                    *irg;
	ir_node                     *block;
	ir_node                     *noreg;
	ir_node                     *nomem;
	ir_node                     *op;
	ia32_attr_t           const *attr;
	int                          ins_permuted;
	int                          cmp_unsigned;
	ir_node                     *test;
	arch_register_t       const *reg;
	ir_edge_t             const *edge;
	ir_edge_t             const *tmp;

	if (get_ia32_op_type(node) != ia32_Normal)
		return;

	right = get_irn_n(node, n_ia32_Cmp_right);
	if (!is_ia32_Immediate(right))
		return;

	imm = get_ia32_immediate_attr_const(right);
	if (imm->symconst != NULL || imm->offset != 0)
		return;

	dbgi         = get_irn_dbg_info(node);
	irg          = current_ir_graph;
	block        = get_nodes_block(node);
	noreg        = ia32_new_NoReg_gp(cg);
	nomem        = get_irg_no_mem(irg);
	op           = get_irn_n(node, n_ia32_Cmp_left);
	attr         = get_irn_generic_attr(node);
	ins_permuted = attr->data.ins_permuted;
	cmp_unsigned = attr->data.cmp_unsigned;

	if (is_ia32_Cmp(node)) {
		test = new_rd_ia32_Test(dbgi, irg, block, noreg, noreg, nomem,
		                        op, op, ins_permuted, cmp_unsigned);
	} else {
		test = new_rd_ia32_Test8Bit(dbgi, irg, block, noreg, noreg, nomem,
		                            op, op, ins_permuted, cmp_unsigned);
	}
	set_ia32_ls_mode(test, get_ia32_ls_mode(node));

	reg = arch_get_irn_register(arch_env, node);
	arch_set_irn_register(arch_env, test, reg);

	foreach_out_edge_safe(node, edge, tmp) {
		ir_node *const user = get_edge_src_irn(edge);

		if (is_Proj(user))
			exchange(user, test);
	}

	sched_add_before(node, test);
	be_peephole_exchange(node, test);
}
/**
 * Peephole optimization for Test instructions.
 * We can remove the Test if a zero flag was produced which is still live.
 */
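/* Illustrative example of what this catches:
 *
 *     subl %ebx, %eax       ; already sets ZF according to the result
 *     testl %eax, %eax      ; redundant as long as only ZF is consumed
 *     je .Lzero
 */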
static void peephole_ia32_Test(ir_node *node)
{
	ir_node *left  = get_irn_n(node, n_ia32_Test_left);
	ir_node *right = get_irn_n(node, n_ia32_Test_right);

	ir_node         *flags_proj;
	ir_mode         *flags_mode;
	int              pn = -1;
	ir_node         *schedpoint;
	ir_node         *block;
	const ir_edge_t *edge;

	assert(n_ia32_Test_left == n_ia32_Test8Bit_left
	       && n_ia32_Test_right == n_ia32_Test8Bit_right);

	/* we need a test for 0 */
	if (left != right)
		return;

	block = get_nodes_block(node);
	if (get_nodes_block(left) != block)
		return;

	if (is_Proj(left)) {
		pn   = get_Proj_proj(left);
		left = get_Proj_pred(left);
	}

	/* happens rarely, but if it does, code will panic */
	if (is_ia32_Unknown_GP(left))
		return;

	/* walk the schedule upwards and abort when we find left or some other
	 * node that destroys the flags */
	schedpoint = sched_prev(node);
	while (schedpoint != left) {
		schedpoint = sched_prev(schedpoint);
		if (arch_irn_is(arch_env, schedpoint, modify_flags))
			return;
		if (schedpoint == block)
			panic("couldn't find left");
	}

	/* make sure only Lg/Eq tests are used */
	foreach_out_edge(node, edge) {
		ir_node *user = get_edge_src_irn(edge);
		int      pnc  = get_ia32_condcode(user);

		if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
			return;
		}
	}

	if (!produces_zero_flag(left, pn))
		return;

	left = turn_into_mode_t(left);

	flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
	flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode,
	                        pn_ia32_flags);
	arch_set_irn_register(arch_env, flags_proj, &ia32_flags_regs[REG_EFLAGS]);

	assert(get_irn_mode(node) != mode_T);

	be_peephole_exchange(node, flags_proj);
}
/**
 * The AMD Athlon works faster when a RET is not the destination of a
 * conditional jump or directly preceded by another jump instruction.
 * This can be avoided by placing a Rep prefix before the return.
 */
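/* The classic workaround is the two-byte "rep ret" (0xf3 0xc3): the rep
 * prefix is ignored in front of a ret but keeps the return from being the
 * direct target of a branch. This backend instead requests the padded
 * return form below via be_Return_set_emit_pop(); the emitter decides the
 * final byte sequence. */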
static void peephole_ia32_Return(ir_node *node) {
	ir_node *block, *irn;

	if (!ia32_cg_config.use_pad_return)
		return;

	block = get_nodes_block(node);

	/* check if this return is the first on the block */
	sched_foreach_reverse_from(node, irn) {
		switch (get_irn_opcode(irn)) {
		case beo_Return:
			/* the return node itself, ignore */
			continue;
		case beo_Barrier:
			/* ignore the barrier, no code generated */
			continue;
		case beo_IncSP:
			/* arg, IncSP 0 nodes might occur, ignore these */
			if (be_get_IncSP_offset(irn) == 0)
				continue;
			return;
		default:
			return;
		}
	}

	/* ensure that the 3-byte return is generated (the emitter tests again
	 * whether the block beginning has a label and isn't just a fallthrough) */
	be_Return_set_emit_pop(node, 1);
}
/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE	48
/**
 * Tries to create Pushs from IncSP, Store combinations.
 * The Stores are replaced by Pushs, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
 */
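/* Illustrative example (stack grows downwards):
 *
 *     subl $8, %esp                  pushl %eax
 *     movl %eax, 4(%esp)     ->      pushl %ebx
 *     movl %ebx, (%esp)
 *
 * Each Push both moves the stack pointer and stores, so the explicit IncSP
 * shrinks by 4 per transformed Store. */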
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
	int              i;
	int              maxslot;
	int              inc_ofs;
	ir_node         *node;
	ir_node         *stores[MAXPUSH_OPTIMIZE];
	ir_node         *block;
	ir_graph        *irg;
	ir_node         *curr_sp;
	ir_mode         *spmode;
	ir_node         *first_push = NULL;
	ir_edge_t const *edge;
	ir_edge_t const *next;

	memset(stores, 0, sizeof(stores));

	assert(be_is_IncSP(irn));

	inc_ofs = be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule after the IncSP node as long as we find
	 * suitable Stores that could be transformed to a Push.
	 * We save them into the stores array, which is sorted by the frame
	 * offset/4 attached to the node.
	 */
	maxslot = -1;
	for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
		ir_node *mem;
		int      offset;
		int      storeslot;

		/* it has to be a Store */
		if (!is_ia32_Store(node))
			break;

		/* it has to use our sp value */
		if (get_irn_n(node, n_ia32_base) != irn)
			continue;
		/* the Store has to be attached to NoMem */
		mem = get_irn_n(node, n_ia32_mem);
		if (!is_NoMem(mem))
			continue;

		/* unfortunately we can't support the full AMs possible for push at
		 * the moment. TODO: fix this */
		if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* storing at half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (inc_ofs - 4 < offset || offset >= MAXPUSH_OPTIMIZE * 4)
			continue;
		storeslot = offset >> 2;

		/* storing into the same slot twice is bad (and shouldn't happen...) */
		if (stores[storeslot] != NULL)
			break;

		stores[storeslot] = node;
		if (storeslot > maxslot)
			maxslot = storeslot;
	}
	curr_sp = irn;

	/* find the number of consecutive slots, starting at 0 */
	for (i = -1; i < maxslot; ++i) {
		if (stores[i + 1] == NULL)
			break;
	}

	/* walk through the Stores and create Pushs for them */
	block  = get_nodes_block(irn);
	spmode = get_irn_mode(irn);
	irg    = cg->irg;
	for (; i >= 0; --i) {
		const arch_register_t *spreg;
		ir_node *push;
		ir_node *val, *mem, *mem_proj;
		ir_node *store = stores[i];
		ir_node *noreg = ia32_new_NoReg_gp(cg);

		val   = get_irn_n(store, n_ia32_unary_op);
		mem   = get_irn_n(store, n_ia32_mem);
		spreg = arch_get_irn_register(cg->arch_env, curr_sp);

		push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg,
		                        noreg, mem, val, curr_sp);

		if (first_push == NULL)
			first_push = push;

		sched_add_after(curr_sp, push);

		/* create stackpointer Proj */
		curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
		arch_set_irn_register(cg->arch_env, curr_sp, spreg);

		/* create memory Proj */
		mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

		/* use the memproj now */
		be_peephole_exchange(store, mem_proj);

		inc_ofs -= 4;
	}

	/* all users of the IncSP, except the first Push, use the new sp value */
	foreach_out_edge_safe(irn, edge, next) {
		ir_node *const src = get_edge_src_irn(edge);
		int      const pos = get_edge_src_pos(edge);

		if (src == first_push)
			continue;

		set_irn_n(src, pos, curr_sp);
	}

	be_set_IncSP_offset(irn, inc_ofs);
}
/**
 * Returns true if a mode can be stored in the GP register set.
 */
static INLINE int mode_needs_gp_reg(ir_mode *mode) {
	if (mode == mode_fpcw)
		return 0;
	if (get_mode_size_bits(mode) > 32)
		return 0;
	return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
}
/**
 * Tries to create Pops from Load, IncSP combinations.
 * The Loads are replaced by Pops, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
 */
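/* Illustrative example, the mirror image of the Push case above:
 *
 *     movl (%esp), %eax              popl %eax
 *     movl 4(%esp), %ebx     ->      popl %ebx
 *     addl $8, %esp
 */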
static void peephole_Load_IncSP_to_pop(ir_node *irn)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	int       i, maxslot, inc_ofs, ofs;
	ir_node  *node, *pred_sp, *block;
	ir_node  *loads[MAXPUSH_OPTIMIZE];
	ir_graph *irg = current_ir_graph;
	unsigned  regmask = 0;
	unsigned  copymask = ~0;

	memset(loads, 0, sizeof(loads));
	assert(be_is_IncSP(irn));

	inc_ofs = -be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule before the IncSP node as long as we find
	 * suitable Loads that could be transformed to a Pop.
	 * We save them into the loads array, which is sorted by the frame
	 * offset/4 attached to the node.
	 */
	maxslot = -1;
	pred_sp = be_get_IncSP_pred(irn);
	for (node = sched_prev(irn); !sched_is_end(node); node = sched_prev(node)) {
		int offset;
		int loadslot;
		const arch_register_t *sreg, *dreg;

		/* it has to be a Load */
		if (!is_ia32_Load(node)) {
			if (be_is_Copy(node)) {
				if (!mode_needs_gp_reg(get_irn_mode(node))) {
					/* not a GP copy, ignore */
					continue;
				}
				dreg = arch_get_irn_register(arch_env, node);
				sreg = arch_get_irn_register(arch_env, be_get_Copy_op(node));
				if (regmask & copymask & (1 << sreg->index)) {
					break;
				}
				if (regmask & copymask & (1 << dreg->index)) {
					break;
				}
				/* we CAN skip Copies if neither the destination nor the
				 * source is in our regmask, i.e. none of our future Pops
				 * will overwrite it */
				regmask |= (1 << dreg->index) | (1 << sreg->index);
				copymask &= ~((1 << dreg->index) | (1 << sreg->index));
				continue;
			}
			break;
		}

		/* we can handle only GP loads */
		if (!mode_needs_gp_reg(get_ia32_ls_mode(node)))
			continue;

		/* it has to use our predecessor sp value */
		if (get_irn_n(node, n_ia32_base) != pred_sp) {
			/* it would be ok if this load does not use a Pop result,
			 * but we do not check this */
			break;
		}

		/* it should have NO index */
		if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* loading from half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
			break;
		/* ignore those outside the possible window */
		if (offset > inc_ofs - 4)
			continue;
		loadslot = offset >> 2;

		/* loading from the same slot twice is bad (and shouldn't happen...) */
		if (loads[loadslot] != NULL)
			break;

		dreg = arch_get_irn_register(arch_env, node);
		if (regmask & (1 << dreg->index)) {
			/* this register is already used */
			break;
		}
		regmask |= 1 << dreg->index;

		loads[loadslot] = node;
		if (loadslot > maxslot)
			maxslot = loadslot;
	}
	if (maxslot < 0)
		return;

	/* find the first slot */
	for (i = maxslot; i >= 0; --i) {
		ir_node *load = loads[i];
		if (load == NULL)
			break;
	}

	ofs     = inc_ofs - (maxslot + 1) * 4;
	inc_ofs = (i + 1) * 4;

	/* create a new IncSP if needed */
	block = get_nodes_block(irn);
	if (inc_ofs > 0) {
		pred_sp = be_new_IncSP(esp, irg, block, pred_sp, -inc_ofs,
		                       be_get_IncSP_align(irn));
		sched_add_before(irn, pred_sp);
	}

	/* walk through the Loads and create Pops for them */
	for (++i; i <= maxslot; ++i) {
		ir_node *load = loads[i];
		ir_node *mem, *pop;
		const ir_edge_t *edge, *tmp;
		const arch_register_t *reg;

		mem = get_irn_n(load, n_ia32_mem);
		reg = arch_get_irn_register(arch_env, load);

		pop = new_rd_ia32_Pop(get_irn_dbg_info(load), irg, block, mem, pred_sp);
		arch_set_irn_register(arch_env, pop, reg);

		/* create stackpointer Proj */
		pred_sp = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
		arch_set_irn_register(arch_env, pred_sp, esp);

		sched_add_before(irn, pop);

		/* rewire the Load Projs to the Pop */
		foreach_out_edge_safe(load, edge, tmp) {
			ir_node *proj = get_edge_src_irn(edge);

			set_Proj_pred(proj, pop);
		}

		/* we can remove the Load now */
		sched_remove(load);
		kill_node(load);
	}

	be_set_IncSP_offset(irn, -ofs);
	be_set_IncSP_pred(irn, pred_sp);
}
/**
 * Finds a free GP register if possible, else returns NULL.
 */
static const arch_register_t *get_free_gp_reg(void)
{
	int i;

	for (i = 0; i < N_ia32_gp_REGS; ++i) {
		const arch_register_t *reg = &ia32_gp_regs[i];
		if (arch_register_type_is(reg, ignore))
			continue;

		if (be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
			return &ia32_gp_regs[i];
	}

	return NULL;
}
/**
 * Creates a Pop instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param irg         the graph
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to pop
 *
 * @return the new stack value
 */
static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block,
                           ir_node *stack, ir_node *schedpoint,
                           const arch_register_t *reg)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	ir_node *pop;
	ir_node *keep;
	ir_node *val;
	ir_node *in[1];

	pop = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);

	stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
	arch_set_irn_register(arch_env, stack, esp);
	val   = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
	arch_set_irn_register(arch_env, val, reg);

	sched_add_before(schedpoint, pop);

	in[0] = val;
	keep  = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
	sched_add_before(schedpoint, keep);

	return stack;
}
/**
 * Creates a Push instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param irg         the graph
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 *
 * @return the new stack value
 */
static ir_node *create_push(dbg_info *dbgi, ir_graph *irg, ir_node *block,
                            ir_node *stack, ir_node *schedpoint)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];

	ir_node *val   = ia32_new_Unknown_gp(cg);
	ir_node *noreg = ia32_new_NoReg_gp(cg);
	ir_node *nomem = get_irg_no_mem(irg);
	ir_node *push  = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack);
	sched_add_before(schedpoint, push);

	stack = new_r_Proj(irg, block, push, mode_Iu, pn_ia32_Push_stack);
	arch_set_irn_register(arch_env, stack, esp);

	return stack;
}
/**
 * Optimize an IncSP by replacing it with Push/Pop.
 */
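/* Illustrative example: depending on the ia32_cg_config flags, e.g.
 *
 *     addl $4, %esp    ->    popl %ecx     (needs a free register)
 *     subl $4, %esp    ->    pushl %eax    (the pushed value is a don't-care)
 *
 * can be smaller or faster than adjusting esp arithmetically. */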
static void peephole_be_IncSP(ir_node *node)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	const arch_register_t *reg;
	ir_graph              *irg = current_ir_graph;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *stack;
	int                    offset;

	/* first optimize incsp->incsp combinations */
	node = be_peephole_IncSP_IncSP(node);

	/* transform IncSP->Store combinations to Push where possible */
	peephole_IncSP_Store_to_push(node);

	/* transform Load->IncSP combinations to Pop where possible */
	peephole_Load_IncSP_to_pop(node);

	if (arch_get_irn_register(arch_env, node) != esp)
		return;

	/* replace IncSP -4 by Pop freereg when possible */
	offset = be_get_IncSP_offset(node);
	if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
	    (offset != -4 || ia32_cg_config.use_add_esp_4) &&
	    (offset != +4 || ia32_cg_config.use_sub_esp_4) &&
	    (offset != +8 || ia32_cg_config.use_sub_esp_8))
		return;

	if (offset < 0) {
		/* we need a free register for pop */
		reg = get_free_gp_reg();
		if (reg == NULL)
			return;

		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);

		stack = create_pop(dbgi, irg, block, stack, node, reg);
		if (offset == -8) {
			stack = create_pop(dbgi, irg, block, stack, node, reg);
		}
	} else {
		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);

		stack = create_push(dbgi, irg, block, stack, node);
		if (offset == +8) {
			stack = create_push(dbgi, irg, block, stack, node);
		}
	}

	be_peephole_exchange(node, stack);
}
/**
 * Peephole optimisation for ia32_Const nodes.
 */
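/* Illustrative example:
 *
 *     movl $0, %eax    ->    xorl %eax, %eax
 *
 * The xor form is shorter, but it clobbers the flags, hence the check
 * below that no flags value is currently live. */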
static void peephole_ia32_Const(ir_node *node)
{
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
	const arch_register_t       *reg;
	ir_graph                    *irg = current_ir_graph;
	ir_node                     *block;
	dbg_info                    *dbgi;
	ir_node                     *produceval;
	ir_node                     *xor;
	ir_node                     *noreg;

	/* try to transform a mov 0, reg to xor reg reg */
	if (attr->offset != 0 || attr->symconst != NULL)
		return;
	if (ia32_cg_config.use_mov_0)
		return;
	/* xor destroys the flags, so no one must be using them */
	if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;

	reg = arch_get_irn_register(arch_env, node);
	assert(be_peephole_get_reg_value(reg) == NULL);

	/* create xor(produceval, produceval) */
	block      = get_nodes_block(node);
	dbgi       = get_irn_dbg_info(node);
	produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
	arch_set_irn_register(arch_env, produceval, reg);

	noreg = ia32_new_NoReg_gp(cg);
	xor   = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
	                        produceval, produceval);
	arch_set_irn_register(arch_env, xor, reg);

	sched_add_before(node, produceval);
	sched_add_before(node, xor);

	be_peephole_exchange(node, xor);
}
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
{
	return node == cg->noreg_gp;
}
static ir_node *create_immediate_from_int(ia32_code_gen_t *cg, int val)
{
	ir_graph *irg         = current_ir_graph;
	ir_node  *start_block = get_irg_start_block(irg);
	ir_node  *immediate   = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
	                                              0, val);
	arch_set_irn_register(cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);

	return immediate;
}
static ir_node *create_immediate_from_am(ia32_code_gen_t *cg,
                                         const ir_node *node)
{
	ir_graph  *irg     = get_irn_irg(node);
	ir_node   *block   = get_nodes_block(node);
	int        offset  = get_ia32_am_offs_int(node);
	int        sc_sign = is_ia32_am_sc_sign(node);
	ir_entity *entity  = get_ia32_am_sc(node);
	ir_node   *res;

	res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
	arch_set_irn_register(cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]);
	return res;
}
static int is_am_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == 1 && entity == NULL;
}

static int is_am_minus_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == -1 && entity == NULL;
}
/**
 * Transforms a LEA into an Add or SHL if possible.
 */
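/* Illustrative examples of the cases handled below:
 *
 *     leal 4(%eax), %eax        ->    addl $4, %eax
 *     leal (%eax,%ebx), %eax    ->    addl %ebx, %eax
 *     leal (,%eax,4), %eax      ->    shll $2, %eax
 *
 * This only works when the output register equals the base or the index
 * register; otherwise the three-operand form is exactly what lea is for. */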
static void peephole_ia32_Lea(ir_node *node)
{
	const arch_env_t      *arch_env = cg->arch_env;
	ir_graph              *irg      = current_ir_graph;
	ir_node               *base;
	ir_node               *index;
	const arch_register_t *base_reg;
	const arch_register_t *index_reg;
	const arch_register_t *out_reg;
	int                    scale;
	int                    has_immediates;
	ir_node               *op1;
	ir_node               *op2;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *res;
	ir_node               *noreg;
	ir_node               *nomem;

	assert(is_ia32_Lea(node));

	/* we can only do this if we are allowed to clobber the flags */
	if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;

	base  = get_irn_n(node, n_ia32_Lea_base);
	index = get_irn_n(node, n_ia32_Lea_index);

	if (is_noreg(cg, base)) {
		base     = NULL;
		base_reg = NULL;
	} else {
		base_reg = arch_get_irn_register(arch_env, base);
	}
	if (is_noreg(cg, index)) {
		index     = NULL;
		index_reg = NULL;
	} else {
		index_reg = arch_get_irn_register(arch_env, index);
	}

	if (base == NULL && index == NULL) {
		/* we shouldn't construct these in the first place... */
#ifdef DEBUG_libfirm
		ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
#endif
		return;
	}

	out_reg = arch_get_irn_register(arch_env, node);
	scale   = get_ia32_am_scale(node);
	assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
	/* check if we have immediate values (frame entities should already be
	 * expressed in the offsets) */
	if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
		has_immediates = 1;
	} else {
		has_immediates = 0;
	}
	/* we can transform leas where the out register is the same as either the
	 * base or index register back to an Add or Shl */
	if (out_reg == base_reg) {
		if (index == NULL) {
#ifdef DEBUG_libfirm
			if (!has_immediates) {
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
			}
#endif
			op1 = base;
			goto make_add_immediate;
		}
		if (scale == 0 && !has_immediates) {
			op1 = base;
			op2 = index;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else if (out_reg == index_reg) {
		if (base == NULL) {
			if (has_immediates && scale == 0) {
				op1 = index;
				goto make_add_immediate;
			} else if (!has_immediates && scale > 0) {
				op1 = index;
				op2 = create_immediate_from_int(cg, scale);
				goto make_shl;
			} else if (!has_immediates) {
#ifdef DEBUG_libfirm
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
#endif
			}
		} else if (scale == 0 && !has_immediates) {
			op1 = index;
			op2 = base;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else {
		/* can't create an add */
		return;
	}
make_add_immediate:
	if (ia32_cg_config.use_incdec) {
		if (is_am_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_rd_ia32_Inc(dbgi, irg, block, op1);
			arch_set_irn_register(arch_env, res, out_reg);
			goto exchange;
		}
		if (is_am_minus_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_rd_ia32_Dec(dbgi, irg, block, op1);
			arch_set_irn_register(arch_env, res, out_reg);
			goto exchange;
		}
	}
	op2 = create_immediate_from_am(cg, node);

make_add:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	nomem = new_NoMem();
	res   = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
	arch_set_irn_register(arch_env, res, out_reg);
	set_ia32_commutative(res);
	goto exchange;

make_shl:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	nomem = new_NoMem();
	res   = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
	arch_set_irn_register(arch_env, res, out_reg);
	goto exchange;

exchange:
	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));

	/* add the new ADD/SHL to the schedule */
	DBG_OPT_LEA2ADD(node, res);

	/* exchange the Add and the LEA */
	sched_add_before(node, res);
	be_peephole_exchange(node, res);
}
/**
 * Splits an IMul mem, imm into a Load mem and IMul reg, imm if possible.
 */
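/* Illustrative example, assuming %ecx is free:
 *
 *     imull $3, 8(%esp), %eax    ->    movl 8(%esp), %ecx
 *                                      imull $3, %ecx, %eax
 *
 * This is a win on targets where the memory, immediate form of imul is
 * slow (the use_imul_mem_imm32 flag checked at registration time below). */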
static void peephole_ia32_Imul_split(ir_node *imul) {
	const ir_node         *right = get_irn_n(imul, n_ia32_IMul_right);
	const arch_register_t *reg;
	ir_node               *load, *block, *base, *index, *mem, *res, *noreg;
	dbg_info              *dbgi;
	ir_graph              *irg;

	if (!is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
		/* not a memory, immediate form: ignore */
		return;
	}
	/* we need a free register */
	reg = get_free_gp_reg();
	if (reg == NULL)
		return;

	/* fine, we can rebuild it */
	dbgi  = get_irn_dbg_info(imul);
	block = get_nodes_block(imul);
	irg   = current_ir_graph;
	base  = get_irn_n(imul, n_ia32_IMul_base);
	index = get_irn_n(imul, n_ia32_IMul_index);
	mem   = get_irn_n(imul, n_ia32_IMul_mem);
	load  = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);

	/* copy all attributes */
	set_irn_pinned(load, get_irn_pinned(imul));
	set_ia32_op_type(load, ia32_AddrModeS);
	set_ia32_ls_mode(load, get_ia32_ls_mode(imul));

	set_ia32_am_scale(load, get_ia32_am_scale(imul));
	set_ia32_am_sc(load, get_ia32_am_sc(imul));
	set_ia32_am_offs_int(load, get_ia32_am_offs_int(imul));
	if (is_ia32_am_sc_sign(imul))
		set_ia32_am_sc_sign(load);
	if (is_ia32_use_frame(imul))
		set_ia32_use_frame(load);
	set_ia32_frame_ent(load, get_ia32_frame_ent(imul));

	sched_add_before(imul, load);

	mem = new_rd_Proj(dbgi, irg, block, load, mode_M, pn_ia32_Load_M);
	res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);

	arch_set_irn_register(arch_env, res, reg);
	be_peephole_new_node(res);

	set_irn_n(imul, n_ia32_IMul_mem, mem);
	noreg = get_irn_n(imul, n_ia32_IMul_left);
	set_irn_n(imul, n_ia32_IMul_left, res);
	set_ia32_op_type(imul, ia32_Normal);
}
/**
 * Replace xorps r,r and xorpd r,r by pxor r,r.
 */
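/* Illustrative example:
 *
 *     xorps %xmm0, %xmm0    ->    pxor %xmm0, %xmm0
 *
 * All three idioms just zero the register; the use_pxor flag is set for
 * targets where the integer form is at least as fast. */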
static void peephole_ia32_xZero(ir_node *xor) {
	set_irn_op(xor, op_ia32_xPzero);
}
/**
 * Registers a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
	assert(op->ops.generic == NULL);
	op->ops.generic = (op_func)func;
}
/* Perform peephole-optimizations. */
void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
{
	cg       = new_cg;
	arch_env = cg->arch_env;

	/* register peephole optimisations */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Const,    peephole_ia32_Const);
	register_peephole_optimisation(op_be_IncSP,      peephole_be_IncSP);
	register_peephole_optimisation(op_ia32_Lea,      peephole_ia32_Lea);
	register_peephole_optimisation(op_ia32_Cmp,      peephole_ia32_Cmp);
	register_peephole_optimisation(op_ia32_Cmp8Bit,  peephole_ia32_Cmp);
	register_peephole_optimisation(op_ia32_Test,     peephole_ia32_Test);
	register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
	register_peephole_optimisation(op_be_Return,     peephole_ia32_Return);
	if (!ia32_cg_config.use_imul_mem_imm32)
		register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
	if (ia32_cg_config.use_pxor)
		register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);

	be_peephole_opt(cg->birg);
}
/**
 * Removes a node from the schedule if it is not used anymore. If the node
 * is a mode_T node, all its Projs are removed as well.
 *
 * @param node  the node to be removed from the schedule
 */
static INLINE void try_kill(ir_node *node)
{
	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(node, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);
			try_kill(proj);
		}
	}

	if (get_irn_n_edges(node) != 0)
		return;

	if (sched_is_scheduled(node)) {
		sched_remove(node);
	}

	kill_node(node);
}
static void optimize_conv_store(ir_node *node)
{
	ir_node *pred;
	ir_node *pred_proj;
	ir_mode *conv_mode;
	ir_mode *store_mode;

	if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
		return;

	assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Store_val);
	if (is_Proj(pred_proj)) {
		pred = get_Proj_pred(pred_proj);
	} else {
		pred = pred_proj;
	}
	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;
	if (get_ia32_op_type(pred) != ia32_Normal)
		return;

	/* the store only stores the lower bits, so we only need the conv
	 * if it shrinks the mode */
	conv_mode  = get_ia32_ls_mode(pred);
	store_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
		return;

	set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
	if (get_irn_n_edges(pred_proj) == 0) {
		kill_node(pred_proj);
		if (pred != pred_proj)
			kill_node(pred);
	}
}
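/* Illustrative example: in
 *
 *     v = Conv_I2I int -> int8 (x);  Store8Bit [p] = v
 *
 * the Conv can be bypassed, since the byte store reads only the low 8 bits
 * of x anyway; only a Conv to a mode narrower than the store must stay. */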
static void optimize_load_conv(ir_node *node)
{
	ir_node *pred, *predpred;
	ir_mode *load_mode;
	ir_mode *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (!is_Proj(pred))
		return;

	predpred = get_Proj_pred(pred);
	if (!is_ia32_Load(predpred))
		return;

	/* the load is sign extending the upper bits, so we only need the conv
	 * if it shrinks the mode */
	load_mode = get_ia32_ls_mode(predpred);
	conv_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
		return;

	if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
		/* change the load if it has only 1 user */
		if (get_irn_n_edges(pred) == 1) {
			ir_mode *newmode;
			if (get_mode_sign(conv_mode)) {
				newmode = find_signed_mode(load_mode);
			} else {
				newmode = find_unsigned_mode(load_mode);
			}
			assert(newmode != NULL);
			set_ia32_ls_mode(predpred, newmode);
		} else {
			/* otherwise we have to keep the conv */
			return;
		}
	}

	/* kill the conv */
	exchange(node, pred);
}
static void optimize_conv_conv(ir_node *node)
{
	ir_node *pred_proj, *pred, *result_conv;
	ir_mode *pred_mode, *conv_mode;
	int      conv_mode_bits;
	int      pred_mode_bits;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (is_Proj(pred_proj))
		pred = get_Proj_pred(pred_proj);
	else
		pred = pred_proj;

	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;

	/* we know that after a conv, the upper bits are sign extended,
	 * so we only need the 2nd conv if it shrinks the mode */
	conv_mode      = get_ia32_ls_mode(node);
	conv_mode_bits = get_mode_size_bits(conv_mode);
	pred_mode      = get_ia32_ls_mode(pred);
	pred_mode_bits = get_mode_size_bits(pred_mode);

	if (conv_mode_bits == pred_mode_bits
			&& get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
		result_conv = pred_proj;
	} else if (conv_mode_bits <= pred_mode_bits) {
		/* if the 2nd conv is smaller than the first conv, then we can always
		 * take the 2nd conv */
		if (get_irn_n_edges(pred_proj) == 1) {
			result_conv = pred_proj;
			set_ia32_ls_mode(pred, conv_mode);

			/* Argh: We must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(pred, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(pred, get_ia32_in_req_all(node));
			}
		} else {
			/* we don't want to end up with 2 loads, so we better do nothing */
			if (get_irn_mode(pred) == mode_T) {
				return;
			}

			result_conv = exact_copy(pred);
			set_ia32_ls_mode(result_conv, conv_mode);

			/* Argh: We must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
			}
		}
	} else {
		/* if both convs have the same sign, then we can take the smaller one */
		if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
			result_conv = pred_proj;
		} else {
			/* no optimisation is possible if the smaller conv is sign-extending */
			if (mode_is_signed(pred_mode)) {
				return;
			}
			/* we can take the smaller conv if it is unsigned */
			result_conv = pred_proj;
		}
	}

	/* kill the conv */
	exchange(node, result_conv);

	if (get_irn_n_edges(pred_proj) == 0) {
		kill_node(pred_proj);
		if (pred != pred_proj)
			kill_node(pred);
	}
	optimize_conv_conv(result_conv);
}
static void optimize_node(ir_node *node, void *env)
{
	(void) env;

	optimize_load_conv(node);
	optimize_conv_store(node);
	optimize_conv_conv(node);
}
/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg)
{
	irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

	if (cg->dump)
		be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);
}
void ia32_init_optimize(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}