/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief   Implements several optimizations for IA32.
 * @author  Matthias Braun, Christian Wuerdig
 */
#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"
#include "../bepeephole.h"

#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
#include "ia32_architecture.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static ia32_code_gen_t  *cg;
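/**
 * Replaces a Store to the end of the stack space newly created by an IncSP
 * with a Push and shrinks the IncSP accordingly. A rough illustration
 * (registers are examples only, assuming a 4 byte store):
 *
 *   sub $4, %esp          =>   push %eax
 *   mov %eax, (%esp)
 */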
static void peephole_ia32_Store_IncSP_to_push(ir_node *node)
{
    ir_node  *base  = get_irn_n(node, n_ia32_Store_base);
    ir_node  *index = get_irn_n(node, n_ia32_Store_index);
    ir_node  *mem   = get_irn_n(node, n_ia32_Store_mem);
    ir_node  *incsp = base;
    ir_node  *val;
    ir_node  *noreg;
    ir_node  *push;
    ir_node  *proj;
    ir_node  *block;
    ir_graph *irg;
    dbg_info *dbgi;
    ir_mode  *mode;
    int       offset;
    int       node_offset;
    /* NoMem indicates that the store doesn't alias with anything else */
    if (!is_NoMem(mem))
        return;
    /* find an IncSP in front of us; we might have to skip Barriers for this */
    while (is_Proj(incsp)) {
        ir_node *proj_pred = get_Proj_pred(incsp);
        if (!be_is_Barrier(proj_pred))
            break;
        incsp = get_irn_n(proj_pred, get_Proj_proj(incsp));
    }

    if (!be_is_IncSP(incsp))
        return;

    be_peephole_IncSP_IncSP(incsp);
    /* must be in the same block */
    if (get_nodes_block(incsp) != get_nodes_block(node))
        return;

    if (!is_ia32_NoReg_GP(index) || get_ia32_am_sc(node) != NULL) {
        panic("Invalid storeAM found (%+F)", node);
    }
    /* this store must write to the end of the new stack space */
    offset      = be_get_IncSP_offset(incsp);
    mode        = get_ia32_ls_mode(node);
    node_offset = get_ia32_am_offs_int(node);
    if (node_offset != offset - get_mode_size_bytes(mode))
        return;
    /* we can use a Push instead of the Store */
    irg   = current_ir_graph;
    block = get_nodes_block(node);
    dbgi  = get_irn_dbg_info(node);
    noreg = ia32_new_NoReg_gp(cg);
    base  = be_get_IncSP_pred(incsp);
    val   = get_irn_n(node, n_ia32_Store_val);
    push  = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, mem, val, base);

    proj  = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

    be_set_IncSP_offset(incsp, offset - get_mode_size_bytes(mode));
    sched_add_before(node, push);
    sched_remove(node);

    be_peephole_before_exchange(node, proj);
    exchange(node, proj);
    be_peephole_after_exchange(proj);
}
static void peephole_ia32_Store(ir_node *node)
{
    peephole_ia32_Store_IncSP_to_push(node);
}
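/**
 * Returns non-zero if the result (the Proj with number pn) of the given node
 * is known to set the zero flag according to its value, making a following
 * Test against 0 redundant. Note the shift special case handled below: a
 * shift with a (possibly zero) non-constant count leaves the flags untouched.
 */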
static int produces_zero_flag(ir_node *node, int pn)
{
    ir_node                     *count;
    const ia32_immediate_attr_t *imm_attr;

    if (!is_ia32_irn(node))
        return 0;

    if (pn != pn_ia32_res)
        return 0;
    switch (get_ia32_irn_opcode(node)) {
    case iro_ia32_ShlD:
    case iro_ia32_ShrD:
    case iro_ia32_Shl:
    case iro_ia32_Shr:
    case iro_ia32_Sar:
        assert(n_ia32_ShlD_count == n_ia32_ShrD_count);
        assert(n_ia32_Shl_count == n_ia32_Shr_count
                && n_ia32_Shl_count == n_ia32_Sar_count);
        if (is_ia32_ShlD(node) || is_ia32_ShrD(node)) {
            count = get_irn_n(node, n_ia32_ShlD_count);
        } else {
            count = get_irn_n(node, n_ia32_Shl_count);
        }
        /* when the shift count is zero the flags are not affected, so we can
         * only do this for constants != 0 */
        if (!is_ia32_Immediate(count))
            return 0;

        imm_attr = get_ia32_immediate_attr_const(count);
        if (imm_attr->symconst != NULL)
            return 0;
        if ((imm_attr->offset & 0x1f) == 0)
            return 0;
        return 1;
    }

    return 0;
}
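/**
 * Turns a node with a single mode_Iu result into a mode_T node with an
 * explicit result Proj, so that additional Projs (such as a flags result)
 * can be attached to it. Returns the result Proj that replaces the node.
 */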
static ir_node *turn_into_mode_t(ir_node *node)
{
    ir_node               *block;
    ir_node               *res_proj;
    ir_node               *new_node;
    const arch_register_t *reg;

    if (get_irn_mode(node) == mode_T)
        return node;

    assert(get_irn_mode(node) == mode_Iu);

    new_node = exact_copy(node);
    set_irn_mode(new_node, mode_T);

    block    = get_nodes_block(new_node);
    res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu,
                          pn_ia32_res);

    reg = arch_get_irn_register(arch_env, node);
    arch_set_irn_register(arch_env, res_proj, reg);

    be_peephole_before_exchange(node, res_proj);
    sched_add_before(node, new_node);
    sched_remove(node);
    exchange(node, res_proj);
    be_peephole_after_exchange(res_proj);

    return res_proj;
}
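/**
 * Tries to remove a Test against zero when the tested value was just
 * produced by a flag-setting instruction in the same block. A rough sketch
 * of the idea (registers are examples only):
 *
 *   and %ebx, %eax            and %ebx, %eax
 *   test %eax, %eax     =>
 *   jz label                  jz label
 */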
static void peephole_ia32_Test(ir_node *node)
{
    ir_node         *left  = get_irn_n(node, n_ia32_Test_left);
    ir_node         *right = get_irn_n(node, n_ia32_Test_right);
    ir_node         *flags_proj;
    ir_node         *block;
    ir_mode         *flags_mode;
    ir_node         *schedpoint;
    int              pn;
    const ir_edge_t *edge;

    assert(n_ia32_Test_left == n_ia32_Test8Bit_left
            && n_ia32_Test_right == n_ia32_Test8Bit_right);

    /* we need a test for 0 */
    if (left != right)
        return;

    block = get_nodes_block(node);
    if (get_nodes_block(left) != block)
        return;

    if (is_Proj(left)) {
        pn   = get_Proj_proj(left);
        left = get_Proj_pred(left);
    }

    /* happens rarely, but if it does the code will panic */
    if (is_ia32_Unknown_GP(left))
        return;
    /* walk the schedule up and abort when we find left or some other node
     * that destroys the flags */
    schedpoint = sched_prev(node);
    while (schedpoint != left) {
        schedpoint = sched_prev(schedpoint);
        if (arch_irn_is(arch_env, schedpoint, modify_flags))
            return;
        if (schedpoint == block)
            panic("couldn't find left");
    }
    /* make sure only Lg/Eq tests are used */
    foreach_out_edge(node, edge) {
        ir_node *user = get_edge_src_irn(edge);
        int      pnc  = get_ia32_condcode(user);

        if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
            return;
        }
    }

    if (!produces_zero_flag(left, pn))
        return;
    left = turn_into_mode_t(left);

    flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
    flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode,
                            pn_ia32_flags);
    arch_set_irn_register(arch_env, flags_proj, &ia32_flags_regs[REG_EFLAGS]);

    assert(get_irn_mode(node) != mode_T);

    be_peephole_before_exchange(node, flags_proj);
    exchange(node, flags_proj);
    sched_remove(node);
    be_peephole_after_exchange(flags_proj);
}
/**
 * AMD Athlon works faster when RET is not the destination of a
 * conditional jump or directly preceded by another jump instruction.
 * This can be avoided by placing a Rep prefix before the return.
 */
static void peephole_ia32_Return(ir_node *node) {
    ir_node *block, *irn;
    if (!ia32_cg_config.use_pad_return)
        return;

    block = get_nodes_block(node);

    if (get_Block_n_cfgpreds(block) == 1) {
        ir_node *pred = get_Block_cfgpred(block, 0);

        if (is_Jmp(pred)) {
            /* The block of the return has only one predecessor,
               which jumps directly to this block.
               This jump will be encoded as a fall through, so
               there is no preceding jump to worry about.
               However, the predecessor might be empty, so it must be
               ensured that empty blocks have been removed ... */
            return;
        }
    }
    /* check if this return is the first in the block */
    sched_foreach_reverse_from(node, irn) {
        switch (get_irn_opcode(irn)) {
        case beo_Return:
            /* the return node itself, ignore */
            continue;
        case beo_Barrier:
            /* ignore the barrier, no code is generated for it */
            continue;
        case beo_IncSP:
            /* argh, IncSP 0 nodes might occur, ignore these */
            if (be_get_IncSP_offset(irn) == 0)
                continue;
            return;
        default:
            return;
        }
    }

    /* yep, the return is the first real instruction in this block */

    /* add a Rep prefix to the return */
    ir_node *rep = new_rd_ia32_RepPrefix(get_irn_dbg_info(node), current_ir_graph, block);
    sched_add_before(node, rep);

    /* ensure that the 3-byte return is generated */
    be_Return_set_emit_pop(node, 1);
}
/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE    48
/**
 * Tries to create Pushes from IncSP, Store combinations.
 * The Stores are replaced by Pushes, and the IncSP is modified
 * (possibly into an IncSP 0, but not removed).
 */
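/*
 * A rough illustration of the transformation (registers are examples only,
 * 4 byte slots assumed):
 *
 *   sub $8, %esp                push %eax
 *   mov %eax, 4(%esp)    =>     push %ebx
 *   mov %ebx, (%esp)
 */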
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
    int       i, maxslot, inc_ofs;
    ir_node  *node;
    ir_node  *stores[MAXPUSH_OPTIMIZE];
    ir_node  *block  = get_nodes_block(irn);
    ir_graph *irg    = cg->irg;
    ir_node  *curr_sp;
    ir_mode  *spmode = get_irn_mode(irn);

    memset(stores, 0, sizeof(stores));

    assert(be_is_IncSP(irn));

    inc_ofs = be_get_IncSP_offset(irn);
    if (inc_ofs < 4)
        return;
    /*
     * We first walk the schedule after the IncSP node as long as we find
     * suitable Stores that could be transformed into a Push.
     * We save them in the stores array, which is indexed by the frame
     * offset/4 attached to the node.
     */
    maxslot = -1;
    for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
        ir_node *mem;
        int      offset;
        int      storeslot;

        /* it has to be a Store */
        if (!is_ia32_Store(node))
            break;

        /* it has to use our SP value */
        if (get_irn_n(node, n_ia32_base) != irn)
            continue;
        /* the Store has to be attached to NoMem */
        mem = get_irn_n(node, n_ia32_mem);
        if (!is_NoMem(mem))
            continue;

        /* unfortunately we can't support the full AMs possible for Push at
         * the moment. TODO: fix this */
        if (get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
            break;

        offset = get_ia32_am_offs_int(node);
        /* we should NEVER access uninitialized stack BELOW the current SP */
        assert(offset >= 0);

        offset = inc_ofs - 4 - offset;

        /* storing at half-slots is bad */
        if ((offset & 3) != 0)
            break;

        if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
            continue;
        storeslot = offset >> 2;

        /* storing into the same slot twice is bad (and shouldn't happen...) */
        if (stores[storeslot] != NULL)
            break;

        stores[storeslot] = node;
        if (storeslot > maxslot)
            maxslot = storeslot;
    }
    curr_sp = be_get_IncSP_pred(irn);

    /* walk through the Stores and create Pushes for them */
    for (i = 0; i <= maxslot; ++i) {
        const arch_register_t *spreg;
        ir_node *push;
        ir_node *val, *mem, *mem_proj;
        ir_node *store = stores[i];
        ir_node *noreg = ia32_new_NoReg_gp(cg);

        if (store == NULL)
            break;

        val   = get_irn_n(store, n_ia32_unary_op);
        mem   = get_irn_n(store, n_ia32_mem);
        spreg = arch_get_irn_register(cg->arch_env, curr_sp);

        push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);

        sched_add_before(irn, push);

        /* create stackpointer Proj */
        curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
        arch_set_irn_register(cg->arch_env, curr_sp, spreg);

        /* create memory Proj */
        mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

        /* use the memproj now */
        exchange(store, mem_proj);

        /* we can remove the Store now */
        sched_remove(store);

        inc_ofs -= 4;
    }

    be_set_IncSP_offset(irn, inc_ofs);
    be_set_IncSP_pred(irn, curr_sp);
}
/**
 * Finds a free GP register if possible, otherwise returns NULL.
 */
static const arch_register_t *get_free_gp_reg(void)
{
    int i;

    for (i = 0; i < N_ia32_gp_REGS; ++i) {
        const arch_register_t *reg = &ia32_gp_regs[i];
        if (arch_register_type_is(reg, ignore))
            continue;

        if (be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
            return &ia32_gp_regs[i];
    }

    return NULL;
}
/**
 * Creates a Pop instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param irg         the graph
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to pop
 *
 * @return the new stack value
 */
static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block,
                           ir_node *stack, ir_node *schedpoint,
                           const arch_register_t *reg)
{
    const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
    ir_node *pop;
    ir_node *keep;
    ir_node *val;
    ir_node *in[1];

    pop = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);

    stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
    arch_set_irn_register(arch_env, stack, esp);
    val   = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
    arch_set_irn_register(arch_env, val, reg);

    sched_add_before(schedpoint, pop);

    in[0] = val;
    keep  = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
    sched_add_before(schedpoint, keep);

    return stack;
}
/**
 * Creates a Push instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param irg         the graph
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to push
 *
 * @return the new stack value
 */
static ir_node *create_push(dbg_info *dbgi, ir_graph *irg, ir_node *block,
                            ir_node *stack, ir_node *schedpoint,
                            const arch_register_t *reg)
{
    const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
    ir_node *noreg, *nomem, *push, *val;

    val = new_rd_ia32_ProduceVal(NULL, irg, block);
    arch_set_irn_register(arch_env, val, reg);
    sched_add_before(schedpoint, val);

    noreg = ia32_new_NoReg_gp(cg);
    nomem = get_irg_no_mem(irg);
    push  = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack);
    sched_add_before(schedpoint, push);

    stack = new_r_Proj(irg, block, push, mode_Iu, pn_ia32_Push_stack);
    arch_set_irn_register(arch_env, stack, esp);

    return stack;
}
/**
 * Optimizes an IncSP by replacing it with Push/Pop.
 */
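/*
 * An illustration (register choices are examples only; the replacement only
 * happens where the plain add/sub forms are not preferred, see
 * ia32_cg_config):
 *
 *   add $4, %esp    =>   pop %ecx       (into any free register)
 *   sub $8, %esp    =>   push %eax      (twice; the pushed value is a
 *                        push %eax       don't-care produced by ProduceVal)
 */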
static void peephole_be_IncSP(ir_node *node)
{
    const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
    const arch_register_t *reg;
    ir_graph              *irg = current_ir_graph;
    dbg_info              *dbgi;
    ir_node               *block;
    ir_node               *stack;
    int                    offset;

    /* first optimize IncSP->IncSP combinations */
    be_peephole_IncSP_IncSP(node);

    /* transform IncSP->Store combinations to Push where possible */
    peephole_IncSP_Store_to_push(node);
    if (arch_get_irn_register(arch_env, node) != esp)
        return;

    /* replace IncSP -4 by a Pop into a free register when possible */
    offset = be_get_IncSP_offset(node);
    if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
        (offset != -4 || ia32_cg_config.use_add_esp_4) &&
        (offset != +4 || ia32_cg_config.use_sub_esp_4) &&
        (offset != +8 || ia32_cg_config.use_sub_esp_8))
        return;
    if (offset < 0) {
        /* we need a free register for the Pop */
        reg = get_free_gp_reg();
        if (reg == NULL)
            return;

        dbgi  = get_irn_dbg_info(node);
        block = get_nodes_block(node);
        stack = be_get_IncSP_pred(node);

        stack = create_pop(dbgi, irg, block, stack, node, reg);
        if (offset == -8) {
            stack = create_pop(dbgi, irg, block, stack, node, reg);
        }
    } else {
        dbgi  = get_irn_dbg_info(node);
        block = get_nodes_block(node);
        stack = be_get_IncSP_pred(node);
        reg   = &ia32_gp_regs[REG_EAX];

        stack = create_push(dbgi, irg, block, stack, node, reg);
        if (offset == +8) {
            stack = create_push(dbgi, irg, block, stack, node, reg);
        }
    }

    be_peephole_before_exchange(node, stack);
    sched_remove(node);
    exchange(node, stack);
    be_peephole_after_exchange(stack);
}
/**
 * Peephole optimisation for ia32_Const nodes.
 */
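/*
 * Illustration (register choice is an example only):
 *
 *   mov $0, %eax    =>   xor %eax, %eax
 *
 * The xor encoding is smaller, but it clobbers the flags, hence the check
 * below that nobody is currently using them.
 */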
static void peephole_ia32_Const(ir_node *node)
{
    const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
    const arch_register_t       *reg;
    ir_graph                    *irg  = current_ir_graph;
    ir_node                     *block;
    ir_node                     *produceval;
    ir_node                     *xor;
    ir_node                     *noreg;
    dbg_info                    *dbgi;

    /* try to transform a mov 0, reg to xor reg, reg */
    if (attr->offset != 0 || attr->symconst != NULL)
        return;
    if (ia32_cg_config.use_mov_0)
        return;
    /* xor destroys the flags, so no one must be using them */
    if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
        return;

    reg = arch_get_irn_register(arch_env, node);
    assert(be_peephole_get_reg_value(reg) == NULL);
    /* create xor(produceval, produceval) */
    block = get_nodes_block(node);
    dbgi  = get_irn_dbg_info(node);
    produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
    arch_set_irn_register(arch_env, produceval, reg);

    noreg = ia32_new_NoReg_gp(cg);
    xor   = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
                            produceval, produceval);
    arch_set_irn_register(arch_env, xor, reg);

    sched_add_before(node, produceval);
    sched_add_before(node, xor);

    be_peephole_before_exchange(node, xor);
    sched_remove(node);
    exchange(node, xor);
    be_peephole_after_exchange(xor);
}
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
{
    return node == cg->noreg_gp;
}
static ir_node *create_immediate_from_int(ia32_code_gen_t *cg, int val)
{
    ir_graph *irg         = current_ir_graph;
    ir_node  *start_block = get_irg_start_block(irg);
    ir_node  *immediate   = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
                                                  0, val);
    arch_set_irn_register(cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);

    return immediate;
}
static ir_node *create_immediate_from_am(ia32_code_gen_t *cg,
                                         const ir_node *node)
{
    ir_graph  *irg     = get_irn_irg(node);
    ir_node   *block   = get_nodes_block(node);
    int        offset  = get_ia32_am_offs_int(node);
    int        sc_sign = is_ia32_am_sc_sign(node);
    ir_entity *entity  = get_ia32_am_sc(node);
    ir_node   *res;

    res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
    arch_set_irn_register(cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]);

    return res;
}
static int is_am_one(const ir_node *node)
{
    int        offset = get_ia32_am_offs_int(node);
    ir_entity *entity = get_ia32_am_sc(node);

    return offset == 1 && entity == NULL;
}

static int is_am_minus_one(const ir_node *node)
{
    int        offset = get_ia32_am_offs_int(node);
    ir_entity *entity = get_ia32_am_sc(node);

    return offset == -1 && entity == NULL;
}
/**
 * Transforms a LEA into an Add or SHL if possible.
 */
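/*
 * Some illustrative cases (register choices are examples only):
 *
 *   lea 7(%eax), %eax         =>   add $7, %eax
 *   lea (%eax,%ebx), %eax     =>   add %ebx, %eax
 *   lea (,%ebx,4), %ebx       =>   shl $2, %ebx
 *   lea 1(%eax), %eax         =>   inc %eax         (with use_incdec)
 */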
static void peephole_ia32_Lea(ir_node *node)
{
    const arch_env_t      *arch_env = cg->arch_env;
    ir_graph              *irg      = current_ir_graph;
    ir_node               *base;
    ir_node               *index;
    const arch_register_t *base_reg;
    const arch_register_t *index_reg;
    const arch_register_t *out_reg;
    int                    scale;
    int                    has_immediates;
    ir_node               *op1;
    ir_node               *op2;
    dbg_info              *dbgi;
    ir_node               *block;
    ir_node               *res;
    ir_node               *noreg;
    ir_node               *nomem;

    assert(is_ia32_Lea(node));
    /* we can only do this if we are allowed to clobber the flags */
    if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
        return;

    base  = get_irn_n(node, n_ia32_Lea_base);
    index = get_irn_n(node, n_ia32_Lea_index);

    if (is_noreg(cg, base)) {
        base     = NULL;
        base_reg = NULL;
    } else {
        base_reg = arch_get_irn_register(arch_env, base);
    }
    if (is_noreg(cg, index)) {
        index     = NULL;
        index_reg = NULL;
    } else {
        index_reg = arch_get_irn_register(arch_env, index);
    }

    if (base == NULL && index == NULL) {
        /* we shouldn't construct these in the first place... */
        ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
        return;
    }
    out_reg = arch_get_irn_register(arch_env, node);
    scale   = get_ia32_am_scale(node);
    assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
    /* check if we have immediate values (frame entities should already be
     * expressed in the offsets) */
    if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
        has_immediates = 1;
    } else {
        has_immediates = 0;
    }
    /* we can transform leas where the out register is the same as either the
     * base or index register back to an Add or Shl */
    if (out_reg == base_reg) {
        if (index == NULL) {
            if (!has_immediates) {
                ir_fprintf(stderr, "Optimisation warning: found lea which is "
                           "just a copy\n");
            }
            op1 = base;
            goto make_add_immediate;
        }
        if (scale == 0 && !has_immediates) {
            op1 = base;
            op2 = index;
            goto make_add;
        }
        /* can't create an add */
        return;
    } else if (out_reg == index_reg) {
        if (base == NULL) {
            if (has_immediates && scale == 0) {
                op1 = index;
                goto make_add_immediate;
            } else if (!has_immediates && scale > 0) {
                op1 = index;
                op2 = create_immediate_from_int(cg, scale);
                goto make_shl;
            } else if (!has_immediates) {
                ir_fprintf(stderr, "Optimisation warning: found lea which is "
                           "just a copy\n");
            }
        } else if (scale == 0 && !has_immediates) {
            op1 = index;
            op2 = base;
            goto make_add;
        }
        /* can't create an add */
        return;
    } else {
        /* can't create an add */
        return;
    }
make_add_immediate:
    if (ia32_cg_config.use_incdec) {
        if (is_am_one(node)) {
            dbgi  = get_irn_dbg_info(node);
            block = get_nodes_block(node);
            res   = new_rd_ia32_Inc(dbgi, irg, block, op1);
            arch_set_irn_register(arch_env, res, out_reg);
            goto exchange;
        }
        if (is_am_minus_one(node)) {
            dbgi  = get_irn_dbg_info(node);
            block = get_nodes_block(node);
            res   = new_rd_ia32_Dec(dbgi, irg, block, op1);
            arch_set_irn_register(arch_env, res, out_reg);
            goto exchange;
        }
    }
    op2 = create_immediate_from_am(cg, node);
make_add:
    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    noreg = ia32_new_NoReg_gp(cg);
    nomem = new_NoMem();
    res   = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
    arch_set_irn_register(arch_env, res, out_reg);
    set_ia32_commutative(res);
    goto exchange;
make_shl:
    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    noreg = ia32_new_NoReg_gp(cg);
    res   = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
    arch_set_irn_register(arch_env, res, out_reg);

exchange:
    SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));

    /* add the new ADD/SHL to the schedule */
    DBG_OPT_LEA2ADD(node, res);

    /* exchange the Add and the LEA */
    be_peephole_before_exchange(node, res);
    sched_add_before(node, res);
    sched_remove(node);
    exchange(node, res);
    be_peephole_after_exchange(res);
}
/**
 * Splits an IMul mem, imm into a Load mem and IMul reg, imm if possible.
 */
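/*
 * A rough sketch, assuming %ecx happens to be free:
 *
 *   imul $7, 8(%eax), %edx    =>   mov 8(%eax), %ecx
 *                                  imul $7, %ecx, %edx
 */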
static void peephole_ia32_Imul_split(ir_node *imul) {
    const ir_node         *right = get_irn_n(imul, n_ia32_IMul_right);
    const arch_register_t *reg;
    ir_node               *load, *block, *base, *index, *mem, *res, *noreg;
    dbg_info              *dbgi;
    ir_graph              *irg;

    if (!is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
        /* not a mem, imm form: ignore it */
        return;
    }
    /* we need a free register */
    reg = get_free_gp_reg();
    if (reg == NULL)
        return;
    /* fine, we can rebuild it */
    dbgi  = get_irn_dbg_info(imul);
    block = get_nodes_block(imul);
    irg   = current_ir_graph;
    base  = get_irn_n(imul, n_ia32_IMul_base);
    index = get_irn_n(imul, n_ia32_IMul_index);
    mem   = get_irn_n(imul, n_ia32_IMul_mem);
    load  = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);

    /* copy all attributes */
    set_irn_pinned(load, get_irn_pinned(imul));
    set_ia32_op_type(load, ia32_AddrModeS);
    set_ia32_ls_mode(load, get_ia32_ls_mode(imul));

    set_ia32_am_scale(load, get_ia32_am_scale(imul));
    set_ia32_am_sc(load, get_ia32_am_sc(imul));
    set_ia32_am_offs_int(load, get_ia32_am_offs_int(imul));
    if (is_ia32_am_sc_sign(imul))
        set_ia32_am_sc_sign(load);
    if (is_ia32_use_frame(imul))
        set_ia32_use_frame(load);
    set_ia32_frame_ent(load, get_ia32_frame_ent(imul));

    sched_add_before(imul, load);

    mem = new_rd_Proj(dbgi, irg, block, load, mode_M, pn_ia32_Load_M);
    res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
    arch_set_irn_register(arch_env, res, reg);
    be_peephole_after_exchange(res);

    set_irn_n(imul, n_ia32_IMul_mem, mem);
    noreg = get_irn_n(imul, n_ia32_IMul_left);
    set_irn_n(imul, n_ia32_IMul_left, res);
    set_ia32_op_type(imul, ia32_Normal);
}
/**
 * Replaces xorps r,r and xorpd r,r by pxor r,r.
 */
static void peephole_ia32_xZero(ir_node *xor) {
    set_irn_op(xor, op_ia32_xPzero);
}
/**
 * Registers a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
    assert(op->ops.generic == NULL);
    op->ops.generic = (op_func)func;
}
/* Perform peephole optimizations. */
void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
{
    cg       = new_cg;
    arch_env = cg->arch_env;

    /* register the peephole optimisations */
    clear_irp_opcodes_generic_func();
    register_peephole_optimisation(op_ia32_Const,    peephole_ia32_Const);
    //register_peephole_optimisation(op_ia32_Store, peephole_ia32_Store);
    register_peephole_optimisation(op_be_IncSP,      peephole_be_IncSP);
    register_peephole_optimisation(op_ia32_Lea,      peephole_ia32_Lea);
    register_peephole_optimisation(op_ia32_Test,     peephole_ia32_Test);
    register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
    register_peephole_optimisation(op_be_Return,     peephole_ia32_Return);
    if (!ia32_cg_config.use_imul_mem_imm32)
        register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
    if (ia32_cg_config.use_pxor)
        register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);

    be_peephole_opt(cg->birg);
}
/**
 * Removes the node from the schedule if it is not used anymore. If the node
 * is a mode_T node, all its Projs are removed as well.
 *
 * @param node  the node to be removed from the schedule
 */
static INLINE void try_kill(ir_node *node)
{
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge, *next;
        foreach_out_edge_safe(node, edge, next) {
            ir_node *proj = get_edge_src_irn(edge);
            try_kill(proj);
        }
    }

    if (get_irn_n_edges(node) != 0)
        return;

    if (sched_is_scheduled(node)) {
        sched_remove(node);
    }

    be_kill_node(node);
}
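/**
 * Removes a Conv in front of a Store when the Conv does not actually shrink
 * the mode: the Store only writes the lower bits anyway, so the Conv is
 * redundant there.
 */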
static void optimize_conv_store(ir_node *node)
{
    ir_node *pred;
    ir_node *pred_proj;
    ir_mode *conv_mode;
    ir_mode *store_mode;

    if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
        return;

    assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
    pred_proj = get_irn_n(node, n_ia32_Store_val);
    if (is_Proj(pred_proj)) {
        pred = get_Proj_pred(pred_proj);
    } else {
        pred = pred_proj;
    }
    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
        return;
    if (get_ia32_op_type(pred) != ia32_Normal)
        return;

    /* the Store only stores the lower bits, so we only need the Conv
     * if it shrinks the mode */
    conv_mode  = get_ia32_ls_mode(pred);
    store_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
        return;

    set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
    if (get_irn_n_edges(pred_proj) == 0) {
        be_kill_node(pred_proj);
        if (pred != pred_proj)
            be_kill_node(pred);
    }
}
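/**
 * Removes a Conv behind a Load when the Load already produces a mode that
 * makes the Conv redundant; if only the signedness differs and the Load has
 * a single user, the Load's mode is adjusted instead.
 */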
static void optimize_load_conv(ir_node *node)
{
    ir_node *pred, *predpred;
    ir_mode *load_mode;
    ir_mode *conv_mode;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
        return;

    assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
    pred = get_irn_n(node, n_ia32_Conv_I2I_val);
    if (!is_Proj(pred))
        return;

    predpred = get_Proj_pred(pred);
    if (!is_ia32_Load(predpred))
        return;
    /* the Load is sign extending the upper bits, so we only need the Conv
     * if it shrinks the mode */
    load_mode = get_ia32_ls_mode(predpred);
    conv_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
        return;

    if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
        /* change the load if it has only 1 user */
        if (get_irn_n_edges(pred) == 1) {
            ir_mode *newmode;
            if (get_mode_sign(conv_mode)) {
                newmode = find_signed_mode(load_mode);
            } else {
                newmode = find_unsigned_mode(load_mode);
            }
            assert(newmode != NULL);
            set_ia32_ls_mode(predpred, newmode);
        } else {
            /* otherwise we have to keep the conv */
            return;
        }
    }

    exchange(node, pred);
}
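/**
 * Folds two consecutive Convs into one where possible, keeping the Conv
 * with the effectively smaller mode.
 */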
static void optimize_conv_conv(ir_node *node)
{
    ir_node *pred_proj, *pred, *result_conv;
    ir_mode *pred_mode, *conv_mode;
    int      conv_mode_bits;
    int      pred_mode_bits;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
        return;

    assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
    pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
    if (is_Proj(pred_proj))
        pred = get_Proj_pred(pred_proj);
    else
        pred = pred_proj;

    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
        return;
    /* we know that after a conv the upper bits are sign extended,
     * so we only need the 2nd conv if it shrinks the mode */
    conv_mode      = get_ia32_ls_mode(node);
    conv_mode_bits = get_mode_size_bits(conv_mode);
    pred_mode      = get_ia32_ls_mode(pred);
    pred_mode_bits = get_mode_size_bits(pred_mode);

    if (conv_mode_bits == pred_mode_bits
            && get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
        result_conv = pred_proj;
    } else if (conv_mode_bits <= pred_mode_bits) {
        /* if the 2nd conv is smaller than the first conv, then we can always
         * take the 2nd conv */
        if (get_irn_n_edges(pred_proj) == 1) {
            result_conv = pred_proj;
            set_ia32_ls_mode(pred, conv_mode);

            /* Argh: We must change the opcode to the 8bit variant AND copy
             * the register constraints */
            if (get_mode_size_bits(conv_mode) == 8) {
                set_irn_op(pred, op_ia32_Conv_I2I8Bit);
                set_ia32_in_req_all(pred, get_ia32_in_req_all(node));
            }
        } else {
            /* we don't want to end up with 2 loads, so we'd better do nothing */
            if (get_irn_mode(pred) == mode_T) {
                return;
            }

            result_conv = exact_copy(pred);
            set_ia32_ls_mode(result_conv, conv_mode);

            /* Argh: We must change the opcode to the 8bit variant AND copy
             * the register constraints */
            if (get_mode_size_bits(conv_mode) == 8) {
                set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
                set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
            }
        }
    } else {
        /* if both convs have the same sign, then we can take the smaller one */
        if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
            result_conv = pred_proj;
        } else {
            /* no optimisation is possible if the smaller conv is a sign
             * extension */
            if (mode_is_signed(pred_mode)) {
                return;
            }
            /* we can take the smaller conv if it is unsigned */
            result_conv = pred_proj;
        }
    }

    exchange(node, result_conv);

    if (get_irn_n_edges(pred_proj) == 0) {
        be_kill_node(pred_proj);
        if (pred != pred_proj)
            be_kill_node(pred);
    }
    optimize_conv_conv(result_conv);
}
static void optimize_node(ir_node *node, void *env)
{
    (void) env;

    optimize_load_conv(node);
    optimize_conv_store(node);
    optimize_conv_conv(node);
}
/**
 * Performs Conv and address mode optimizations.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg)
{
    irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

    be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);
}
void ia32_init_optimize(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}