/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Implements several optimizations for IA32.
 * @author      Matthias Braun, Christian Wuerdig
 */
#include "firm_types.h"
/* core ir headers for the node construction, graph modification, edge,
 * walker, printing and panic() helpers used below */
#include "irnode.h"
#include "irmode.h"
#include "ircons.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "iredges.h"
#include "irprintf.h"
#include "error.h"
#include "debug.h"

#include "../be_t.h"
#include "../benode_t.h"
#include "../besched_t.h"
#include "../bepeephole.h"

#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
#include "ia32_architecture.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static ia32_code_gen_t  *cg;
/**
 * Returns non-zero if the given node produces a zero flag.
 *
 * @param node  the node to check
 * @param pn    if >= 0, the projection number of the used result
 */
static int produces_zero_flag(ir_node *node, int pn)
{
	ir_node                     *count;
	const ia32_immediate_attr_t *imm_attr;

	if (!is_ia32_irn(node))
		return 0;

	if (pn >= 0) {
		if (pn != pn_ia32_res)
			return 0;
	}

	switch (get_ia32_irn_opcode(node)) {
	/* ... the cases for the plain arithmetic/logic nodes, which always set
	 * the zero flag according to their result, simply return 1 here ... */

	case iro_ia32_ShlD:
	case iro_ia32_ShrD:
	case iro_ia32_Shl:
	case iro_ia32_Shr:
	case iro_ia32_Sar:
		assert(n_ia32_ShlD_count == n_ia32_ShrD_count);
		assert(n_ia32_Shl_count == n_ia32_Shr_count
				&& n_ia32_Shl_count == n_ia32_Sar_count);
		if (is_ia32_ShlD(node) || is_ia32_ShrD(node)) {
			count = get_irn_n(node, n_ia32_ShlD_count);
		} else {
			count = get_irn_n(node, n_ia32_Shl_count);
		}
		/* when the shift count is zero the flags are not affected, so we can
		 * only do this for constants != 0 */
		if (!is_ia32_Immediate(count))
			return 0;

		imm_attr = get_ia32_immediate_attr_const(count);
		if (imm_attr->symconst != NULL)
			return 0;
		if ((imm_attr->offset & 0x1f) == 0)
			return 0;
		return 1;

	default:
		return 0;
	}
}
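/* Illustration for produces_zero_flag (editor's sketch, not emitter output):
 * after e.g. "and eax, ebx" the zero flag already reflects the result, so a
 * later "test eax, eax" on the same value is redundant. Shifts only qualify
 * with a constant, nonzero count, because "shl eax, 0" leaves all flags
 * untouched; the hardware masks the count to its low 5 bits, hence the
 * "& 0x1f" check above. */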
/**
 * If the given node does not already have mode_T, creates a mode_T version
 * (with a result Proj).
 *
 * @param node  the node to change
 *
 * @return the new mode_T node (if the mode was changed) or node itself
 */
static ir_node *turn_into_mode_t(ir_node *node)
{
	ir_node               *block;
	ir_node               *res_proj;
	ir_node               *new_node;
	const arch_register_t *reg;

	if(get_irn_mode(node) == mode_T)
		return node;

	assert(get_irn_mode(node) == mode_Iu);

	new_node = exact_copy(node);
	set_irn_mode(new_node, mode_T);

	block    = get_nodes_block(new_node);
	res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu,
	                      pn_ia32_res);

	reg = arch_get_irn_register(arch_env, node);
	arch_set_irn_register(arch_env, res_proj, reg);

	be_peephole_before_exchange(node, res_proj);
	sched_add_before(node, new_node);
	sched_remove(node);
	exchange(node, res_proj);
	be_peephole_after_exchange(res_proj);

	return res_proj;
}
/**
 * Peephole optimization for Test instructions.
 * We can remove the Test if a zero flag was produced which is still live.
 */
static void peephole_ia32_Test(ir_node *node)
{
	ir_node         *left  = get_irn_n(node, n_ia32_Test_left);
	ir_node         *right = get_irn_n(node, n_ia32_Test_right);
	ir_node         *flags_proj;
	ir_node         *block;
	ir_mode         *flags_mode;
	int              pn    = -1;
	ir_node         *schedpoint;
	const ir_edge_t *edge;

	assert(n_ia32_Test_left == n_ia32_Test8Bit_left
			&& n_ia32_Test_right == n_ia32_Test8Bit_right);

	/* we need a test for 0 */
	if(left != right)
		return;

	block = get_nodes_block(node);
	if(get_nodes_block(left) != block)
		return;

	if(is_Proj(left)) {
		pn   = get_Proj_proj(left);
		left = get_Proj_pred(left);
	}

	/* happens rarely, but if it does the code would panic in the loop below */
	if (is_ia32_Unknown_GP(left))
		return;

	/* walk the schedule upwards and abort when we find left or some other
	 * node that destroys the flags */
	schedpoint = sched_prev(node);
	while(schedpoint != left) {
		if(arch_irn_is(arch_env, schedpoint, modify_flags))
			return;
		if(schedpoint == block)
			panic("couldn't find left");
		schedpoint = sched_prev(schedpoint);
	}

	/* make sure only Lg/Eq tests are used */
	foreach_out_edge(node, edge) {
		ir_node *user = get_edge_src_irn(edge);
		int      pnc  = get_ia32_condcode(user);

		if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
			return;
		}
	}

	if(!produces_zero_flag(left, pn))
		return;

	left = turn_into_mode_t(left);

	flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
	flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode,
	                        pn_ia32_flags);
	arch_set_irn_register(arch_env, flags_proj, &ia32_flags_regs[REG_EFLAGS]);

	assert(get_irn_mode(node) != mode_T);

	be_peephole_before_exchange(node, flags_proj);
	exchange(node, flags_proj);
	sched_remove(node);
	be_peephole_after_exchange(flags_proj);
}
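/* Illustration for peephole_ia32_Test (editor's sketch):
 *
 *     and  eax, ebx              and  eax, ebx
 *     test eax, eax      =>                      ; ZF already set by the and
 *     je   somewhere             je   somewhere
 */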
/**
 * AMD Athlon works faster when RET is not the destination of a conditional
 * jump or directly preceded by another jump instruction. This can be avoided
 * by placing a Rep prefix before the return.
 */
static void peephole_ia32_Return(ir_node *node) {
	ir_node *block, *irn;

	if (!ia32_cg_config.use_pad_return)
		return;

	block = get_nodes_block(node);

	if (get_Block_n_cfgpreds(block) == 1) {
		ir_node *pred = get_Block_cfgpred(block, 0);

		if (is_Jmp(pred)) {
			/* The block of the return has only one predecessor,
			   which jumps directly to this block.
			   This jump will be encoded as a fall through, so there is
			   no jump in front of the return that needs padding.
			   However, the predecessor might be empty, so it must be
			   ensured that empty blocks are gone away ... */
			return;
		}
	}

	/* check if this return is the first on the block */
	sched_foreach_reverse_from(node, irn) {
		switch (get_irn_opcode(irn)) {
		case beo_Return:
			/* the return node itself, ignore */
			continue;
		case beo_Barrier:
			/* ignore the barrier, no code generated */
			continue;
		case beo_IncSP:
			/* arg, IncSP 0 nodes might occur, ignore these */
			if (be_get_IncSP_offset(irn) == 0)
				continue;
			return;
		case iro_Phi:
			continue;
		default:
			return;
		}
	}

	/* yep, return is the first real instruction in this block */
#if 0
	{
		/* add a rep prefix to the return */
		ir_node *rep = new_rd_ia32_RepPrefix(get_irn_dbg_info(node), current_ir_graph, block);
		keep_alive(rep);
		sched_add_before(node, rep);
	}
#else
	/* ensure that the 3 byte return is generated */
	be_Return_set_emit_pop(node, 1);
#endif
}
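/* Illustration for peephole_ia32_Return (editor's sketch): pad a return that
 * would otherwise be a branch target, e.g.
 *
 *          jne  .L1                   jne  .L1
 *          ...                =>      ...
 *     .L1: ret                   .L1: rep ret    ; resp. the padded return form
 */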
/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE	48

/**
 * Tries to create Pushs from IncSP, Store combinations.
 * The Stores are replaced by Pushs, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
 */
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
	int       i, maxslot, inc_ofs;
	ir_node  *node;
	ir_node  *stores[MAXPUSH_OPTIMIZE];
	ir_node  *block;
	ir_node  *curr_sp;
	ir_mode  *spmode;
	ir_graph *irg = get_irn_irg(irn);

	memset(stores, 0, sizeof(stores));

	assert(be_is_IncSP(irn));

	inc_ofs = be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule after the IncSP node as long as we find
	 * suitable Stores that could be transformed to a Push.
	 * We save them into the stores array which is sorted by the frame offset/4
	 * attached to the node
	 */
	maxslot = -1;
	for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
		ir_node *mem;
		int      offset;
		int      storeslot;

		/* it has to be a Store */
		if (!is_ia32_Store(node))
			break;

		/* it has to use our sp value */
		if (get_irn_n(node, n_ia32_base) != irn)
			continue;
		/* Store has to be attached to NoMem */
		mem = get_irn_n(node, n_ia32_mem);
		if (!is_NoMem(mem))
			continue;

		/* unfortunately we can't support the full AMs possible for push at the
		 * moment. TODO: fix this */
		if (get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		offset = inc_ofs - 4 - offset;

		/* storing at half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
			continue;
		storeslot = offset >> 2;

		/* storing into the same slot twice is bad (and shouldn't happen...) */
		if (stores[storeslot] != NULL)
			break;

		stores[storeslot] = node;
		if (storeslot > maxslot)
			maxslot = storeslot;
	}

	curr_sp = be_get_IncSP_pred(irn);

	/* walk through the Stores and create Pushs for them */
	block  = get_nodes_block(irn);
	spmode = get_irn_mode(irn);
	for (i = 0; i <= maxslot; ++i) {
		const arch_register_t *spreg;
		ir_node *push;
		ir_node *val, *mem, *mem_proj;
		ir_node *store = stores[i];
		ir_node *noreg = ia32_new_NoReg_gp(cg);

		if (store == NULL)
			break;

		val   = get_irn_n(store, n_ia32_unary_op);
		mem   = get_irn_n(store, n_ia32_mem);
		spreg = arch_get_irn_register(cg->arch_env, curr_sp);

		push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);

		sched_add_before(irn, push);

		/* create stackpointer Proj */
		curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
		arch_set_irn_register(cg->arch_env, curr_sp, spreg);

		/* create memory Proj */
		mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

		/* use the memproj now */
		exchange(store, mem_proj);

		/* we can remove the Store now */
		sched_remove(store);

		inc_ofs -= 4;
	}

	be_set_IncSP_offset(irn, inc_ofs);
	be_set_IncSP_pred(irn, curr_sp);
}
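/* Illustration for peephole_IncSP_Store_to_push (editor's sketch):
 *
 *     sub esp, 8                    push ebx    ; was: mov [esp+4], ebx
 *     mov [esp+4], ebx      =>      push eax    ; was: mov [esp], eax
 *     mov [esp], eax
 */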
/**
 * Tries to create Pops from Load, IncSP combinations.
 * The Loads are replaced by Pops, the IncSP is modified
 * (possibly into IncSP 0, but not removed).
 */
static void peephole_Load_IncSP_to_pop(ir_node *irn)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	int       i, maxslot, inc_ofs, ofs;
	ir_node  *node, *pred_sp, *block;
	ir_node  *loads[MAXPUSH_OPTIMIZE];
	ir_graph *irg = get_irn_irg(irn);
	unsigned  regmask = 0;

	memset(loads, 0, sizeof(loads));
	assert(be_is_IncSP(irn));

	inc_ofs = -be_get_IncSP_offset(irn);
	if (inc_ofs < 4)
		return;

	/*
	 * We first walk the schedule before the IncSP node as long as we find
	 * suitable Loads that could be transformed to a Pop.
	 * We save them into the loads array which is sorted by the frame offset/4
	 * attached to the node
	 */
	maxslot = -1;
	pred_sp = be_get_IncSP_pred(irn);
	for (node = sched_prev(irn); !sched_is_end(node); node = sched_prev(node)) {
		ir_node *mem;
		int      offset;
		int      loadslot;
		const arch_register_t *dreg;

		/* it has to be a Load */
		if (!is_ia32_Load(node)) {
			if (be_is_Copy(node)) {
				if (get_irn_mode(node) != mode_Iu) {
					/* not a GP copy, ignore */
					continue;
				}
				dreg = arch_get_irn_register(arch_env, node);
				if (regmask & (1 << dreg->index)) {
					break;
				}
				/* we CAN skip Copies if the destination is not in our regmask,
				   ie none of our future Pops will overwrite it */
				regmask |= (1 << dreg->index);
				continue;
			}
			break;
		}

		/* we can handle only GP loads */
		if (get_ia32_ls_mode(node) != mode_Iu)
			break;

		/* it has to use our predecessor sp value */
		if (get_irn_n(node, n_ia32_base) != pred_sp)
			break;
		/* Load has to be attached to Spill-Mem */
		mem = skip_Proj(get_irn_n(node, n_ia32_mem));
		if (!is_Phi(mem) && !is_ia32_Store(mem) && !is_ia32_Push(mem))
			break;

		/* should have NO index */
		if (get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);
		/* we should NEVER access uninitialized stack BELOW the current SP */
		assert(offset >= 0);

		/* loading from half-slots is bad */
		if ((offset & 3) != 0)
			break;

		if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
			continue;
		loadslot = offset >> 2;

		/* loading from the same slot twice is bad (and shouldn't happen...) */
		if (loads[loadslot] != NULL)
			break;

		dreg = arch_get_irn_register(arch_env, node);
		if (regmask & (1 << dreg->index)) {
			/* this register is already used */
			break;
		}
		regmask |= 1 << dreg->index;

		loads[loadslot] = node;
		if (loadslot > maxslot)
			maxslot = loadslot;
	}

	if (maxslot < 0)
		return;

	/* find the lowest contiguous slot */
	for (i = maxslot; i >= 0; --i) {
		ir_node *load = loads[i];

		if (load == NULL)
			break;
	}

	/* anything above the popped slots stays with the original IncSP */
	ofs     = inc_ofs - (maxslot + 1) * 4;
	inc_ofs = (i + 1) * 4;

	/* create a new IncSP if needed */
	block = get_nodes_block(irn);
	if (inc_ofs > 0) {
		pred_sp = be_new_IncSP(esp, irg, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
		sched_add_before(irn, pred_sp);
	}

	/* walk through the Loads and create Pops for them */
	for (++i; i <= maxslot; ++i) {
		ir_node *load = loads[i];
		ir_node *mem, *pop;
		const ir_edge_t *edge, *tmp;
		const arch_register_t *reg;

		mem = get_irn_n(load, n_ia32_mem);
		reg = arch_get_irn_register(arch_env, load);

		pop = new_rd_ia32_Pop(get_irn_dbg_info(load), irg, block, mem, pred_sp);
		arch_set_irn_register(arch_env, pop, reg);

		/* create stackpointer Proj */
		pred_sp = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
		arch_set_irn_register(arch_env, pred_sp, esp);

		sched_add_before(irn, pop);

		/* rewire the Projs of the Load to the Pop */
		foreach_out_edge_safe(load, edge, tmp) {
			ir_node *proj = get_edge_src_irn(edge);

			set_Proj_pred(proj, pop);
		}

		/* we can remove the Load now */
		sched_remove(load);
		be_kill_node(load);
	}

	be_set_IncSP_offset(irn, -ofs);
	be_set_IncSP_pred(irn, pred_sp);
}
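/* Illustration for peephole_Load_IncSP_to_pop (editor's sketch):
 *
 *     mov eax, [esp]                pop eax     ; was: mov eax, [esp]
 *     mov ebx, [esp+4]      =>      pop ebx     ; was: mov ebx, [esp+4]
 *     add esp, 8
 */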
/**
 * Find a free GP register if possible, else return NULL.
 */
static const arch_register_t *get_free_gp_reg(void)
{
	int i;

	for(i = 0; i < N_ia32_gp_REGS; ++i) {
		const arch_register_t *reg = &ia32_gp_regs[i];
		if(arch_register_type_is(reg, ignore))
			continue;

		if(be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
			return &ia32_gp_regs[i];
	}

	return NULL;
}
/**
 * Creates a Pop instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param irg         the graph
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to pop
 *
 * @return the new stack value
 */
static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block,
                           ir_node *stack, ir_node *schedpoint,
                           const arch_register_t *reg)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	ir_node *pop;
	ir_node *keep;
	ir_node *val;
	ir_node *in[1];

	pop = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);

	stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
	arch_set_irn_register(arch_env, stack, esp);
	val = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
	arch_set_irn_register(arch_env, val, reg);

	sched_add_before(schedpoint, pop);

	in[0] = val;
	keep  = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
	sched_add_before(schedpoint, keep);

	return stack;
}
/**
 * Creates a Push instruction before the given schedule point.
 *
 * @param dbgi        debug info
 * @param irg         the graph
 * @param block       the block
 * @param stack       the previous stack value
 * @param schedpoint  the new node is added before this node
 * @param reg         the register to push
 *
 * @return the new stack value
 */
static ir_node *create_push(dbg_info *dbgi, ir_graph *irg, ir_node *block,
                            ir_node *stack, ir_node *schedpoint,
                            const arch_register_t *reg)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	ir_node *noreg, *nomem, *push, *val;

	val = new_rd_ia32_ProduceVal(NULL, irg, block);
	arch_set_irn_register(arch_env, val, reg);
	sched_add_before(schedpoint, val);

	noreg = ia32_new_NoReg_gp(cg);
	nomem = get_irg_no_mem(irg);
	push  = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack);
	sched_add_before(schedpoint, push);

	stack = new_r_Proj(irg, block, push, mode_Iu, pn_ia32_Push_stack);
	arch_set_irn_register(arch_env, stack, esp);

	return stack;
}
/**
 * Optimize an IncSP by replacing it with Push/Pop.
 */
static void peephole_be_IncSP(ir_node *node)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	const arch_register_t *reg;
	ir_graph              *irg = current_ir_graph;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *stack;
	int                    offset;

	/* first optimize incsp->incsp combinations */
	node = be_peephole_IncSP_IncSP(node);

	/* transform IncSP->Store combinations to Push where possible */
	peephole_IncSP_Store_to_push(node);

	/* transform Load->IncSP combinations to Pop where possible */
	peephole_Load_IncSP_to_pop(node);

	if (arch_get_irn_register(arch_env, node) != esp)
		return;

	/* replace IncSP -4 by Pop freereg when possible */
	offset = be_get_IncSP_offset(node);
	if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
	    (offset != -4 || ia32_cg_config.use_add_esp_4) &&
	    (offset != +4 || ia32_cg_config.use_sub_esp_4) &&
	    (offset != +8 || ia32_cg_config.use_sub_esp_8))
		return;

	if (offset < 0) {
		/* we need a free register for pop */
		reg = get_free_gp_reg();
		if (reg == NULL)
			return;

		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);

		stack = create_pop(dbgi, irg, block, stack, node, reg);
		if (offset == -8) {
			stack = create_pop(dbgi, irg, block, stack, node, reg);
		}
	} else {
		dbgi  = get_irn_dbg_info(node);
		block = get_nodes_block(node);
		stack = be_get_IncSP_pred(node);
		reg   = &ia32_gp_regs[REG_EAX];

		stack = create_push(dbgi, irg, block, stack, node, reg);
		if (offset == +8) {
			stack = create_push(dbgi, irg, block, stack, node, reg);
		}
	}

	be_peephole_before_exchange(node, stack);
	sched_remove(node);
	exchange(node, stack);
	be_peephole_after_exchange(stack);
}
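/* Illustration for peephole_be_IncSP (editor's sketch): on targets where the
 * config flags above are unset, small esp adjustments are cheaper as stack
 * operations:
 *
 *     add esp, 4      =>      pop ecx         ; any free register
 *     sub esp, 8      =>      push eax        ; eax pushed twice, the value
 *                             push eax        ; is irrelevant (ProduceVal)
 */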
/**
 * Peephole optimisation for ia32_Const nodes.
 */
static void peephole_ia32_Const(ir_node *node)
{
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
	const arch_register_t       *reg;
	ir_graph                    *irg = current_ir_graph;
	ir_node                     *block;
	dbg_info                    *dbgi;
	ir_node                     *produceval;
	ir_node                     *xor;
	ir_node                     *noreg;

	/* try to transform a mov 0, reg to xor reg reg */
	if (attr->offset != 0 || attr->symconst != NULL)
		return;
	if (ia32_cg_config.use_mov_0)
		return;
	/* xor destroys the flags, so no-one must be using them */
	if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;

	reg = arch_get_irn_register(arch_env, node);
	assert(be_peephole_get_reg_value(reg) == NULL);

	/* create xor(produceval, produceval) */
	block = get_nodes_block(node);
	dbgi  = get_irn_dbg_info(node);
	produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
	arch_set_irn_register(arch_env, produceval, reg);

	noreg = ia32_new_NoReg_gp(cg);
	xor   = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
	                        produceval, produceval);
	arch_set_irn_register(arch_env, xor, reg);

	sched_add_before(node, produceval);
	sched_add_before(node, xor);

	be_peephole_before_exchange(node, xor);
	exchange(node, xor);
	sched_remove(node);
	be_peephole_after_exchange(xor);
}
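/* Illustration for peephole_ia32_Const (editor's sketch):
 *
 *     mov eax, 0      =>      xor eax, eax    ; shorter encoding, but
 *                                             ; clobbers the flags
 */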
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
{
	return node == cg->noreg_gp;
}

static ir_node *create_immediate_from_int(ia32_code_gen_t *cg, int val)
{
	ir_graph *irg         = current_ir_graph;
	ir_node  *start_block = get_irg_start_block(irg);
	/* no symconst, sign 0, offset = val */
	ir_node  *immediate   = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
	                                              0, val);
	arch_set_irn_register(cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);

	return immediate;
}

static ir_node *create_immediate_from_am(ia32_code_gen_t *cg,
                                         const ir_node *node)
{
	ir_graph  *irg     = get_irn_irg(node);
	ir_node   *block   = get_nodes_block(node);
	int        offset  = get_ia32_am_offs_int(node);
	int        sc_sign = is_ia32_am_sc_sign(node);
	ir_entity *entity  = get_ia32_am_sc(node);
	ir_node   *res;

	res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
	arch_set_irn_register(cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]);
	return res;
}

static int is_am_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == 1 && entity == NULL;
}

static int is_am_minus_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == -1 && entity == NULL;
}
/**
 * Transforms a LEA into an Add or Shl if possible.
 */
static void peephole_ia32_Lea(ir_node *node)
{
	const arch_env_t      *arch_env = cg->arch_env;
	ir_graph              *irg      = current_ir_graph;
	ir_node               *base;
	ir_node               *index;
	const arch_register_t *base_reg;
	const arch_register_t *index_reg;
	const arch_register_t *out_reg;
	int                    scale;
	int                    has_immediates;
	ir_node               *op1;
	ir_node               *op2;
	dbg_info              *dbgi;
	ir_node               *block;
	ir_node               *res;
	ir_node               *noreg;
	ir_node               *nomem;

	assert(is_ia32_Lea(node));

	/* we can only do this if we are allowed to clobber the flags */
	if(be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;

	base  = get_irn_n(node, n_ia32_Lea_base);
	index = get_irn_n(node, n_ia32_Lea_index);

	if(is_noreg(cg, base)) {
		base     = NULL;
		base_reg = NULL;
	} else {
		base_reg = arch_get_irn_register(arch_env, base);
	}
	if(is_noreg(cg, index)) {
		index     = NULL;
		index_reg = NULL;
	} else {
		index_reg = arch_get_irn_register(arch_env, index);
	}

	if(base == NULL && index == NULL) {
		/* we shouldn't construct these in the first place... */
		ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
		return;
	}

	out_reg = arch_get_irn_register(arch_env, node);
	scale   = get_ia32_am_scale(node);
	assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
	/* check if we have immediate values (frame entities should already be
	 * expressed in the offsets) */
	if(get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
		has_immediates = 1;
	} else {
		has_immediates = 0;
	}

	/* we can transform leas where the out register is the same as either the
	 * base or index register back to an Add or Shl */
	if(out_reg == base_reg) {
		if(index == NULL) {
			if(!has_immediates) {
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
			}
			op1 = base;
			goto make_add_immediate;
		}
		if(scale == 0 && !has_immediates) {
			op1 = base;
			op2 = index;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else if(out_reg == index_reg) {
		if(base == NULL) {
			if(has_immediates && scale == 0) {
				op1 = index;
				goto make_add_immediate;
			} else if(!has_immediates && scale > 0) {
				op1 = index;
				op2 = create_immediate_from_int(cg, scale);
				goto make_shl;
			} else if(!has_immediates) {
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
			}
		} else if(scale == 0 && !has_immediates) {
			op1 = index;
			op2 = base;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else {
		/* can't create an add */
		return;
	}

make_add_immediate:
	if(ia32_cg_config.use_incdec) {
		if(is_am_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_rd_ia32_Inc(dbgi, irg, block, op1);
			arch_set_irn_register(arch_env, res, out_reg);
			goto exchange;
		}
		if(is_am_minus_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_rd_ia32_Dec(dbgi, irg, block, op1);
			arch_set_irn_register(arch_env, res, out_reg);
			goto exchange;
		}
	}
	op2 = create_immediate_from_am(cg, node);

make_add:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	nomem = new_NoMem();
	res   = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
	arch_set_irn_register(arch_env, res, out_reg);
	set_ia32_commutative(res);
	goto exchange;

make_shl:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	res   = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
	arch_set_irn_register(arch_env, res, out_reg);

exchange:
	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));

	/* add the new ADD/SHL to the schedule */
	DBG_OPT_LEA2ADD(node, res);

	/* exchange the Add and the LEA */
	be_peephole_before_exchange(node, res);
	sched_add_before(node, res);
	sched_remove(node);
	exchange(node, res);
	be_peephole_after_exchange(res);
}
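/* Illustrations for peephole_ia32_Lea (editor's sketch; am_scale holds the
 * shift amount, i.e. scale 2 means a factor of 4):
 *
 *     lea eax, [eax+ebx]      =>      add eax, ebx
 *     lea eax, [eax+7]        =>      add eax, 7      (resp. inc/dec for +-1)
 *     lea eax, [eax*4]        =>      shl eax, 2
 */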
/**
 * Splits an Imul mem, imm into a Load mem and Imul reg, imm if possible.
 */
static void peephole_ia32_Imul_split(ir_node *imul) {
	const ir_node         *right = get_irn_n(imul, n_ia32_IMul_right);
	const arch_register_t *reg;
	ir_node               *load, *block, *base, *index, *mem, *res, *noreg;
	dbg_info              *dbgi;
	ir_graph              *irg;

	if (! is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
		/* no memory, imm form: ignore */
		return;
	}
	/* we need a free register */
	reg = get_free_gp_reg();
	if (reg == NULL)
		return;

	/* fine, we can rebuild it */
	dbgi  = get_irn_dbg_info(imul);
	block = get_nodes_block(imul);
	irg   = current_ir_graph;
	base  = get_irn_n(imul, n_ia32_IMul_base);
	index = get_irn_n(imul, n_ia32_IMul_index);
	mem   = get_irn_n(imul, n_ia32_IMul_mem);
	load  = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);

	/* copy all attributes */
	set_irn_pinned(load, get_irn_pinned(imul));
	set_ia32_op_type(load, ia32_AddrModeS);
	set_ia32_ls_mode(load, get_ia32_ls_mode(imul));

	set_ia32_am_scale(load, get_ia32_am_scale(imul));
	set_ia32_am_sc(load, get_ia32_am_sc(imul));
	set_ia32_am_offs_int(load, get_ia32_am_offs_int(imul));
	if (is_ia32_am_sc_sign(imul))
		set_ia32_am_sc_sign(load);
	if (is_ia32_use_frame(imul))
		set_ia32_use_frame(load);
	set_ia32_frame_ent(load, get_ia32_frame_ent(imul));

	sched_add_before(imul, load);

	mem = new_rd_Proj(dbgi, irg, block, load, mode_M, pn_ia32_Load_M);
	res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);

	arch_set_irn_register(arch_env, res, reg);
	be_peephole_after_exchange(res);

	set_irn_n(imul, n_ia32_IMul_mem, mem);
	noreg = get_irn_n(imul, n_ia32_IMul_left);
	set_irn_n(imul, n_ia32_IMul_left, res);
	set_ia32_op_type(imul, ia32_Normal);
	/* the address inputs are unused in the Normal form; point them at noreg */
	set_irn_n(imul, n_ia32_IMul_base, noreg);
	set_irn_n(imul, n_ia32_IMul_index, noreg);
}
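/* Illustration for peephole_ia32_Imul_split (editor's sketch), using the
 * free register found above:
 *
 *     imul eax, [ebp+8], 3      =>      mov  ecx, [ebp+8]
 *                                       imul eax, ecx, 3
 */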
/**
 * Replace xorps r,r and xorpd r,r by pxor r,r.
 */
static void peephole_ia32_xZero(ir_node *xor) {
	set_irn_op(xor, op_ia32_xPzero);
}
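/* For peephole_ia32_xZero (editor's note): all three idioms zero an XMM
 * register, but the integer form is preferable on the CPUs for which
 * ia32_cg_config.use_pxor is set:
 *
 *     xorps xmm0, xmm0  /  xorpd xmm0, xmm0      =>      pxor xmm0, xmm0
 */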
/**
 * Register a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
	assert(op->ops.generic == NULL);
	op->ops.generic = (op_func)func;
}
/* Perform peephole-optimizations. */
void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
{
	cg       = new_cg;
	arch_env = cg->arch_env;

	/* register peephole optimisations */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Const,    peephole_ia32_Const);
	register_peephole_optimisation(op_be_IncSP,      peephole_be_IncSP);
	register_peephole_optimisation(op_ia32_Lea,      peephole_ia32_Lea);
	register_peephole_optimisation(op_ia32_Test,     peephole_ia32_Test);
	register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
	register_peephole_optimisation(op_be_Return,     peephole_ia32_Return);
	if (! ia32_cg_config.use_imul_mem_imm32)
		register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
	if (ia32_cg_config.use_pxor)
		register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);

	be_peephole_opt(cg->birg);
}
/**
 * Removes the node from the schedule if it is not used anymore. If the node
 * is a mode_T node, all its Projs are removed as well.
 *
 * @param node  the node to be removed from the schedule
 */
static INLINE void try_kill(ir_node *node)
{
	if(get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(node, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);
			try_kill(proj);
		}
	}

	if(get_irn_n_edges(node) != 0)
		return;

	if (sched_is_scheduled(node)) {
		sched_remove(node);
	}

	be_kill_node(node);
}
static void optimize_conv_store(ir_node *node)
{
	ir_node *pred;
	ir_node *pred_proj;
	ir_mode *conv_mode;
	ir_mode *store_mode;

	if(!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
		return;

	assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Store_val);
	if(is_Proj(pred_proj)) {
		pred = get_Proj_pred(pred_proj);
	} else {
		pred = pred_proj;
	}
	if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;
	if(get_ia32_op_type(pred) != ia32_Normal)
		return;

	/* the store only stores the lower bits, so we only need the conv
	 * if it shrinks the mode */
	conv_mode  = get_ia32_ls_mode(pred);
	store_mode = get_ia32_ls_mode(node);
	if(get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
		return;

	set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
	if(get_irn_n_edges(pred_proj) == 0) {
		be_kill_node(pred_proj);
		if(pred != pred_proj)
			be_kill_node(pred);
	}
}
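/* Illustration for optimize_conv_store (editor's sketch): a conv that widens
 * the value in front of a store of the narrower mode is useless, because the
 * store keeps only the low bits anyway:
 *
 *     movzx eax, bl                     mov [mem], bl
 *     mov   [mem], al           =>
 */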
static void optimize_load_conv(ir_node *node)
{
	ir_node *pred, *predpred;
	ir_mode *load_mode;
	ir_mode *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred = get_irn_n(node, n_ia32_Conv_I2I_val);
	if(!is_Proj(pred))
		return;

	predpred = get_Proj_pred(pred);
	if(!is_ia32_Load(predpred))
		return;

	/* the load is sign/zero extending the upper bits, so we only need the
	 * conv if it shrinks the mode */
	load_mode = get_ia32_ls_mode(predpred);
	conv_mode = get_ia32_ls_mode(node);
	if(get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
		return;

	if(get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
		/* change the load if it has only 1 user */
		if(get_irn_n_edges(pred) == 1) {
			ir_mode *newmode;
			if(get_mode_sign(conv_mode)) {
				newmode = find_signed_mode(load_mode);
			} else {
				newmode = find_unsigned_mode(load_mode);
			}
			assert(newmode != NULL);
			set_ia32_ls_mode(predpred, newmode);
		} else {
			/* otherwise we have to keep the conv */
			return;
		}
	}

	/* kill the conv */
	exchange(node, pred);
}
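/* Illustration for optimize_load_conv (editor's sketch): the load already
 * extends to full register width, so a following widening conv is redundant:
 *
 *     movsx eax, byte [mem]             movsx eax, byte [mem]
 *     movsx eax, al               =>
 *
 * If only the signedness differs and the load has a single user, the load
 * itself is retagged instead, e.g. movsx becomes movzx.
 */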
static void optimize_conv_conv(ir_node *node)
{
	ir_node *pred_proj, *pred, *result_conv;
	ir_mode *pred_mode, *conv_mode;
	int      conv_mode_bits;
	int      pred_mode_bits;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
	if(is_Proj(pred_proj))
		pred = get_Proj_pred(pred_proj);
	else
		pred = pred_proj;

	if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;

	/* we know that after a conv, the upper bits are sign extended
	 * so we only need the 2nd conv if it shrinks the mode */
	conv_mode      = get_ia32_ls_mode(node);
	conv_mode_bits = get_mode_size_bits(conv_mode);
	pred_mode      = get_ia32_ls_mode(pred);
	pred_mode_bits = get_mode_size_bits(pred_mode);

	if(conv_mode_bits == pred_mode_bits
			&& get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
		result_conv = pred_proj;
	} else if(conv_mode_bits <= pred_mode_bits) {
		/* if the 2nd conv is smaller than the first conv, then we can always
		 * take the 2nd conv */
		if(get_irn_n_edges(pred_proj) == 1) {
			result_conv = pred_proj;
			set_ia32_ls_mode(pred, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(pred, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(pred, get_ia32_in_req_all(node));
			}
		} else {
			/* we don't want to end up with 2 loads, so we better do nothing */
			if(get_irn_mode(pred) == mode_T) {
				return;
			}

			result_conv = exact_copy(pred);
			set_ia32_ls_mode(result_conv, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
			}
		}
	} else {
		/* if both convs have the same sign, then we can take the smaller one */
		if(get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
			result_conv = pred_proj;
		} else {
			/* no optimisation possible if the smaller conv is sign-extending */
			if(mode_is_signed(pred_mode)) {
				return;
			}
			/* we can take the smaller conv if it is unsigned */
			result_conv = pred_proj;
		}
	}

	/* kill the conv */
	exchange(node, result_conv);

	if(get_irn_n_edges(pred_proj) == 0) {
		be_kill_node(pred_proj);
		if(pred != pred_proj)
			be_kill_node(pred);
	}
	optimize_conv_conv(result_conv);
}
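/* Illustration for optimize_conv_conv (editor's sketch): when the second
 * conv shrinks the mode, the first conv is redundant:
 *
 *     Conv8(Conv16(x))      =>      Conv8(x)
 *
 * The opcode switch to Conv_I2I8Bit above is needed because the 8 bit
 * variant carries the 8 bit register constraints (al/bl/cl/dl).
 */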
static void optimize_node(ir_node *node, void *env)
{
	(void) env;

	optimize_load_conv(node);
	optimize_conv_store(node);
	optimize_conv_conv(node);
}

/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg)
{
	irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

	if (cg->dump)
		be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);
}

void ia32_init_optimize(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}