/*
 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief  Implements several optimizations for IA32.
 * @author Matthias Braun, Christian Wuerdig
 */
#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"
#include "../bepeephole.h"

#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
#include "ia32_architecture.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static ia32_code_gen_t  *cg;

static void peephole_IncSP_IncSP(ir_node *node);
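/**
 * Tries to turn a Store into a Push when the stored value ends up at the top
 * of a freshly IncSP'd stack area. Roughly (illustrative sketch for a 4 byte
 * store; the Push writes the topmost slot of the new space):
 *
 *   sub esp, 8             push eax
 *   mov [esp+4], eax   =>  sub esp, 4
 */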
static void peephole_ia32_Store_IncSP_to_push(ir_node *node)
{
    ir_node  *base  = get_irn_n(node, n_ia32_Store_base);
    ir_node  *index = get_irn_n(node, n_ia32_Store_index);
    ir_node  *mem   = get_irn_n(node, n_ia32_Store_mem);
    ir_node  *incsp = base;
    int       offset;
    int       node_offset;
    ir_mode  *mode;
    ir_graph *irg;
    ir_node  *block;
    dbg_info *dbgi;
    ir_node  *noreg;
    ir_node  *val;
    ir_node  *push;
    ir_node  *proj;

    /* NoMem indicates that the store does not alias with anything else */
    if (!is_NoMem(mem))
        return;
    /* find an IncSP in front of us; we might have to skip barriers for this */
    while (is_Proj(incsp)) {
        ir_node *proj_pred = get_Proj_pred(incsp);
        if (!be_is_Barrier(proj_pred))
            return;
        incsp = get_irn_n(proj_pred, get_Proj_proj(incsp));
    }
    if (!be_is_IncSP(incsp))
        return;

    peephole_IncSP_IncSP(incsp);

    /* must be in the same block */
    if (get_nodes_block(incsp) != get_nodes_block(node))
        return;

    if (!is_ia32_NoReg_GP(index) || get_ia32_am_sc(node) != NULL) {
        panic("Invalid storeAM found (%+F)", node);
    }
    /* we should be the store to the end of the stack space */
    offset      = be_get_IncSP_offset(incsp);
    mode        = get_ia32_ls_mode(node);
    node_offset = get_ia32_am_offs_int(node);
    if (node_offset != offset - get_mode_size_bytes(mode))
        return;

    /* we can use a Push instead of the Store */
    irg   = current_ir_graph;
    block = get_nodes_block(node);
    dbgi  = get_irn_dbg_info(node);
    noreg = ia32_new_NoReg_gp(cg);
    base  = be_get_IncSP_pred(incsp);
    val   = get_irn_n(node, n_ia32_Store_val);
    push  = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, mem, base, val);

    proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

    be_set_IncSP_offset(incsp, offset - get_mode_size_bytes(mode));

    sched_add_before(node, push);
    sched_remove(node);

    be_peephole_before_exchange(node, proj);
    exchange(node, proj);
    be_peephole_after_exchange(proj);
}
static void peephole_ia32_Store(ir_node *node)
{
    peephole_ia32_Store_IncSP_to_push(node);
}
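/**
 * Checks whether the given node (or, for a mode_T node, its result Proj
 * number pn) sets the zero flag exactly as a Test against 0 would. On ia32
 * this holds e.g. for Add, Sub, And, Or and Xor, and for shifts with a
 * non-zero constant count.
 */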
static int produces_zero_flag(ir_node *node, int pn)
{
    ir_node                     *count;
    const ia32_immediate_attr_t *imm_attr;

    if (!is_ia32_irn(node))
        return 0;

    if (pn >= 0) {
        if (pn != pn_ia32_res)
            return 0;
    }

    switch (get_ia32_irn_opcode(node)) {
    case iro_ia32_Add:
    case iro_ia32_Adc:
    case iro_ia32_And:
    case iro_ia32_Or:
    case iro_ia32_Xor:
    case iro_ia32_Sub:
    case iro_ia32_Sbb:
    case iro_ia32_Neg:
        return 1;

    case iro_ia32_ShlD:
    case iro_ia32_ShrD:
    case iro_ia32_Shl:
    case iro_ia32_Shr:
    case iro_ia32_Sar:
        assert(n_ia32_ShlD_count == n_ia32_ShrD_count);
        assert(n_ia32_Shl_count == n_ia32_Shr_count
                && n_ia32_Shl_count == n_ia32_Sar_count);
        if (is_ia32_ShlD(node) || is_ia32_ShrD(node)) {
            count = get_irn_n(node, n_ia32_ShlD_count);
        } else {
            count = get_irn_n(node, n_ia32_Shl_count);
        }
        /* when the shift count is zero the flags are not affected, so we can
         * only do this for constants != 0 */
        if (!is_ia32_Immediate(count))
            return 0;
        imm_attr = get_ia32_immediate_attr_const(count);
        if (imm_attr->symconst != NULL)
            return 0;
        if ((imm_attr->offset & 0x1f) == 0)
            return 0;
        return 1;

    default:
        break;
    }
    return 0;
}
static ir_node *turn_into_mode_t(ir_node *node)
{
    ir_node               *block;
    ir_node               *res_proj;
    ir_node               *new_node;
    const arch_register_t *reg;

    if (get_irn_mode(node) == mode_T)
        return node;

    assert(get_irn_mode(node) == mode_Iu);

    new_node = exact_copy(node);
    set_irn_mode(new_node, mode_T);

    block    = get_nodes_block(new_node);
    res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu,
                          pn_ia32_res);

    reg = arch_get_irn_register(arch_env, node);
    arch_set_irn_register(arch_env, res_proj, reg);

    be_peephole_before_exchange(node, res_proj);
    sched_add_before(node, new_node);
    sched_remove(node);
    exchange(node, res_proj);
    be_peephole_after_exchange(res_proj);

    return new_node;
}
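/**
 * Tries to remove a Test against 0 when the tested value was just produced by
 * a flag-setting instruction. Illustrative sketch:
 *
 *   and eax, ebx           and eax, ebx
 *   test eax, eax    =>    jz target
 *   jz target
 *
 * Only Eq/Lg users may remain, since only the zero flag is reproduced.
 */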
static void peephole_ia32_Test(ir_node *node)
{
    ir_node         *left  = get_irn_n(node, n_ia32_Test_left);
    ir_node         *right = get_irn_n(node, n_ia32_Test_right);
    ir_node         *flags_proj;
    ir_node         *block;
    ir_mode         *flags_mode;
    ir_node         *schedpoint;
    int              pn = -1;
    const ir_edge_t *edge;

    assert(n_ia32_Test_left == n_ia32_Test8Bit_left
            && n_ia32_Test_right == n_ia32_Test8Bit_right);

    /* we need a test for 0 */
    if (left != right)
        return;

    block = get_nodes_block(node);
    if (get_nodes_block(left) != block)
        return;

    if (is_Proj(left)) {
        pn   = get_Proj_proj(left);
        left = get_Proj_pred(left);
    }

    /* happens rarely, but if it does, the code will panic */
    if (is_ia32_Unknown_GP(left))
        return;

    /* walk the schedule up and abort when we find left or some other node
     * that destroys the flags */
    schedpoint = sched_prev(node);
    while (schedpoint != left) {
        if (arch_irn_is(arch_env, schedpoint, modify_flags))
            return;
        if (schedpoint == block)
            panic("couldn't find left");
        schedpoint = sched_prev(schedpoint);
    }

    /* make sure only Lg/Eq tests are used */
    foreach_out_edge(node, edge) {
        ir_node *user = get_edge_src_irn(edge);
        int      pnc  = get_ia32_condcode(user);

        if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
            return;
        }
    }

    if (!produces_zero_flag(left, pn))
        return;

    left = turn_into_mode_t(left);

    flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
    flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode,
                            pn_ia32_flags);
    arch_set_irn_register(arch_env, flags_proj, &ia32_flags_regs[REG_EFLAGS]);

    assert(get_irn_mode(node) != mode_T);

    be_peephole_before_exchange(node, flags_proj);
    exchange(node, flags_proj);
    sched_remove(node);
    be_peephole_after_exchange(flags_proj);
}
/**
 * An AMD Athlon works faster when a RET is not the destination of a
 * conditional jump and not directly preceded by another jump instruction.
 * This can be avoided by placing a Rep prefix before the return.
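 * e.g. "jnz label; ret" becomes "jnz label; rep ret": the Rep prefix has no
 * architectural effect on the return, it merely pads it (the well-known
 * "rep ret" idiom from AMD's optimization guidelines).
 */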
static void peephole_ia32_Return(ir_node *node) {
    ir_node *block, *irn, *rep;

    if (!ia32_cg_config.use_pad_return)
        return;

    block = get_nodes_block(node);

    if (get_Block_n_cfgpreds(block) == 1) {
        ir_node *pred = get_Block_cfgpred(block, 0);

        if (is_Jmp(pred)) {
            /* The block of the return has only one predecessor,
               which jumps directly to this block.
               This jump will be encoded as a fall through, so we
               don't need to pad this return.
               However, the predecessor might be empty, so it must be
               ensured that empty blocks are removed ... */
            return;
        }
    }

    /* check if this return is the first node in the block */
    sched_foreach_reverse_from(node, irn) {
        switch (be_get_irn_opcode(irn)) {
        case beo_Return:
            /* the return node itself, ignore */
            continue;
        case beo_Barrier:
            /* ignore the barrier, no code generated */
            continue;
        case beo_IncSP:
            /* arg, IncSP 0 nodes might occur, ignore these */
            if (be_get_IncSP_offset(irn) == 0)
                continue;
            return;
        default:
            return;
        }
    }

    /* yep, the return is the first real instruction in this block */
    rep = new_rd_ia32_RepPrefix(get_irn_dbg_info(node), current_ir_graph, block);
    sched_add_before(node, rep);
}
/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE 48
/**
 * Tries to create Pushes from IncSP, Store combinations.
 * The Stores are replaced by Pushes; the IncSP is modified
 * (possibly into an IncSP 0, but it is not removed).
 *
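 * Illustrative sketch (two 4 byte stores):
 *
 *   sub esp, 8
 *   mov [esp+4], eax   =>   push eax
 *   mov [esp], ebx          push ebx
 */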
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
    int       i;
    int       offset;
    ir_node  *node;
    ir_node  *stores[MAXPUSH_OPTIMIZE];
    ir_node  *block  = get_nodes_block(irn);
    ir_graph *irg    = cg->irg;
    ir_node  *curr_sp;
    ir_mode  *spmode = get_irn_mode(irn);

    memset(stores, 0, sizeof(stores));

    assert(be_is_IncSP(irn));

    offset = be_get_IncSP_offset(irn);
    if (offset < 4)
        return;

    /*
     * We first walk the schedule after the IncSP node as long as we find
     * suitable stores that could be transformed to a push.
     * We save them into the stores array which is sorted by the frame offset/4
     * attached to the node.
     */
    for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
        ir_node *mem;
        int      offset;
        int      storeslot;

        // it has to be a store
        if (!is_ia32_Store(node))
            break;

        // it has to use our sp value
        if (get_irn_n(node, n_ia32_base) != irn)
            continue;
        // the store has to be attached to NoMem
        mem = get_irn_n(node, n_ia32_mem);
        if (!is_NoMem(mem))
            continue;

        /* unfortunately we can't support the full AMs possible for push at the
         * moment. TODO: fix this */
        if (get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
            break;

        offset = get_ia32_am_offs_int(node);

        storeslot = offset / 4;
        if (storeslot >= MAXPUSH_OPTIMIZE)
            continue;

        // storing into the same slot twice is bad (and shouldn't happen...)
        if (stores[storeslot] != NULL)
            break;

        // storing at half-slots is bad
        if (offset % 4 != 0)
            break;

        stores[storeslot] = node;
    }
    curr_sp = be_get_IncSP_pred(irn);

    // walk the stores in inverse order and create pushes for them
    i = (offset / 4) - 1;
    if (i >= MAXPUSH_OPTIMIZE) {
        i = MAXPUSH_OPTIMIZE - 1;
    }

    for ( ; i >= 0; --i) {
        const arch_register_t *spreg;
        ir_node *push;
        ir_node *val, *mem, *mem_proj;
        ir_node *store = stores[i];
        ir_node *noreg = ia32_new_NoReg_gp(cg);

        if (store == NULL || is_Bad(store))
            break;

        val   = get_irn_n(store, n_ia32_unary_op);
        mem   = get_irn_n(store, n_ia32_mem);
        spreg = arch_get_irn_register(cg->arch_env, curr_sp);

        push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, curr_sp, val);

        sched_add_before(irn, push);

        // create stackpointer proj
        curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
        arch_set_irn_register(cg->arch_env, curr_sp, spreg);

        // create memory proj
        mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

        // use the memproj now
        exchange(store, mem_proj);

        // we can remove the store now
        sched_remove(store);

        offset -= 4;
    }

    be_set_IncSP_offset(irn, offset);
    be_set_IncSP_pred(irn, curr_sp);
}
/**
 * Tries to optimize two consecutive IncSP nodes into one.
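 * e.g. an IncSP 4 directly followed by an IncSP 8 becomes a single IncSP 12
 * ("sub esp, 4; sub esp, 8" => "sub esp, 12").
 */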
static void peephole_IncSP_IncSP(ir_node *node)
{
    int      pred_offs;
    int      curr_offs;
    int      offs;
    ir_node *predpred;
    ir_node *pred = be_get_IncSP_pred(node);

    if (!be_is_IncSP(pred))
        return;
    if (get_irn_n_edges(pred) > 1)
        return;

    pred_offs = be_get_IncSP_offset(pred);
    curr_offs = be_get_IncSP_offset(node);

    if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
        if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
            return;
        }
        offs = 0;
    } else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
        if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
            return;
        }
        offs = 0;
    } else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND
            || curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
        return;
    } else {
        offs = curr_offs + pred_offs;
    }

    /* add the pred offset to ours and remove the pred IncSP */
    be_set_IncSP_offset(node, offs);

    predpred = be_get_IncSP_pred(pred);
    be_peephole_before_exchange(pred, predpred);

    /* rewire dependency edges */
    edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);
    be_set_IncSP_pred(node, predpred);
    sched_remove(pred);
    be_kill_node(pred);
    be_peephole_after_exchange(predpred);
}
/**
 * Find a free GP register if possible, else return NULL.
 */
static const arch_register_t *get_free_gp_reg(void)
{
    int i;

    for (i = 0; i < N_ia32_gp_REGS; ++i) {
        const arch_register_t *reg = &ia32_gp_regs[i];
        if (arch_register_type_is(reg, ignore))
            continue;

        if (be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
            return &ia32_gp_regs[i];
    }

    return NULL;
}
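/**
 * Peephole optimisation for be_IncSP nodes: folds IncSP chains, turns
 * IncSP/Store combinations into Pushes and replaces small stack adjustments
 * by Pops into a free register where that is cheaper, e.g. (sketch, any free
 * GP register would do):
 *
 *   add esp, 4   =>   pop ecx
 */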
static void peephole_be_IncSP(ir_node *node)
{
    const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
    const arch_register_t *reg;
    ir_graph              *irg;
    dbg_info              *dbgi;
    ir_node               *block;
    ir_node               *keep;
    ir_node               *val;
    ir_node               *pop, *pop2;
    ir_node               *stack;
    int                    offset;

    /* first optimize incsp->incsp combinations */
    peephole_IncSP_IncSP(node);

    /* transform IncSP->Store combinations to Push where possible */
    peephole_IncSP_Store_to_push(node);

    if (arch_get_irn_register(arch_env, node) != esp)
        return;

    /* replace IncSP -4 by Pop freereg when possible */
    offset = be_get_IncSP_offset(node);
    if (!(offset == -4 && !ia32_cg_config.use_add_esp_4) &&
        !(offset == -8 && !ia32_cg_config.use_add_esp_8) &&
        !(offset == +4 && !ia32_cg_config.use_sub_esp_4) &&
        !(offset == +8 && !ia32_cg_config.use_sub_esp_8))
        return;

    /* we need a free register for the Pop */
    reg = get_free_gp_reg();
    if (reg == NULL)
        return;

    irg   = current_ir_graph;
    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    stack = be_get_IncSP_pred(node);
    pop   = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);

    stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
    arch_set_irn_register(arch_env, stack, esp);
    val   = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
    arch_set_irn_register(arch_env, val, reg);

    sched_add_before(node, pop);

    keep = sched_next(node);
    if (!be_is_Keep(keep)) {
        ir_node *in[1];
        in[0] = val;
        keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
        sched_add_before(node, keep);
    } else {
        be_Keep_add_node(keep, &ia32_reg_classes[CLASS_ia32_gp], val);
    }
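    /* an 8 byte adjustment needs a second Pop for the remaining 4 bytes */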
    if (offset == +8 || offset == -8) {
        pop2  = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);

        stack = new_r_Proj(irg, block, pop2, mode_Iu, pn_ia32_Pop_stack);
        arch_set_irn_register(arch_env, stack, esp);
        val   = new_r_Proj(irg, block, pop2, mode_Iu, pn_ia32_Pop_res);
        arch_set_irn_register(arch_env, val, reg);

        sched_add_after(pop, pop2);
        be_Keep_add_node(keep, &ia32_reg_classes[CLASS_ia32_gp], val);
    }

    be_peephole_before_exchange(node, stack);
    sched_remove(node);
    exchange(node, stack);
    be_peephole_after_exchange(stack);
}
/**
 * Peephole optimisation for ia32_Const nodes.
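 * e.g. "mov eax, 0" becomes "xor eax, eax", which has a shorter encoding,
 * but destroys the flags, so it is only legal while no flag value is live.
 */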
static void peephole_ia32_Const(ir_node *node)
{
    const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
    const arch_register_t       *reg;
    ir_graph                    *irg  = current_ir_graph;
    ir_node                     *block;
    ir_node                     *produceval;
    ir_node                     *xor;
    ir_node                     *noreg;
    dbg_info                    *dbgi;

    /* try to transform a mov 0, reg into xor reg, reg */
    if (attr->offset != 0 || attr->symconst != NULL)
        return;
    if (ia32_cg_config.use_mov_0)
        return;
    /* xor destroys the flags, so no-one must be using them */
    if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
        return;

    reg = arch_get_irn_register(arch_env, node);
    assert(be_peephole_get_reg_value(reg) == NULL);

    /* create xor(produceval, produceval) */
    block      = get_nodes_block(node);
    dbgi       = get_irn_dbg_info(node);
    produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
    arch_set_irn_register(arch_env, produceval, reg);

    noreg = ia32_new_NoReg_gp(cg);
    xor   = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
                            produceval, produceval);
    arch_set_irn_register(arch_env, xor, reg);

    sched_add_before(node, produceval);
    sched_add_before(node, xor);

    be_peephole_before_exchange(node, xor);
    sched_remove(node);
    exchange(node, xor);
    be_peephole_after_exchange(xor);
}
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
{
    return node == cg->noreg_gp;
}
static ir_node *create_immediate_from_int(ia32_code_gen_t *cg, int val)
{
    ir_graph *irg         = current_ir_graph;
    ir_node  *start_block = get_irg_start_block(irg);
    ir_node  *immediate   = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
                                                  0, val);
    arch_set_irn_register(cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);

    return immediate;
}
static ir_node *create_immediate_from_am(ia32_code_gen_t *cg,
                                         const ir_node *node)
{
    ir_graph  *irg     = get_irn_irg(node);
    ir_node   *block   = get_nodes_block(node);
    int        offset  = get_ia32_am_offs_int(node);
    int        sc_sign = is_ia32_am_sc_sign(node);
    ir_entity *entity  = get_ia32_am_sc(node);
    ir_node   *res;

    res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
    arch_set_irn_register(cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]);
    return res;
}
static int is_am_one(const ir_node *node)
{
    int        offset = get_ia32_am_offs_int(node);
    ir_entity *entity = get_ia32_am_sc(node);

    return offset == 1 && entity == NULL;
}

static int is_am_minus_one(const ir_node *node)
{
    int        offset = get_ia32_am_offs_int(node);
    ir_entity *entity = get_ia32_am_sc(node);

    return offset == -1 && entity == NULL;
}
/**
 * Transforms a LEA into an Add or SHL if possible.
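 * Illustrative sketches:
 *
 *   lea eax, [eax+4]   =>   add eax, 4    (out register == base register)
 *   lea eax, [eax*4]   =>   shl eax, 2    (out register == index register)
 *   lea eax, [eax+1]   =>   inc eax       (when inc/dec are profitable)
 */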
static void peephole_ia32_Lea(ir_node *node)
{
    const arch_env_t      *arch_env = cg->arch_env;
    ir_graph              *irg      = current_ir_graph;
    ir_node               *base;
    ir_node               *index;
    const arch_register_t *base_reg;
    const arch_register_t *index_reg;
    const arch_register_t *out_reg;
    int                    scale;
    int                    has_immediates;
    ir_node               *op1;
    ir_node               *op2;
    dbg_info              *dbgi;
    ir_node               *block;
    ir_node               *res;
    ir_node               *noreg;
    ir_node               *nomem;

    assert(is_ia32_Lea(node));

    /* we can only do this if we are allowed to clobber the flags */
    if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
        return;

    base  = get_irn_n(node, n_ia32_Lea_base);
    index = get_irn_n(node, n_ia32_Lea_index);

    if (is_noreg(cg, base)) {
        base     = NULL;
        base_reg = NULL;
    } else {
        base_reg = arch_get_irn_register(arch_env, base);
    }
    if (is_noreg(cg, index)) {
        index     = NULL;
        index_reg = NULL;
    } else {
        index_reg = arch_get_irn_register(arch_env, index);
    }

    if (base == NULL && index == NULL) {
        /* we shouldn't construct these in the first place... */
        ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
        return;
    }

    out_reg = arch_get_irn_register(arch_env, node);
    scale   = get_ia32_am_scale(node);
    assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
    /* check if we have immediates values (frame entities should already be
     * expressed in the offsets) */
    if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
        has_immediates = 1;
    } else {
        has_immediates = 0;
    }

    /* we can transform leas where the out register is the same as either the
     * base or index register back to an Add or Shl */
    if (out_reg == base_reg) {
        if (index == NULL) {
            if (!has_immediates) {
                ir_fprintf(stderr, "Optimisation warning: found lea which is "
                           "just a copy\n");
                return;
            }
            op1 = base;
            goto make_add_immediate;
        }
        if (scale == 0 && !has_immediates) {
            op1 = base;
            op2 = index;
            goto make_add;
        }
        /* can't create an add */
        return;
    } else if (out_reg == index_reg) {
        if (base == NULL) {
            if (has_immediates && scale == 0) {
                op1 = index;
                goto make_add_immediate;
            } else if (!has_immediates && scale > 0) {
                op1 = index;
                op2 = create_immediate_from_int(cg, scale);
                goto make_shl;
            } else if (!has_immediates) {
                ir_fprintf(stderr, "Optimisation warning: found lea which is "
                           "just a copy\n");
            }
        } else if (scale == 0 && !has_immediates) {
            op1 = index;
            op2 = base;
            goto make_add;
        }
        /* can't create an add */
        return;
    } else {
        /* can't create an add */
        return;
    }
make_add_immediate:
    if (ia32_cg_config.use_incdec) {
        if (is_am_one(node)) {
            dbgi  = get_irn_dbg_info(node);
            block = get_nodes_block(node);
            res   = new_rd_ia32_Inc(dbgi, irg, block, op1);
            arch_set_irn_register(arch_env, res, out_reg);
            goto exchange;
        }
        if (is_am_minus_one(node)) {
            dbgi  = get_irn_dbg_info(node);
            block = get_nodes_block(node);
            res   = new_rd_ia32_Dec(dbgi, irg, block, op1);
            arch_set_irn_register(arch_env, res, out_reg);
            goto exchange;
        }
    }
    op2 = create_immediate_from_am(cg, node);

make_add:
    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    noreg = ia32_new_NoReg_gp(cg);
    nomem = new_NoMem();
    res   = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
    arch_set_irn_register(arch_env, res, out_reg);
    set_ia32_commutative(res);
    goto exchange;

make_shl:
    dbgi  = get_irn_dbg_info(node);
    block = get_nodes_block(node);
    noreg = ia32_new_NoReg_gp(cg);
    res   = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
    arch_set_irn_register(arch_env, res, out_reg);

exchange:
    SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));

    /* add the new ADD/SHL to the schedule */
    DBG_OPT_LEA2ADD(node, res);

    /* exchange the Add and the LEA */
    be_peephole_before_exchange(node, res);
    sched_add_before(node, res);
    sched_remove(node);
    exchange(node, res);
    be_peephole_after_exchange(res);
}
/**
 * Register a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
    assert(op->ops.generic == NULL);
    op->ops.generic = (void*) func;
}
/* Perform peephole-optimizations. */
void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
{
    cg       = new_cg;
    arch_env = cg->arch_env;

    /* register peephole optimisations */
    clear_irp_opcodes_generic_func();
    register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
    //register_peephole_optimisation(op_ia32_Store, peephole_ia32_Store);
    register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
    register_peephole_optimisation(op_ia32_Lea, peephole_ia32_Lea);
    register_peephole_optimisation(op_ia32_Test, peephole_ia32_Test);
    register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
    register_peephole_optimisation(op_be_Return, peephole_ia32_Return);

    be_peephole_opt(cg->birg);
}
/**
 * Removes the node from the schedule if it is not used anymore. If the node
 * is a mode_T node, all its Projs are removed as well.
 *
 * @param node  the node to be removed from the schedule
 */
static INLINE void try_kill(ir_node *node)
{
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge, *next;
        foreach_out_edge_safe(node, edge, next) {
            ir_node *proj = get_edge_src_irn(edge);
            try_kill(proj);
        }
    }

    if (get_irn_n_edges(node) != 0)
        return;

    if (sched_is_scheduled(node)) {
        sched_remove(node);
    }

    be_kill_node(node);
}
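/**
 * Removes a Conv in front of a Store when the Store writes only bits that
 * the conversion passes through unchanged, e.g. an extension from 8 bit in
 * front of an 8 bit Store is redundant.
 */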
static void optimize_conv_store(ir_node *node)
{
    ir_node *pred;
    ir_node *pred_proj;
    ir_mode *conv_mode;
    ir_mode *store_mode;

    if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
        return;

    assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
    pred_proj = get_irn_n(node, n_ia32_Store_val);
    if (is_Proj(pred_proj)) {
        pred = get_Proj_pred(pred_proj);
    } else {
        pred = pred_proj;
    }
    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
        return;
    if (get_ia32_op_type(pred) != ia32_Normal)
        return;

    /* the store only stores the lower bits, so we only need the conv
     * if it shrinks the mode */
    conv_mode  = get_ia32_ls_mode(pred);
    store_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
        return;

    set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
    if (get_irn_n_edges(pred_proj) == 0) {
        be_kill_node(pred_proj);
        if (pred != pred_proj)
            be_kill_node(pred);
    }
}
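/**
 * Removes a Conv behind a Load when the Load already produces the value in
 * the Conv's mode (or a larger one); if only the signedness differs and the
 * Load has a single user, the Load's mode is changed instead.
 */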
static void optimize_load_conv(ir_node *node)
{
    ir_node *pred, *predpred;
    ir_mode *load_mode;
    ir_mode *conv_mode;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
        return;

    assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
    pred = get_irn_n(node, n_ia32_Conv_I2I_val);
    if (!is_Proj(pred))
        return;

    predpred = get_Proj_pred(pred);
    if (!is_ia32_Load(predpred))
        return;

    /* the load is sign extending the upper bits, so we only need the conv
     * if it shrinks the mode */
    load_mode = get_ia32_ls_mode(predpred);
    conv_mode = get_ia32_ls_mode(node);
    if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
        return;

    if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
        /* change the load if it has only 1 user */
        if (get_irn_n_edges(pred) == 1) {
            ir_mode *newmode;
            if (get_mode_sign(conv_mode)) {
                newmode = find_signed_mode(load_mode);
            } else {
                newmode = find_unsigned_mode(load_mode);
            }
            assert(newmode != NULL);
            set_ia32_ls_mode(predpred, newmode);
        } else {
            /* otherwise we have to keep the conv */
            return;
        }
    }

    exchange(node, pred);
}
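/**
 * Merges two consecutive Convs, e.g. a Conv to 16 bit followed by a Conv to
 * 8 bit of the same signedness: the second conversion alone produces the
 * same result.
 */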
static void optimize_conv_conv(ir_node *node)
{
    ir_node *pred_proj, *pred, *result_conv;
    ir_mode *pred_mode, *conv_mode;
    int      conv_mode_bits;
    int      pred_mode_bits;

    if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
        return;

    assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
    pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
    if (is_Proj(pred_proj))
        pred = get_Proj_pred(pred_proj);
    else
        pred = pred_proj;

    if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
        return;

    /* we know that after a conv the upper bits are sign extended,
     * so we only need the 2nd conv if it shrinks the mode */
    conv_mode      = get_ia32_ls_mode(node);
    conv_mode_bits = get_mode_size_bits(conv_mode);
    pred_mode      = get_ia32_ls_mode(pred);
    pred_mode_bits = get_mode_size_bits(pred_mode);

    if (conv_mode_bits == pred_mode_bits
            && get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
        result_conv = pred_proj;
    } else if (conv_mode_bits <= pred_mode_bits) {
        /* if the 2nd conv is smaller than the first conv, then we can always
         * take the 2nd conv */
        if (get_irn_n_edges(pred_proj) == 1) {
            result_conv = pred_proj;
            set_ia32_ls_mode(pred, conv_mode);

            /* Argh: we must change the opcode to 8bit AND copy the register
             * constraints */
            if (get_mode_size_bits(conv_mode) == 8) {
                set_irn_op(pred, op_ia32_Conv_I2I8Bit);
                set_ia32_in_req_all(pred, get_ia32_in_req_all(node));
            }
        } else {
            /* we don't want to end up with 2 loads, so we better do nothing */
            if (get_irn_mode(pred) == mode_T) {
                return;
            }

            result_conv = exact_copy(pred);
            set_ia32_ls_mode(result_conv, conv_mode);

            /* Argh: we must change the opcode to 8bit AND copy the register
             * constraints */
            if (get_mode_size_bits(conv_mode) == 8) {
                set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
                set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
            }
        }
    } else {
        /* if both convs have the same sign, then we can take the smaller one */
        if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
            result_conv = pred_proj;
        } else {
            /* no optimisation is possible if the smaller conv is sign-extending */
            if (mode_is_signed(pred_mode)) {
                return;
            }
            /* we can take the smaller conv if it is unsigned */
            result_conv = pred_proj;
        }
    }

    exchange(node, result_conv);

    if (get_irn_n_edges(pred_proj) == 0) {
        be_kill_node(pred_proj);
        if (pred != pred_proj)
            be_kill_node(pred);
    }
    optimize_conv_conv(result_conv);
}
static void optimize_node(ir_node *node, void *env)
{
    (void) env;

    optimize_load_conv(node);
    optimize_conv_store(node);
    optimize_conv_conv(node);
}
/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg)
{
    irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

    if (cg->dump)
        be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);
}
void ia32_init_optimize(void)
{
    FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}