/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @brief  Implements several optimizations for IA32.
 * @author Matthias Braun, Christian Wuerdig
 */
#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"
#include "../bepeephole.h"

#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static ia32_code_gen_t  *cg;

typedef int is_op_func_t(const ir_node *n);
typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block,
                             ir_node *base, ir_node *index, ir_node *mem);
/**
 * Checks if a node represents the NOREG value.
 */
static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
	return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
}
/********************************************************************************************************
 *                                      Peephole Optimizations                                          *
 ********************************************************************************************************/

/**
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND
 *       REGISTER ALLOCATION.
 */
// only optimize up to 48 stores behind IncSPs
#define MAXPUSH_OPTIMIZE    48
/**
 * Tries to create Pushes from IncSP, Store combinations.
 */
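/* The transformation turns, e.g. (an Intel-syntax sketch, not actual output):
 *
 *     sub esp, 8
 *     mov [esp+4], ebx        =>      push ebx
 *     mov [esp],   eax                push eax
 *
 * The Pushes are created in inverse slot order (highest offset first), so
 * the resulting stack contents stay identical. */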
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
	int       i;
	int       offset;
	ir_node  *node;
	ir_node  *curr_sp;
	ir_node  *stores[MAXPUSH_OPTIMIZE];
	ir_node  *block  = get_nodes_block(irn);
	ir_graph *irg    = cg->irg;
	ir_mode  *spmode = get_irn_mode(irn);

	memset(stores, 0, sizeof(stores));

	assert(be_is_IncSP(irn));

	offset = be_get_IncSP_offset(irn);
	/*
	 * We first walk the schedule after the IncSP node as long as we find
	 * suitable stores that could be transformed into a Push.
	 * We save them into the stores array, which is indexed by the frame
	 * offset/4 attached to the node.
	 */
	for (node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
		ir_node *mem;
		int      offset;
		int      storeslot;

		// it has to be a store
		if (!is_ia32_Store(node))
			break;

		// it has to use our sp value
		if (get_irn_n(node, n_ia32_base) != irn)
			continue;
		// store has to be attached to NoMem
		mem = get_irn_n(node, n_ia32_mem);
		if (!is_NoMem(mem))
			continue;

		/* unfortunately we can't support the full AMs possible for push at the
		 * moment. TODO: fix this */
		if (get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;

		offset = get_ia32_am_offs_int(node);

		storeslot = offset / 4;
		if (storeslot >= MAXPUSH_OPTIMIZE)
			continue;

		// storing into the same slot twice is bad (and shouldn't happen...)
		if (stores[storeslot] != NULL)
			break;

		// storing at half-slots is bad
		if (offset % 4 != 0)
			break;

		stores[storeslot] = node;
	}
	curr_sp = be_get_IncSP_pred(irn);

	// walk the stores in inverse order and create Pushes for them
	i = (offset / 4) - 1;
	if (i >= MAXPUSH_OPTIMIZE) {
		i = MAXPUSH_OPTIMIZE - 1;
	}

	for ( ; i >= 0; --i) {
		const arch_register_t *spreg;
		ir_node *push;
		ir_node *val, *mem, *mem_proj;
		ir_node *store = stores[i];
		ir_node *noreg = ia32_new_NoReg_gp(cg);

		if (store == NULL || is_Bad(store))
			break;

		val   = get_irn_n(store, n_ia32_unary_op);
		mem   = get_irn_n(store, n_ia32_mem);
		spreg = arch_get_irn_register(cg->arch_env, curr_sp);

		push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg,
		                        noreg, mem, curr_sp, val);

		set_ia32_am_support(push, ia32_am_Source, ia32_am_unary);

		sched_add_before(irn, push);

		// create stackpointer proj
		curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
		arch_set_irn_register(cg->arch_env, curr_sp, spreg);

		// create memory proj
		mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

		// use the memproj now
		exchange(store, mem_proj);

		// we can remove the store now
		sched_remove(store);

		offset -= 4;
	}
	be_set_IncSP_offset(irn, offset);
	be_set_IncSP_pred(irn, curr_sp);
	be_peephole_node_replaced(irn, irn);
}
/**
 * Tries to merge two consecutive IncSP nodes into one.
 */
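/* e.g. an IncSP +8 whose only user is an IncSP +4 becomes a single
 * IncSP +12; the symbolic EXPAND/SHRINK offsets are only folded against
 * their matching counterpart. */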
static void peephole_IncSP_IncSP(ir_node *node)
{
	int      pred_offs, curr_offs, offs;
	ir_node *predpred;
	ir_node *pred = be_get_IncSP_pred(node);

	if (!be_is_IncSP(pred))
		return;
	/* pred must have our IncSP as its only user */
	if (get_irn_n_edges(pred) > 1)
		return;

	pred_offs = be_get_IncSP_offset(pred);
	curr_offs = be_get_IncSP_offset(node);

	if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
		if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK)
			return;
		offs = 0;
	} else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND)
			return;
		offs = 0;
	} else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND
	           || curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		return;
	} else {
		offs = curr_offs + pred_offs;
	}

	/* add pred offset to ours and remove pred IncSP */
	be_set_IncSP_offset(node, offs);

	predpred = be_get_IncSP_pred(pred);
	be_peephole_node_replaced(pred, predpred);

	/* rewire dependency edges */
	edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);
	be_set_IncSP_pred(node, predpred);

	sched_remove(pred);
}
/**
 * Returns a free GP register, or NULL if none is currently available.
 */
static const arch_register_t *get_free_gp_reg(void)
{
	int i;

	for (i = 0; i < N_ia32_gp_REGS; ++i) {
		const arch_register_t *reg = &ia32_gp_regs[i];
		if (arch_register_type_is(reg, ignore))
			continue;

		if (be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
			return &ia32_gp_regs[i];
	}

	return NULL;
}
static void peephole_be_IncSP(ir_node *node)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	const arch_register_t *reg;
	ir_graph *irg;
	dbg_info *dbgi;
	ir_node  *block, *keep, *val, *pop, *noreg, *stack;
	int       offset;

	/* transform IncSP->Store combinations to Push where possible */
	peephole_IncSP_Store_to_push(node);

	/* merge consecutive IncSP nodes */
	peephole_IncSP_IncSP(node);

	/* replace IncSP -4 by Pop freereg when possible */
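	/* (e.g. "add esp, 4" (3 bytes) becomes "pop ecx" (1 byte) into a
	 *  register that currently holds no live value; a sketch, the actual
	 *  register is chosen by get_free_gp_reg() above) */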
	offset = be_get_IncSP_offset(node);
	if (offset != -4)
		return;

	if (arch_get_irn_register(arch_env, node) != esp)
		return;

	reg = get_free_gp_reg();
	if (reg == NULL)
		return;

	irg   = current_ir_graph;
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	stack = be_get_IncSP_pred(node);
	pop   = new_rd_ia32_Pop(dbgi, irg, block, noreg, noreg, new_NoMem(), stack);

	stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
	arch_set_irn_register(arch_env, stack, esp);
	val   = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
	arch_set_irn_register(arch_env, val, reg);

	sched_add_before(node, pop);

	keep = sched_next(node);
	if (!be_is_Keep(keep)) {
		ir_node *in[1];

		in[0] = val;
		keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
		sched_add_before(node, keep);
	} else {
		be_Keep_add_node(keep, &ia32_reg_classes[CLASS_ia32_gp], val);
	}

	be_peephole_node_replaced(node, stack);
	exchange(node, stack);
}
/**
 * Peephole optimisation for ia32_Const nodes.
 */
static void peephole_ia32_Const(ir_node *node)
{
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
	const arch_register_t       *reg;
	ir_graph *irg = current_ir_graph;
	ir_node  *block;
	dbg_info *dbgi;
	ir_node  *produceval;
	ir_node  *xor;
	ir_node  *noreg;

	/* try to transform a mov 0, reg to xor reg reg */
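	/* ("mov eax, 0" is a 5-byte instruction while "xor eax, eax" takes only
	 *  2 bytes; the xor clobbers EFLAGS, hence the flags check below) */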
	if (attr->offset != 0 || attr->symconst != NULL)
		return;
	/* xor destroys the flags, so no-one must be using them */
	if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;

	reg = arch_get_irn_register(arch_env, node);
	assert(be_peephole_get_reg_value(reg) == NULL);

	/* create xor(produceval, produceval) */
	block = get_nodes_block(node);
	dbgi  = get_irn_dbg_info(node);
	produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
	arch_set_irn_register(arch_env, produceval, reg);

	noreg = ia32_new_NoReg_gp(cg);
	xor   = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
	                        produceval, produceval);
	arch_set_irn_register(arch_env, xor, reg);

	sched_add_before(node, produceval);
	sched_add_before(node, xor);

	be_peephole_node_replaced(node, xor);
	exchange(node, xor);
}
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
{
	return node == cg->noreg_gp;
}
static ir_node *create_immediate_from_int(ia32_code_gen_t *cg, int val)
{
	ir_graph *irg         = current_ir_graph;
	ir_node  *start_block = get_irg_start_block(irg);
	ir_node  *immediate   = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
	                                              0, val);
	arch_set_irn_register(cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);

	return immediate;
}
static ir_node *create_immediate_from_am(ia32_code_gen_t *cg,
                                         const ir_node *node)
{
	ir_graph  *irg     = get_irn_irg(node);
	ir_node   *block   = get_nodes_block(node);
	int        offset  = get_ia32_am_offs_int(node);
	int        sc_sign = is_ia32_am_sc_sign(node);
	ir_entity *entity  = get_ia32_am_sc(node);
	ir_node   *res;

	res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
	arch_set_irn_register(cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]);
	return res;
}
static int is_am_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == 1 && entity == NULL;
}

static int is_am_minus_one(const ir_node *node)
{
	int        offset = get_ia32_am_offs_int(node);
	ir_entity *entity = get_ia32_am_sc(node);

	return offset == -1 && entity == NULL;
}
/**
 * Transforms a Lea into an Add or Shl where possible.
 */
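/* For example (a sketch, Intel syntax):
 *   lea eax, [eax+7]  =>  add eax, 7   (out register == base register)
 *   lea eax, [4*eax]  =>  shl eax, 2   (out register == index register)
 *   lea eax, [eax+1]  =>  inc eax      (with IA32_OPT_INCDEC enabled)
 */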
static void peephole_ia32_Lea(ir_node *node)
{
	const arch_env_t      *arch_env = cg->arch_env;
	ir_graph              *irg      = current_ir_graph;
	ir_node               *base;
	ir_node               *index;
	const arch_register_t *base_reg;
	const arch_register_t *index_reg;
	const arch_register_t *out_reg;
	int                    scale;
	int                    has_immediates;
	ir_node               *op1, *op2;
	dbg_info              *dbgi;
	ir_node               *block, *res, *noreg, *nomem;

	assert(is_ia32_Lea(node));

	/* we can only do this if we are allowed to clobber the flags */
	if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;

	base  = get_irn_n(node, n_ia32_Lea_base);
	index = get_irn_n(node, n_ia32_Lea_index);

	if (is_noreg(cg, base)) {
		base     = NULL;
		base_reg = NULL;
	} else {
		base_reg = arch_get_irn_register(arch_env, base);
	}
	if (is_noreg(cg, index)) {
		index     = NULL;
		index_reg = NULL;
	} else {
		index_reg = arch_get_irn_register(arch_env, index);
	}

	if (base == NULL && index == NULL) {
		/* we shouldn't construct these in the first place... */
		ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
		return;
	}
	out_reg = arch_get_irn_register(arch_env, node);
	scale   = get_ia32_am_scale(node);
	assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
	/* check if we have immediate values (frame entities should already be
	 * expressed in the offsets) */
	if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
		has_immediates = 1;
	} else {
		has_immediates = 0;
	}
	/* we can transform Leas where the out register is the same as either the
	 * base or index register back to an Add or Shl */
	if (out_reg == base_reg) {
		if (index == NULL) {
			if (!has_immediates) {
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
			}
			op1 = base;
			goto make_add_immediate;
		}
		if (scale == 0 && !has_immediates) {
			op1 = base;
			op2 = index;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else if (out_reg == index_reg) {
		if (base == NULL) {
			if (has_immediates && scale == 0) {
				op1 = index;
				goto make_add_immediate;
			} else if (!has_immediates && scale > 0) {
				op1 = index;
				op2 = create_immediate_from_int(cg, scale);
				goto make_shl;
			} else if (!has_immediates) {
				ir_fprintf(stderr, "Optimisation warning: found lea which is "
				           "just a copy\n");
			}
		} else if (scale == 0 && !has_immediates) {
			op1 = index;
			op2 = base;
			goto make_add;
		}
		/* can't create an add */
		return;
	} else {
		/* can't create an add */
		return;
	}
make_add_immediate:
	if (cg->isa->opt & IA32_OPT_INCDEC) {
		if (is_am_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_rd_ia32_Inc(dbgi, irg, block, op1);
			arch_set_irn_register(arch_env, res, out_reg);
			goto exchange;
		}
		if (is_am_minus_one(node)) {
			dbgi  = get_irn_dbg_info(node);
			block = get_nodes_block(node);
			res   = new_rd_ia32_Dec(dbgi, irg, block, op1);
			arch_set_irn_register(arch_env, res, out_reg);
			goto exchange;
		}
	}
	op2 = create_immediate_from_am(cg, node);

make_add:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	nomem = new_NoMem();
	res   = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2);
	arch_set_irn_register(arch_env, res, out_reg);
	set_ia32_commutative(res);
	goto exchange;
make_shl:
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	res   = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
	arch_set_irn_register(arch_env, res, out_reg);
	goto exchange;
exchange:
	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));

	/* add new ADD/SHL to schedule */
	sched_add_before(node, res);

	DBG_OPT_LEA2ADD(node, res);

	/* remove the old LEA */
	sched_remove(node);

	/* exchange the Add and the LEA */
	be_peephole_node_replaced(node, res);
	exchange(node, res);
}
/**
 * Registers a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
	assert(op->ops.generic == NULL);
	op->ops.generic = (void*) func;
}
/* Perform peephole-optimizations. */
void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
{
	cg       = new_cg;
	arch_env = cg->arch_env;

	/* register peephole optimisations */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
	register_peephole_optimisation(op_be_IncSP,   peephole_be_IncSP);
	register_peephole_optimisation(op_ia32_Lea,   peephole_ia32_Lea);

	be_peephole_opt(cg->birg);
}
/**
 * Removes a node from the schedule if it is not used anymore. If the node is
 * a mode_T node, all its Projs are removed as well.
 *
 * @param node  the node to be removed
 */
static INLINE void try_kill(ir_node *node)
{
	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(node, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);
			try_kill(proj);
		}
	}

	if (get_irn_n_edges(node) != 0)
		return;

	if (sched_is_scheduled(node)) {
		sched_remove(node);
	}

	be_kill_node(node);
}
static void optimize_conv_store(ir_node *node)
{
	ir_node *pred;
	ir_mode *conv_mode;
	ir_mode *store_mode;

	if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
		return;

	pred = get_irn_n(node, 2);
	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;

	/* the store only stores the lower bits, so we only need the conv
	 * if it shrinks the mode */
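	/* (e.g. a Conv from 32 to 8 bit in front of an 8-bit Store is
	 *  redundant, since the Store truncates to its ls_mode anyway) */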
	conv_mode  = get_ia32_ls_mode(pred);
	store_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
		return;

	set_irn_n(node, 2, get_irn_n(pred, 2));
	if (get_irn_n_edges(pred) == 0) {
		be_kill_node(pred);
	}
}
static void optimize_load_conv(ir_node *node)
{
	ir_node *pred, *predpred;
	ir_mode *load_mode;
	ir_mode *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	pred = get_irn_n(node, 2);
	if (!is_Proj(pred))
		return;

	predpred = get_Proj_pred(pred);
	if (!is_ia32_Load(predpred))
		return;

	/* the load already sign/zero extends the upper bits, so we only need
	 * the conv if it shrinks the mode */
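	/* (e.g. an 8-bit Load already extends its result to full register
	 *  width, so a following Conv from 8 to 32 bit is a no-op as long as
	 *  the signedness matches) */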
	load_mode = get_ia32_ls_mode(predpred);
	conv_mode = get_ia32_ls_mode(node);
	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
		return;

	if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
		/* change the load if it has only 1 user */
		if (get_irn_n_edges(pred) == 1) {
			ir_mode *newmode;
			if (get_mode_sign(conv_mode)) {
				newmode = find_signed_mode(load_mode);
			} else {
				newmode = find_unsigned_mode(load_mode);
			}
			assert(newmode != NULL);
			set_ia32_ls_mode(predpred, newmode);
		} else {
			/* otherwise we have to keep the conv */
			return;
		}
	}

	/* kill the conv */
	exchange(node, pred);
}
static void optimize_conv_conv(ir_node *node)
{
	ir_node *pred_proj, *pred, *result_conv;
	ir_mode *pred_mode, *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
	if (is_Proj(pred_proj)) {
		pred = get_Proj_pred(pred_proj);
	} else {
		pred = pred_proj;
	}

	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;

	/* we know that after a conv the upper bits are sign/zero extended, so
	 * we only need the 2nd conv if it shrinks the mode */
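	/* (e.g. Conv 32->16 followed by Conv 16->8 folds into a single
	 *  Conv 32->8; a wider second conv can only be dropped when the
	 *  signedness works out, see below) */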
	conv_mode = get_ia32_ls_mode(node);
	pred_mode = get_ia32_ls_mode(pred);
	/* if the 2nd conv is smaller than the first conv, then we can always
	 * take the 2nd conv */
	if (get_mode_size_bits(conv_mode) <= get_mode_size_bits(pred_mode)) {
		if (get_irn_n_edges(pred_proj) == 1) {
			result_conv = pred_proj;
			set_ia32_ls_mode(pred, conv_mode);

			/* Argh: We must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(pred, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(pred, get_ia32_in_req_all(node));
			}
		} else {
			/* we don't want to end up with 2 loads, so we better do nothing */
			if (get_irn_mode(pred) == mode_T) {
				return;
			}

			result_conv = exact_copy(pred);
			set_ia32_ls_mode(result_conv, conv_mode);

			/* Argh: We must change the opcode to 8bit AND copy the register
			 * constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
			}
		}
	} else {
		/* if both convs have the same sign, then we can take the smaller one */
		if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
			result_conv = pred_proj;
		} else {
			/* no optimisation is possible if the smaller conv is sign-extending */
			if (mode_is_signed(pred_mode)) {
				return;
			}
			/* we can take the smaller conv if it is unsigned */
			result_conv = pred_proj;
		}
	}

	/* kill the conv */
	exchange(node, result_conv);

	if (get_irn_n_edges(pred) == 0) {
		be_kill_node(pred);
	}
	optimize_conv_conv(result_conv);
}
static void optimize_node(ir_node *node, void *env)
{
	(void) env;

	optimize_load_conv(node);
	optimize_conv_store(node);
	optimize_conv_conv(node);
}
/**
 * Performs conv and address mode optimizations.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg)
{
	irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

	if (cg->dump)
		be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);
}
void ia32_init_optimize(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}