/*
 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Implements several optimizations for IA32.
 * @author      Matthias Braun, Christian Wuerdig
 */
#include <string.h>

#include "irnode.h"
#include "ircons.h"
#include "irmode.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "iredges.h"
#include "irtools.h"
#include "firm_types.h"
#include "../benode_t.h"
#include "../besched_t.h"
#include "../bepeephole.h"
#include "ia32_new_nodes.h"
#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static ia32_code_gen_t  *cg;

typedef int is_op_func_t(const ir_node *n);
typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem);
/**
 * Checks if a node represents the NOREG value.
 */
static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
	return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
}
/******************************************************************************
 *                                                                            *
 *                          Peephole Optimizations                            *
 *                                                                            *
 ******************************************************************************/

/**
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND
 *       REGISTER ALLOCATION.
 */
// only optimize up to 48 stores behind IncSPs
#define MAXPUSH_OPTIMIZE 48
/**
 * Tries to create Pushs from IncSP, Store combinations.
 */
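/* A sketch of the transformation this function aims for (added illustration;
 * the assembly, register names and offsets are hypothetical, not taken from
 * the source):
 *
 *     sub esp, 8             ; IncSP 8
 *     mov [esp+4], eax       ; Store into slot 1
 *     mov [esp],   ebx       ; Store into slot 0
 *
 * becomes
 *
 *     push eax
 *     push ebx
 */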
static void ia32_create_Pushs(ir_node *irn)
{
	int       i, offset;
	ir_node  *node, *curr_sp;
	ir_node  *stores[MAXPUSH_OPTIMIZE];
	ir_node  *block  = get_nodes_block(irn);
	ir_graph *irg    = cg->irg;
	ir_mode  *spmode = get_irn_mode(irn);

	memset(stores, 0, sizeof(stores));

	assert(be_is_IncSP(irn));

	offset = be_get_IncSP_offset(irn);
	/*
	 * We first walk the schedule after the IncSP node as long as we find
	 * suitable stores that could be transformed to a push.
	 * We save them into the stores array which is sorted by the frame offset/4
	 * attached to the node.
	 */
	for(node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
		ir_node *mem;
		int      offset, storeslot;

		// it has to be a store
		if(!is_ia32_Store(node))
			break;
		// it has to use our sp value
		if(get_irn_n(node, n_ia32_base) != irn)
			continue;
		// store has to be attached to NoMem
		mem = get_irn_n(node, n_ia32_mem);
		if(!is_NoMem(mem))
			continue;
		/* unfortunately we can't support the full AMs possible for push at the
		 * moment. TODO: fix this */
		if(get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
			break;
		offset    = get_ia32_am_offs_int(node);
		storeslot = offset / 4;
		if(storeslot >= MAXPUSH_OPTIMIZE)
			continue;

		// storing into the same slot twice is bad (and shouldn't happen...)
		if(stores[storeslot] != NULL)
			break;
		// storing at half-slots is bad
		if(offset % 4 != 0)
			break;

		stores[storeslot] = node;
	}
	curr_sp = be_get_IncSP_pred(irn);

	// walk the stores in inverse order and create pushs for them
	i = (offset / 4) - 1;
	if(i >= MAXPUSH_OPTIMIZE) {
		i = MAXPUSH_OPTIMIZE - 1;
	}
	for( ; i >= 0; --i) {
		const arch_register_t *spreg;
		ir_node *push;
		ir_node *val, *mem, *mem_proj;
		ir_node *store = stores[i];
		ir_node *noreg = ia32_new_NoReg_gp(cg);

		if(store == NULL || is_Bad(store))
			break;

		val   = get_irn_n(store, n_ia32_unary_op);
		mem   = get_irn_n(store, n_ia32_mem);
		spreg = arch_get_irn_register(cg->arch_env, curr_sp);

		push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, curr_sp, val);

		set_ia32_am_support(push, ia32_am_Source, ia32_am_unary);

		sched_add_before(irn, push);
		// create stackpointer proj
		curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
		arch_set_irn_register(cg->arch_env, curr_sp, spreg);

		// create memory proj
		mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

		// use the memproj now
		exchange(store, mem_proj);
		// we can remove the store now
		sched_remove(store);

		offset -= 4;
	}

	be_set_IncSP_offset(irn, offset);

	// can we remove the IncSP now?
	if(offset == 0) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(irn, edge, next) {
			ir_node *arg = get_edge_src_irn(edge);
			int      pos = get_edge_src_pos(edge);
			set_irn_n(arg, pos, curr_sp);
		}
		set_irn_n(irn, 0, new_Bad());
		sched_remove(irn);
	} else {
		set_irn_n(irn, 0, curr_sp);
	}
}
/**
 * Performs peephole optimizations for IncSP nodes.
 */
static void ia32_peephole_optimize_node(ir_node *node, void *env)
{
	(void) env;
	if (be_is_IncSP(node)) {
		ia32_create_Pushs(node);
	}
}
/**
 * Tries to optimize two consecutive IncSP nodes.
 */
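/* A sketch of the idea (added illustration, not from the original comments):
 *
 *     sub esp, 8
 *     sub esp, 4
 *
 * is folded into a single
 *
 *     sub esp, 12
 *
 * provided the first IncSP has no other users.
 */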
static void peephole_IncSP_IncSP(ir_node *node)
{
	int      pred_offs, curr_offs, offs;
	ir_node *pred = be_get_IncSP_pred(node);
	ir_node *predpred;

	if(!be_is_IncSP(pred))
		return;
	/* only optimize if the predecessor is not used elsewhere */
	if(get_irn_n_edges(pred) > 1)
		return;
	pred_offs = be_get_IncSP_offset(pred);
	curr_offs = be_get_IncSP_offset(node);

	if(pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
		if(curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
			return;
		}
		offs = 0;
	} else if(pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		if(curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
			return;
		}
		offs = 0;
	} else if(curr_offs == BE_STACK_FRAME_SIZE_EXPAND
	          || curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		return;
	} else {
		offs = curr_offs + pred_offs;
	}
	/* add pred offset to ours and remove pred IncSP */
	be_set_IncSP_offset(node, offs);

	predpred = be_get_IncSP_pred(pred);
	be_peephole_node_replaced(pred, predpred);

	/* rewire dependency edges */
	edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);
	be_set_IncSP_pred(node, predpred);
	sched_remove(pred);

	be_kill_node(pred);
}
static const arch_register_t *get_free_gp_reg(void)
{
	int i;

	for(i = 0; i < N_ia32_gp_REGS; ++i) {
		const arch_register_t *reg = &ia32_gp_regs[i];
		if(arch_register_type_is(reg, ignore))
			continue;
		if(be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
			return &ia32_gp_regs[i];
	}

	return NULL;
}
static void peephole_be_IncSP(ir_node *node)
{
	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
	const arch_register_t *reg;
	ir_graph              *irg;
	dbg_info              *dbgi;
	ir_node               *block, *keep, *val, *pop, *noreg, *stack;
	int                    offset;

	/* first optimize incsp->incsp combinations */
	peephole_IncSP_IncSP(node);
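	/* Rationale (added explanation): "pop reg" into an otherwise unused
	 * register adjusts esp by 4 just like "add esp, 4", but has a smaller
	 * encoding; the value loaded into the scratch register is simply
	 * discarded. */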
	/* replace IncSP -4 by Pop freereg when possible */
	offset = be_get_IncSP_offset(node);
	if(offset != -4)
		return;

	if(arch_get_irn_register(arch_env, node) != esp)
		return;

	reg = get_free_gp_reg();
	if(reg == NULL)
		return;
	irg   = current_ir_graph;
	dbgi  = get_irn_dbg_info(node);
	block = get_nodes_block(node);
	noreg = ia32_new_NoReg_gp(cg);
	stack = be_get_IncSP_pred(node);
	pop   = new_rd_ia32_Pop(dbgi, irg, block, noreg, noreg, new_NoMem(), stack);

	stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
	arch_set_irn_register(arch_env, stack, esp);
	val   = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
	arch_set_irn_register(arch_env, val, reg);

	sched_add_before(node, pop);
	keep = sched_next(node);
	if(!be_is_Keep(keep)) {
		ir_node *in[1];

		in[0] = val;
		keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
		sched_add_before(node, keep);
	} else {
		be_Keep_add_node(keep, &ia32_reg_classes[CLASS_ia32_gp], val);
	}

	be_peephole_node_replaced(node, stack);

	exchange(node, stack);
	sched_remove(node);
}
/**
 * Peephole optimisation for ia32_Const nodes.
 */
static void peephole_ia32_Const(ir_node *node)
{
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
	const arch_register_t       *reg;
	ir_graph                    *irg = current_ir_graph;
	ir_node                     *block, *produceval, *xor, *noreg;
	dbg_info                    *dbgi;
	/* try to transform a mov 0, reg to xor reg reg */
	if(attr->offset != 0 || attr->symconst != NULL)
		return;

	/* xor destroys the flags, so no-one must be using them */
	if(be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
		return;
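	/* Note (added explanation): "xor reg, reg" has a shorter encoding than
	 * "mov reg, 0" and is a well-known zeroing idiom, but it writes EFLAGS;
	 * hence the check above that nobody is using the flags here. */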
	reg = arch_get_irn_register(arch_env, node);
	assert(be_peephole_get_reg_value(reg) == NULL);
	/* create xor(produceval, produceval) */
	block = get_nodes_block(node);
	dbgi  = get_irn_dbg_info(node);
	produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
	arch_set_irn_register(arch_env, produceval, reg);

	noreg = ia32_new_NoReg_gp(cg);
	xor = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
	                      produceval, produceval);
	arch_set_irn_register(arch_env, xor, reg);

	sched_add_before(node, produceval);
	sched_add_before(node, xor);

	be_peephole_node_replaced(node, xor);
	exchange(node, xor);
	sched_remove(node);
}
/**
 * Registers a peephole optimisation function.
 */
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
	assert(op->ops.generic == NULL);
	op->ops.generic = (void*) func;
}
/* Perform peephole-optimizations. */
void ia32_peephole_optimization(ir_graph *irg, ia32_code_gen_t *new_cg)
{
	cg       = new_cg;
	arch_env = cg->arch_env;

	/* register peephole optimisations */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
	register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);

	be_peephole_opt(cg->birg);
	irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, NULL);
}
/**
 * Removes node from schedule if it is not used anymore. If node is a mode_T
 * node all its Projs are removed as well.
 * @param node  The node to be removed from the schedule
 */
static INLINE void try_kill(ir_node *node)
{
	if(get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(node, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);
			try_kill(proj);
		}
	}

	if(get_irn_n_edges(node) != 0)
		return;
	if (sched_is_scheduled(node)) {
		sched_remove(node);
	}
	be_kill_node(node);
}
static void optimize_conv_store(ir_node *node)
{
	ir_node *pred;
	ir_mode *conv_mode, *store_mode;

	if(!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
		return;

	pred = get_irn_n(node, 2);
	if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;
	/* the store only stores the lower bits, so we only need the conv
	 * if it shrinks the mode */
	conv_mode  = get_ia32_ls_mode(pred);
	store_mode = get_ia32_ls_mode(node);
	if(get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
		return;
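	/* Illustrative example (added, not from the original comments): a
	 * Conv_I2I narrowing a 32-bit value to 16 bits in front of an 8-bit
	 * store is redundant, since only the low 8 bits reach memory anyway,
	 * so the store can read the conv's operand directly. */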
	set_irn_n(node, 2, get_irn_n(pred, 2));
	if(get_irn_n_edges(pred) == 0) {
		be_kill_node(pred);
	}
}
static void optimize_load_conv(ir_node *node)
{
	ir_node *pred, *predpred;
	ir_mode *load_mode, *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	pred = get_irn_n(node, 2);
	if(!is_Proj(pred))
		return;

	predpred = get_Proj_pred(pred);
	if(!is_ia32_Load(predpred))
		return;
	/* the load is sign extending the upper bits, so we only need the conv
	 * if it shrinks the mode */
	load_mode = get_ia32_ls_mode(predpred);
	conv_mode = get_ia32_ls_mode(node);
	if(get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
		return;
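	/* Example of the sign-mismatch case handled below (added illustration):
	 * an unsigned 8-bit load followed by a signed 8-bit conv can be folded
	 * by turning the load itself into a sign-extending one, but only if the
	 * load result has no other users. */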
	if(get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
		/* change the load if it has only 1 user */
		if(get_irn_n_edges(pred) == 1) {
			ir_mode *newmode;
			if(get_mode_sign(conv_mode)) {
				newmode = find_signed_mode(load_mode);
			} else {
				newmode = find_unsigned_mode(load_mode);
			}
			assert(newmode != NULL);
			set_ia32_ls_mode(predpred, newmode);
		} else {
			/* otherwise we have to keep the conv */
			return;
		}
	}

	/* kill the conv */
	exchange(node, pred);
}
static void optimize_conv_conv(ir_node *node)
{
	ir_node *pred_proj, *pred, *result_conv;
	ir_mode *pred_mode, *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
		return;

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
	if(is_Proj(pred_proj))
		pred = get_Proj_pred(pred_proj);
	else
		pred = pred_proj;

	if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
		return;
	/* we know that after a conv, the upper bits are sign extended
	 * so we only need the 2nd conv if it shrinks the mode */
	conv_mode = get_ia32_ls_mode(node);
	pred_mode = get_ia32_ls_mode(pred);

	/* if the 2nd conv is smaller than the first conv, then we can always take
	 * the 2nd conv */
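	/* For instance (added illustration): a Conv to 16 bits followed by a
	 * Conv to 8 bits is equivalent to a single Conv to 8 bits. */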
	if(get_mode_size_bits(conv_mode) <= get_mode_size_bits(pred_mode)) {
		if(get_irn_n_edges(pred_proj) == 1) {
			result_conv = pred_proj;
			set_ia32_ls_mode(pred, conv_mode);

			/* Argh: We must change the opcode to 8bit AND copy the register constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(pred, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(pred, get_ia32_in_req_all(node));
			}
		} else {
			/* we don't want to end up with 2 loads, so we better do nothing */
			if(get_irn_mode(pred) == mode_T) {
				return;
			}

			result_conv = exact_copy(pred);
			set_ia32_ls_mode(result_conv, conv_mode);

			/* Argh: We must change the opcode to 8bit AND copy the register constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
			}
		}
	} else {
		/* if both convs have the same sign, then we can take the smaller one */
		if(get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
			result_conv = pred_proj;
		} else {
			/* no optimisation possible if the smaller conv is sign-extending */
			if(mode_is_signed(pred_mode)) {
				return;
			}
			/* we can take the smaller conv if it is unsigned */
			result_conv = pred_proj;
		}
	}
	/* kill the conv */
	exchange(node, result_conv);

	if(get_irn_n_edges(pred) == 0) {
		be_kill_node(pred);
	}
	optimize_conv_conv(result_conv);
}
static void optimize_node(ir_node *node, void *env)
{
	(void) env;

	optimize_load_conv(node);
	optimize_conv_store(node);
	optimize_conv_conv(node);
}
/**
 * Performs conv and address mode optimization.
 */
void ia32_optimize_graph(ia32_code_gen_t *cg)
{
	irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

	if (cg->dump)
		be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);
}
void ia32_init_optimize(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");
}