 * Copyright (C) 1995-2007 University of Karlsruhe.  All rights reserved.
 * This file is part of libFirm.
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 * @brief       Implements several optimizations for IA32.
 * @author      Matthias Braun, Christian Wuerdig
#include "firm_types.h"

#include "../benode_t.h"
#include "../besched_t.h"
#include "../bepeephole.h"

#include "ia32_new_nodes.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static ia32_code_gen_t  *cg;

typedef int is_op_func_t(const ir_node *n);
typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem);
 * Checks if a node represents the NOREG value.
static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
	return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
/********************************************************************************************************
 *                                    Peephole Optimization
 ********************************************************************************************************/
 * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.

// only optimize up to 48 stores behind IncSPs
#define MAXPUSH_OPTIMIZE	48
 * Tries to create Push nodes from IncSP, Store combinations.
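 *
 * A rough sketch of the intended transformation (assembly view, assuming
 * 4-byte stack slots):
 *     sub  esp, 8
 *     mov  [esp+4], eax
 *     mov  [esp],   ebx
 * becomes
 *     push eax
 *     push ebx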
static void ia32_create_Pushs(ir_node *irn)
	ir_node  *stores[MAXPUSH_OPTIMIZE];
	ir_node  *block  = get_nodes_block(irn);
	ir_graph *irg    = cg->irg;
	ir_mode  *spmode = get_irn_mode(irn);

	memset(stores, 0, sizeof(stores));

	assert(be_is_IncSP(irn));

	offset = be_get_IncSP_offset(irn);
	 * We first walk the schedule after the IncSP node as long as we find
	 * suitable Stores that could be transformed into a Push.
	 * We save them in the stores array, which is indexed by the frame offset/4
	 * attached to each node.
	for(node = sched_next(irn); !sched_is_end(node); node = sched_next(node)) {
		// it has to be a store
		if(!is_ia32_Store(node))

		// it has to use our sp value
		if(get_irn_n(node, n_ia32_base) != irn)

		// store has to be attached to NoMem
		mem = get_irn_n(node, n_ia32_mem);

		/* unfortunately we can't support the full AMs possible for push at the
		 * moment. TODO: fix this */
		if(get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))

		offset = get_ia32_am_offs_int(node);

		storeslot = offset / 4;
		if(storeslot >= MAXPUSH_OPTIMIZE)

		// storing into the same slot twice is bad (and shouldn't happen...)
		if(stores[storeslot] != NULL)

		// storing at half-slots is bad

		stores[storeslot] = node;
	curr_sp = be_get_IncSP_pred(irn);

	// walk the stores in inverse order and create pushs for them
	i = (offset / 4) - 1;
	if(i >= MAXPUSH_OPTIMIZE) {
		i = MAXPUSH_OPTIMIZE - 1;

	for( ; i >= 0; --i) {
		const arch_register_t *spreg;
		ir_node *val, *mem, *mem_proj;
		ir_node *store = stores[i];
		ir_node *noreg = ia32_new_NoReg_gp(cg);

		if(store == NULL || is_Bad(store))

		val   = get_irn_n(store, n_ia32_unary_op);
		mem   = get_irn_n(store, n_ia32_mem);
		spreg = arch_get_irn_register(cg->arch_env, curr_sp);

		push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);

		set_ia32_am_support(push, ia32_am_Source, ia32_am_unary);

		sched_add_before(irn, push);

		// create stack pointer Proj
		curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
		arch_set_irn_register(cg->arch_env, curr_sp, spreg);

		// create memory Proj
		mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);

		// use the memory Proj now
		exchange(store, mem_proj);

		// we can remove the store now

	be_set_IncSP_offset(irn, offset);

	// can we remove the IncSP now?
		const ir_edge_t *edge, *next;

		foreach_out_edge_safe(irn, edge, next) {
			ir_node *arg = get_edge_src_irn(edge);
			int      pos = get_edge_src_pos(edge);

			set_irn_n(arg, pos, curr_sp);

		set_irn_n(irn, 0, new_Bad());

		set_irn_n(irn, 0, curr_sp);
 * Tries to merge two consecutive IncSP nodes.
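 *
 * For example, an IncSP of +8 whose only user is an IncSP of +12 can be
 * merged into a single IncSP of +20 (the offsets are simply added).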
static void ia32_optimize_IncSP(ir_node *node)
	ir_node *pred = be_get_IncSP_pred(node);

	if(!be_is_IncSP(pred))

	if(get_irn_n_edges(pred) > 1)

	pred_offs = be_get_IncSP_offset(pred);
	curr_offs = be_get_IncSP_offset(node);

	if(pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
		if(curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
	} else if(pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		if(curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
	} else if(curr_offs == BE_STACK_FRAME_SIZE_EXPAND
			|| curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
		offs = curr_offs + pred_offs;

	be_set_IncSP_offset(node, offs);

	/* rewire dependency edges */
	predpred = be_get_IncSP_pred(pred);
	edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);

	be_set_IncSP_pred(node, predpred);
 * Performs peephole optimizations.
static void ia32_peephole_optimize_node(ir_node *node, void *env)
	if (be_is_IncSP(node)) {
		ia32_optimize_IncSP(node);

		if (cg->opt & IA32_OPT_PUSHARGS)
			ia32_create_Pushs(node);
static ir_node *optimize_ia32_Const(ir_node *node)
	const ia32_immediate_attr_t *attr = get_ia32_immediate_attr_const(node);
	const arch_register_t       *reg;
	ir_graph                    *irg  = current_ir_graph;
	/* try to transform a "mov 0, reg" into "xor reg, reg" */
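	/* (a "mov 0, reg" needs 5 bytes for a 32-bit register while "xor reg, reg"
	 * needs only 2, but the xor also clobbers EFLAGS, hence the checks below) */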
	if(attr->offset != 0 || attr->symconst != NULL)

	/* xor destroys the flags, so no one must be using them */
	if(be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)

	reg = arch_get_irn_register(arch_env, node);
	assert(be_peephole_get_reg_value(reg) == NULL);

	/* create xor(produceval, produceval) */
	block = get_nodes_block(node);
	dbgi  = get_irn_dbg_info(node);
	produceval = new_rd_ia32_ProduceVal(dbgi, irg, block);
	arch_set_irn_register(arch_env, produceval, reg);

	noreg = ia32_new_NoReg_gp(cg);
	xor   = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(),
	                        produceval, produceval);
	arch_set_irn_register(arch_env, xor, reg);

	sched_add_before(node, produceval);
	sched_add_before(node, xor);
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
	assert(op->ops.generic == NULL);
	op->ops.generic = (void*) func;

void ia32_peephole_optimization(ir_graph *irg, ia32_code_gen_t *new_cg)
	cg       = new_cg;
	arch_env = cg->arch_env;
	/* register peephole optimisations */
	clear_irp_opcodes_generic_func();
	register_peephole_optimisation(op_ia32_Const, optimize_ia32_Const);

	be_peephole_opt(cg->birg);

	irg_walk_graph(irg, ia32_peephole_optimize_node, NULL, NULL);
 * Removes the node from the schedule if it is not used anymore. If the node is
 * a mode_T node, all its Projs are removed as well.
 * @param node  The node to be removed from the schedule
static INLINE void try_kill(ir_node *node)
	if(get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge, *next;
		foreach_out_edge_safe(node, edge, next) {
			ir_node *proj = get_edge_src_irn(edge);

	if(get_irn_n_edges(node) != 0)

	if (sched_is_scheduled(node)) {
static void optimize_conv_store(ir_node *node)
	if(!is_ia32_Store(node) && !is_ia32_Store8Bit(node))

	pred = get_irn_n(node, 2);
	if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
	/* the store only stores the lower bits, so we only need the conv
	 * if it shrinks the mode */
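	/* e.g. a Conv to 32 bit in front of a 16-bit Store is redundant, while a
	 * Conv to 8 bit in front of a 16-bit Store has to stay */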
	conv_mode  = get_ia32_ls_mode(pred);
	store_mode = get_ia32_ls_mode(node);
	if(get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))

	set_irn_n(node, 2, get_irn_n(pred, 2));
	if(get_irn_n_edges(pred) == 0) {
static void optimize_load_conv(ir_node *node)
	ir_node *pred, *predpred;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))

	pred = get_irn_n(node, 2);

	predpred = get_Proj_pred(pred);
	if(!is_ia32_Load(predpred))
	/* the load is sign extending the upper bits, so we only need the conv
	 * if it shrinks the mode */
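	/* e.g. an 8-bit Load already extends its result to full register width, so
	 * a following Conv from 8 to 32 bit with the same signedness is redundant */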
	load_mode = get_ia32_ls_mode(predpred);
	conv_mode = get_ia32_ls_mode(node);
	if(get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))

	if(get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
		/* change the load if it has only 1 user */
		if(get_irn_n_edges(pred) == 1) {
			if(get_mode_sign(conv_mode)) {
				newmode = find_signed_mode(load_mode);
				newmode = find_unsigned_mode(load_mode);
			assert(newmode != NULL);
			set_ia32_ls_mode(predpred, newmode);
			/* otherwise we have to keep the conv */

	exchange(node, pred);
static void optimize_conv_conv(ir_node *node)
	ir_node *pred_proj, *pred, *result_conv;
	ir_mode *pred_mode, *conv_mode;

	if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))

	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
	if(is_Proj(pred_proj))
		pred = get_Proj_pred(pred_proj);

	if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
	/* we know that after a conv, the upper bits are sign extended
	 * so we only need the 2nd conv if it shrinks the mode */
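	/* e.g. a Conv to 16 bit followed by a Conv to 8 bit can be replaced by the
	 * 8-bit Conv alone; an unsigned Conv to 8 bit followed by a Conv to 16 bit
	 * can also be replaced by just the 8-bit Conv */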
	conv_mode = get_ia32_ls_mode(node);
	pred_mode = get_ia32_ls_mode(pred);
	/* if the 2nd conv is smaller than the first conv, then we can always take
	 * the 2nd conv */
	if(get_mode_size_bits(conv_mode) <= get_mode_size_bits(pred_mode)) {
		if(get_irn_n_edges(pred_proj) == 1) {
			result_conv = pred_proj;
			set_ia32_ls_mode(pred, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(pred, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(pred, get_ia32_in_req_all(node));

			/* TODO: construct syncs/stuff here but we'll probably end up with
			 * 2 statements anyway */
			if(get_irn_mode(pred) == mode_T) {

			result_conv = exact_copy(pred);
			set_ia32_ls_mode(result_conv, conv_mode);

			/* Argh: we must change the opcode to 8bit AND copy the register constraints */
			if (get_mode_size_bits(conv_mode) == 8) {
				set_irn_op(result_conv, op_ia32_Conv_I2I8Bit);
				set_ia32_in_req_all(result_conv, get_ia32_in_req_all(node));
		/* if both convs have the same sign, then we can take the smaller one */
		if(get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
			result_conv = pred_proj;
			/* no optimisation possible if the smaller conv is a sign extension */
			if(mode_is_signed(pred_mode)) {
			/* we can take the smaller conv if it is unsigned */
			result_conv = pred_proj;

	exchange(node, result_conv);

	if(get_irn_n_edges(pred) == 0) {

	optimize_conv_conv(result_conv);
static void optimize_node(ir_node *node, void *env)
	optimize_load_conv(node);
	optimize_conv_store(node);
	optimize_conv_conv(node);

 * Performs conv and address mode optimization.
void ia32_optimize_graph(ia32_code_gen_t *cg)
	irg_walk_blkwise_graph(cg->irg, NULL, optimize_node, cg);

	be_dump(cg->irg, "-opt", dump_ir_block_graph_sched);

void ia32_init_optimize(void)
	FIRM_DBG_REGISTER(dbg, "firm.be.ia32.optimize");