2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements functions to finalize the irg for emit.
23 * @author Christian Wuerdig
39 #include "../bearch_t.h"
40 #include "../besched_t.h"
41 #include "../benode_t.h"
43 #include "bearch_ia32_t.h"
44 #include "ia32_finish.h"
45 #include "ia32_new_nodes.h"
46 #include "ia32_map_regs.h"
47 #include "ia32_transform.h"
48 #include "ia32_dbg_stat.h"
49 #include "ia32_optimize.h"
50 #include "gen_ia32_regalloc_if.h"
52 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
55 * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG.
56 * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
58 static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
60 ir_node *in1, *in2, *noreg, *nomem, *res;
61 ir_node *noreg_fp, *block;
62 ir_mode *mode = get_irn_mode(irn);
63 dbg_info *dbg = get_irn_dbg_info(irn);
64 const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots;
67 /* Return if AM node or not a Sub or xSub */
68 if (!(is_ia32_Sub(irn) || is_ia32_xSub(irn)) || get_ia32_op_type(irn) != ia32_Normal)
71 noreg = ia32_new_NoReg_gp(cg);
72 noreg_fp = ia32_new_NoReg_fp(cg);
73 nomem = new_rd_NoMem(cg->irg);
74 in1 = get_irn_n(irn, 2);
75 in2 = get_irn_n(irn, 3);
76 in1_reg = arch_get_irn_register(cg->arch_env, in1);
77 in2_reg = arch_get_irn_register(cg->arch_env, in2);
78 out_reg = get_ia32_out_reg(irn, 0);
81 block = get_nodes_block(irn);
83 /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
84 if (out_reg != in2_reg)
87 /* generate the neg src2 */
88 if(mode_is_float(mode)) {
92 res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, in2, noreg_fp, nomem);
93 size = get_mode_size_bits(mode);
94 entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
95 set_ia32_am_sc(res, entity);
96 set_ia32_op_type(res, ia32_AddrModeS);
97 set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
99 res = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, in2, nomem);
101 arch_set_irn_register(cg->arch_env, res, in2_reg);
103 /* add to schedule */
104 sched_add_before(irn, res);
106 /* generate the add */
107 if (mode_is_float(mode)) {
108 res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, res, in1, nomem);
109 set_ia32_am_support(res, ia32_am_Source, ia32_am_binary);
110 set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
113 res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, res, in1, nomem);
114 set_ia32_am_support(res, ia32_am_Full, ia32_am_binary);
115 set_ia32_commutative(res);
118 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
120 slots = get_ia32_slots(res);
123 /* exchange the add and the sub */
124 edges_reroute(irn, res, irg);
126 /* add to schedule */
127 sched_add_before(irn, res);
129 /* remove the old sub */
131 arity = get_irn_arity(irn);
132 for(i = 0; i < arity; ++i) {
133 set_irn_n(irn, i, new_Bad());
136 DBG_OPT_SUB2NEGADD(irn, res);
140 * Transforms a LEA into an Add or SHL if possible.
141 * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
143 static void ia32_transform_lea_to_add_or_shl(ir_node *irn, ia32_code_gen_t *cg) {
144 ia32_am_flavour_t am_flav;
145 dbg_info *dbg = get_irn_dbg_info(irn);
148 ir_node *nomem, *noreg, *base, *index, *op1, *op2;
151 const arch_register_t *out_reg, *base_reg, *index_reg;
/* NOTE(review): numbering gaps in this excerpt -- early `return`s, some
 * declarations (irg, block, res, offs, tv) and closing braces are on
 * lines not visible here. */
154 if (! is_ia32_Lea(irn))
157 am_flav = get_ia32_am_flavour(irn);
159 /* mustn't have a symconst */
160 if (get_ia32_am_sc(irn) != NULL || get_ia32_frame_ent(irn) != NULL)
/* Case 1: index*scale flavour (am_IS) -> a Shl by the scale, provided
 * the Lea writes the same register the index lives in. */
163 if (am_flav == ia32_am_IS) {
167 noreg = ia32_new_NoReg_gp(cg);
168 nomem = new_rd_NoMem(cg->irg);
169 index = get_irn_n(irn, 1);
170 index_reg = arch_get_irn_register(cg->arch_env, index);
171 out_reg = arch_get_irn_register(cg->arch_env, irn);
173 if (out_reg != index_reg)
176 /* ok, we can transform it */
178 block = get_nodes_block(irn);
/* the shift amount is the address-mode scale, set as an Immop tarval */
180 res = new_rd_ia32_Shl(dbg, irg, block, noreg, noreg, index, noreg, nomem);
181 offs = get_ia32_am_scale(irn);
182 tv = new_tarval_from_long(offs, mode_Iu);
183 set_ia32_Immop_tarval(res, tv);
184 arch_set_irn_register(cg->arch_env, res, out_reg);
/* Case 2: base/offset/index flavours that can be expressed as one Add */
186 /* only some LEAs can be transformed to an Add */
187 if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_BI)
190 noreg = ia32_new_NoReg_gp(cg);
191 nomem = new_rd_NoMem(cg->irg);
194 base = get_irn_n(irn, 0);
195 index = get_irn_n(irn, 1);
196 offs = get_ia32_am_offs_int(irn);
198 out_reg = arch_get_irn_register(cg->arch_env, irn);
199 base_reg = arch_get_irn_register(cg->arch_env, base);
200 index_reg = arch_get_irn_register(cg->arch_env, index);
203 block = get_nodes_block(irn);
208 /* out register must be same as base register */
209 if (out_reg != base_reg)
/* second operand: the constant offset wrapped in an Immediate node */
213 op2 = new_rd_ia32_Immediate(NULL, irg, block, NULL, 0, offs);
214 arch_set_irn_register(cg->arch_env, op2,
215 &ia32_gp_regs[REG_GP_NOREG]);
219 /* out register must be same as one in register */
220 if (out_reg == base_reg) {
223 } else if (out_reg == index_reg) {
227 /* in registers are different from out -> no Add possible */
/* build the replacement Add writing directly to the Lea's out register */
237 res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, op1, op2, nomem);
238 arch_set_irn_register(cg->arch_env, res, out_reg);
239 set_ia32_op_type(res, ia32_Normal);
240 set_ia32_commutative(res);
243 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
245 /* add new ADD/SHL to schedule */
246 sched_add_before(irn, res);
248 DBG_OPT_LEA2ADD(irn, res);
250 /* remove the old LEA */
253 /* exchange the Add and the LEA */
/*
 * Returns non-zero for ia32 nodes whose should_be_same output requirement
 * has to be enforced by assure_should_be_same_requirements(); Lea, Conv
 * and the CMov variants are exempt from that fixup.
 * NOTE(review): the closing brace is on a line missing from this excerpt.
 */
257 static INLINE int need_constraint_copy(ir_node *irn) {
258 return ! is_ia32_Lea(irn) &&
259 ! is_ia32_Conv_I2I(irn) &&
260 ! is_ia32_Conv_I2I8Bit(irn) &&
261 ! is_ia32_TestCMov(irn) &&
262 ! is_ia32_CmpCMov(irn);
266 * Insert copies for all ia32 nodes where the should_be_same requirement is not fulfilled.
268 * Transform Sub into Neg -- Add if IN2 == OUT
270 static void assure_should_be_same_requirements(ia32_code_gen_t *cg,
273 ir_graph *irg = cg->irg;
274 const arch_env_t *arch_env = cg->arch_env;
275 const arch_register_req_t **reqs;
276 const arch_register_t *out_reg, *in_reg;
278 ir_node *in_node, *block;
279 ia32_op_type_t op_tp;
/* NOTE(review): numbering gaps -- several declarations (node parameter
 * line, i, i2, n_res, arity, same_pos, in[], perm, perm_proj0/1, tmp),
 * early `return`s and closing braces sit on lines not visible here. */
281 if(!is_ia32_irn(node))
284 /* some nodes are just a bit less efficient, but need no fixing if the
285 * should be same requirement is not fulfilled */
286 if(!need_constraint_copy(node))
289 reqs = get_ia32_out_req_all(node);
290 n_res = get_ia32_n_res(node);
291 block = get_nodes_block(node);
293 /* check all OUT requirements, if there is a should_be_same */
294 for (i = 0; i < n_res; i++) {
301 ir_node *uses_out_reg;
302 const arch_register_req_t *req = reqs[i];
303 const arch_register_class_t *class;
304 int uses_out_reg_pos;
306 if (!arch_register_req_is(req, should_be_same))
/* other_same names the input that should end up in the out register */
309 same_pos = req->other_same;
311 /* get in and out register */
312 out_reg = get_ia32_out_reg(node, i);
313 in_node = get_irn_n(node, same_pos);
314 in_reg = arch_get_irn_register(arch_env, in_node);
316 /* requirement already fulfilled? */
317 if (in_reg == out_reg)
319 /* unknowns can be changed to any register we want on emitting */
320 if (is_unknown_reg(in_reg))
322 class = arch_register_get_class(in_reg);
323 assert(class == arch_register_get_class(out_reg));
325 /* check if any other input operands uses the out register */
/* scan all inputs; uses_out_reg / uses_out_reg_pos record the (unique)
 * input occupying the out register. NOTE(review): the initialization of
 * uses_out_reg is on a line missing from this excerpt. */
326 arity = get_irn_arity(node);
328 uses_out_reg_pos = -1;
329 for(i2 = 0; i2 < arity; ++i2) {
330 ir_node *in = get_irn_n(node, i2);
331 const arch_register_t *in_reg = arch_get_irn_register(arch_env, in);
333 if(in_reg != out_reg)
/* two distinct inputs in the same register: allocator bug */
336 if(uses_out_reg != NULL && in != uses_out_reg) {
337 panic("invalid register allocation");
340 if(uses_out_reg_pos >= 0)
341 uses_out_reg_pos = -1; /* multiple inputs... */
343 uses_out_reg_pos = i2;
346 /* no one else is using the out reg, we can simply copy it
347 * (the register can't be live since the operation will override it
349 if(uses_out_reg == NULL) {
350 ir_node *copy = be_new_Copy(class, irg, block, in_node);
351 DBG_OPT_2ADDRCPY(copy);
353 /* destination is the out register */
354 arch_set_irn_register(arch_env, copy, out_reg);
356 /* insert copy before the node into the schedule */
357 sched_add_before(node, copy);
/* rewire the constrained input to consume the copy */
360 set_irn_n(node, same_pos, copy);
362 DBG((dbg, LEVEL_1, "created copy %+F for should be same argument "
363 "at input %d of %+F\n", copy, same_pos, node));
367 /* for commutative nodes we can simply swap the left/right */
/* NOTE(review): the literal 3 presumably denotes the 'right' data input
 * of a binary ia32 node -- confirm against the node layout */
368 if(is_ia32_commutative(node) && uses_out_reg_pos == 3) {
369 ia32_swap_left_right(node);
370 DBG((dbg, LEVEL_1, "swapped left/right input of %+F to resolve "
371 "should be same constraint\n", node));
/* last resort: swap the two registers with a Perm */
376 ir_fprintf(stderr, "Note: need perm to resolve should_be_same constraint at %+F (this is unsafe and should not happen in theory...)\n", node);
378 /* the out reg is used as node input: we need to permutate our input
379 * and the other (this is allowed, since the other node can't be live
380 * after! the operation as we will override the register. */
382 in[1] = uses_out_reg;
383 perm = be_new_Perm(class, irg, block, 2, in);
385 perm_proj0 = new_r_Proj(irg, block, perm, get_irn_mode(in[0]), 0);
386 perm_proj1 = new_r_Proj(irg, block, perm, get_irn_mode(in[1]), 1);
388 arch_set_irn_register(arch_env, perm_proj0, out_reg);
389 arch_set_irn_register(arch_env, perm_proj1, in_reg);
391 sched_add_before(node, perm);
393 DBG((dbg, LEVEL_1, "created perm %+F for should be same argument "
394 "at input %d of %+F (need permutate with %+F)\n", perm, same_pos,
395 node, uses_out_reg));
397 /* use the perm results */
/* replace every occurrence of the two permuted values by the Proj
 * carrying the same value in its new register */
398 for(i2 = 0; i2 < arity; ++i2) {
399 ir_node *in = get_irn_n(node, i2);
402 set_irn_n(node, i2, perm_proj0);
403 } else if(in == uses_out_reg) {
404 set_irn_n(node, i2, perm_proj1);
409 /* check xCmp: try to avoid unordered cmp */
410 if ((is_ia32_xCmp(node) || is_ia32_xCmpCMov(node) || is_ia32_xCmpSet(node)) &&
411 op_tp == ia32_Normal)
413 long pnc = get_ia32_pncode(node);
/* only rewrite when the predicate includes the unordered case */
415 if (pnc & pn_Cmp_Uo) {
417 int idx1 = 2, idx2 = 3;
419 if (is_ia32_xCmpCMov(node)) {
424 /** Matze: TODO this looks wrong, I assume we should exchange
425 * the proj numbers and not the inputs... */
/* swap the two compare operands and negate the predicate */
427 tmp = get_irn_n(node, idx1);
428 set_irn_n(node, idx1, get_irn_n(node, idx2));
429 set_irn_n(node, idx2, tmp);
431 set_ia32_pncode(node, get_negated_pnc(pnc, mode_E));
438 * We have a source address mode node with base or index register equal to
439 * result register and unfulfilled should_be_same requirement. The constraint
440 * handler will insert a copy from the remaining input operand to the result
441 * register -> base or index is broken then.
442 * Solution: Turn back this address mode into explicit Load + Operation.
444 static void fix_am_source(ir_node *irn, void *env) {
445 ia32_code_gen_t *cg = env;
446 ir_node *base, *index, *noreg;
447 const arch_register_t *reg_base, *reg_index;
448 const arch_register_req_t **reqs;
/* NOTE(review): numbering gaps -- declarations of i/n_res/load/pnres,
 * early `return`s, part of the condition around line 480 and closing
 * braces are on lines not visible in this excerpt. */
451 /* check only ia32 nodes with source address mode */
452 if (! is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
454 /* only need to fix binary operations */
455 if (get_ia32_am_arity(irn) != ia32_am_binary)
/* inputs 0/1 are the address-mode base and index registers */
458 base = get_irn_n(irn, 0);
459 index = get_irn_n(irn, 1);
461 reg_base = arch_get_irn_register(cg->arch_env, base);
462 reg_index = arch_get_irn_register(cg->arch_env, index);
463 reqs = get_ia32_out_req_all(irn);
465 noreg = ia32_new_NoReg_gp(cg);
467 n_res = get_ia32_n_res(irn);
469 for (i = 0; i < n_res; i++) {
470 if (arch_register_req_is(reqs[i], should_be_same)) {
471 /* get in and out register */
472 const arch_register_t *out_reg = get_ia32_out_reg(irn, i);
473 int same_pos = reqs[i]->other_same;
476 there is a constraint for the remaining operand
477 and the result register is equal to base or index register
480 (out_reg == reg_base || out_reg == reg_index))
482 /* turn back address mode */
/* input 2 is the remaining register operand; its class decides whether
 * a gp Load or an SSE xLoad reconstructs the memory operand */
483 ir_node *in_node = get_irn_n(irn, 2);
484 const arch_register_t *in_reg = arch_get_irn_register(cg->arch_env, in_node);
485 ir_node *block = get_nodes_block(irn);
486 ir_mode *ls_mode = get_ia32_ls_mode(irn);
/* input 4 is the memory dependency of the AM operation */
490 if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_gp]) {
491 load = new_rd_ia32_Load(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
492 pnres = pn_ia32_Load_res;
494 else if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_xmm]) {
495 load = new_rd_ia32_xLoad(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
496 pnres = pn_ia32_xLoad_res;
499 panic("cannot turn back address mode for this register class");
502 /* copy address mode information to load */
503 set_ia32_ls_mode(load, ls_mode);
504 set_ia32_am_flavour(load, get_ia32_am_flavour(irn));
505 set_ia32_op_type(load, ia32_AddrModeS);
506 set_ia32_am_scale(load, get_ia32_am_scale(irn));
507 set_ia32_am_sc(load, get_ia32_am_sc(irn));
508 add_ia32_am_offs_int(load, get_ia32_am_offs_int(irn));
509 set_ia32_frame_ent(load, get_ia32_frame_ent(irn));
511 if (is_ia32_use_frame(irn))
512 set_ia32_use_frame(load);
514 /* insert the load into schedule */
515 sched_add_before(irn, load);
517 DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
/* the loaded value carries the out register, satisfying should_be_same */
519 load = new_r_Proj(cg->irg, block, load, ls_mode, pnres);
520 arch_set_irn_register(cg->arch_env, load, out_reg);
522 /* insert the load result proj into schedule */
523 sched_add_before(irn, load);
525 /* set the new input operand */
526 set_irn_n(irn, 3, load);
528 /* this is a normal node now */
/* clear base/index and drop the AddrModeS marker */
529 set_irn_n(irn, 0, noreg);
530 set_irn_n(irn, 1, noreg);
531 set_ia32_op_type(irn, ia32_Normal);
540 * Block walker: finishes a block
542 static void ia32_finish_irg_walker(ir_node *block, void *env) {
543 ia32_code_gen_t *cg = env;
/* NOTE(review): the declarations of `irn` and `next` are on lines missing
 * from this excerpt. Each pass fetches `next` before transforming, since
 * a transformation may replace the current scheduled node. */
546 /* first: turn back AM source if necessary */
547 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
548 next = sched_next(irn);
549 fix_am_source(irn, env);
/* then: rewrite Sub -> Neg+Add and Lea -> Add/Shl where profitable */
552 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
553 ia32_code_gen_t *cg = env;
555 next = sched_next(irn);
557 /* check if there is a sub which need to be transformed */
558 ia32_transform_sub_to_neg_add(irn, cg);
560 /* transform a LEA into an Add if possible */
561 ia32_transform_lea_to_add_or_shl(irn, cg);
564 /* second: insert copies and finish irg */
565 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
566 next = sched_next(irn);
567 assure_should_be_same_requirements(cg, irn);
572 * Block walker: pushes all blocks on a wait queue
/* Block walker: pushes every visited block onto the wait queue passed
 * via env. NOTE(review): the `wq = env` assignment/declaration is on a
 * line missing from this excerpt. */
574 static void ia32_push_on_queue_walker(ir_node *block, void *env) {
576 waitq_put(wq, block);
581 * Add Copy nodes for not fulfilled should_be_equal constraints
583 void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
584 waitq *wq = new_waitq();
586 /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
587 irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
/* drain the queue, finishing one block at a time.
 * NOTE(review): the matching del_waitq(wq) is presumably on a line
 * missing from this excerpt -- verify against the full file. */
589 while (! waitq_empty(wq)) {
590 ir_node *block = waitq_get(wq);
591 ia32_finish_irg_walker(block, cg);
/* One-time module initialisation: registers the debug channel used by the
 * DBG() statements above. NOTE(review): the opening brace is on a line
 * missing from this excerpt. */
596 void ia32_init_finish(void)
598 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.finish");