2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements functions to finalize the irg for emit.
23 * @author Christian Wuerdig
38 #include "../bearch_t.h"
39 #include "../besched_t.h"
40 #include "../benode_t.h"
42 #include "bearch_ia32_t.h"
43 #include "ia32_finish.h"
44 #include "ia32_new_nodes.h"
45 #include "ia32_map_regs.h"
46 #include "ia32_transform.h"
47 #include "ia32_dbg_stat.h"
48 #include "ia32_optimize.h"
49 #include "gen_ia32_regalloc_if.h"
/* Debug module handle for this file; registered in ia32_init_finish()
 * under "firm.be.ia32.finish". Only present in debug builds. */
51 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
54 * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG.
55 * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
/*
 * Rewrites an ia32 Sub/xSub whose allocated output register equals its
 * second source register (OUT == SRC2) into Neg(src2) followed by Add,
 * so the x86 two-address constraint (dest must be src1) can be met.
 * Must run after register allocation.
 *
 * NOTE(review): this chunk has elided source lines (early returns,
 * else-lines, closing braces, and the declarations of irg/size/entity/
 * arity/i are not visible here); control flow below is partial.
 */
57 static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
59 ir_node *in1, *in2, *noreg, *nomem, *res;
60 ir_node *noreg_fp, *block;
61 ir_mode *mode = get_irn_mode(irn);
62 dbg_info *dbg = get_irn_dbg_info(irn);
63 const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots;
66 /* Return if AM node or not a Sub or xSub */
67 if (!(is_ia32_Sub(irn) || is_ia32_xSub(irn)) || get_ia32_op_type(irn) != ia32_Normal)
70 noreg = ia32_new_NoReg_gp(cg);
71 noreg_fp = ia32_new_NoReg_fp(cg);
72 nomem = new_rd_NoMem(cg->irg);
/* inputs 2/3 are the two data operands (0/1 are base/index placeholders) */
73 in1 = get_irn_n(irn, 2);
74 in2 = get_irn_n(irn, 3);
75 in1_reg = arch_get_irn_register(cg->arch_env, in1);
76 in2_reg = arch_get_irn_register(cg->arch_env, in2);
77 out_reg = get_ia32_out_reg(irn, 0);
80 block = get_nodes_block(irn);
82 /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
83 if (!REGS_ARE_EQUAL(out_reg, in2_reg))
86 /* generate the neg src2 */
87 if(mode_is_float(mode)) {
/* SSE negate: XOR with a sign-bit mask constant (SSIGN/DSIGN) fetched
 * through source address mode */
91 res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, in2, noreg_fp, nomem);
92 size = get_mode_size_bits(mode);
93 entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
94 set_ia32_am_sc(res, entity);
95 set_ia32_op_type(res, ia32_AddrModeS);
96 set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
/* integer path (the else-line is elided in this chunk): plain Neg */
98 res = new_rd_ia32_Neg(dbg, irg, block, noreg, noreg, in2, nomem);
/* the negated value reuses src2's register, which is also the out reg */
100 arch_set_irn_register(cg->arch_env, res, in2_reg);
102 /* add to schedule */
103 sched_add_before(irn, res);
105 /* generate the add */
106 if (mode_is_float(mode)) {
107 res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, res, in1, nomem);
108 set_ia32_am_support(res, ia32_am_Source);
109 set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
112 res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, res, in1, nomem);
113 set_ia32_am_support(res, ia32_am_Full);
114 set_ia32_commutative(res);
117 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
/* out-slot register assignment code is elided here; slots is fetched
 * presumably so the Add's result slot can be set to out_reg — confirm */
119 slots = get_ia32_slots(res);
122 /* exchange the add and the sub */
123 edges_reroute(irn, res, irg);
125 /* add to schedule */
126 sched_add_before(irn, res);
128 /* remove the old sub */
/* detach all operands so the dead Sub holds no edges */
130 arity = get_irn_arity(irn);
131 for(i = 0; i < arity; ++i) {
132 set_irn_n(irn, i, new_Bad());
135 DBG_OPT_SUB2NEGADD(irn, res);
139 * Transforms a LEA into an Add or SHL if possible.
140 * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
/*
 * Rewrites a LEA into a cheaper Shl or Add when the allocated output
 * register coincides with one of its inputs:
 *  - flavour am_IS (index * scale only)  -> Shl index, #scale
 *  - flavours am_B / am_OB / am_BI       -> Add (plus immediate offset)
 * LEAs carrying a symconst or frame entity are left untouched.
 * Must run after register allocation.
 *
 * NOTE(review): elided lines hide the early returns, switch-case labels,
 * op1/op2 assignments and the final exchange of Add and LEA.
 */
142 static void ia32_transform_lea_to_add_or_shl(ir_node *irn, ia32_code_gen_t *cg) {
143 ia32_am_flavour_t am_flav;
145 dbg_info *dbg = get_irn_dbg_info(irn);
148 ir_node *nomem, *noreg, *base, *index, *op1, *op2;
151 const arch_register_t *out_reg, *base_reg, *index_reg;
154 if (! is_ia32_Lea(irn))
157 am_flav = get_ia32_am_flavour(irn);
159 /* mustn't have a symconst */
160 if (get_ia32_am_sc(irn) != NULL || get_ia32_frame_ent(irn) != NULL)
/* --- case 1: pure scaled index (index << scale) becomes a Shl --- */
163 if (am_flav == ia32_am_IS) {
167 noreg = ia32_new_NoReg_gp(cg);
168 nomem = new_rd_NoMem(cg->irg);
169 index = get_irn_n(irn, 1);
170 index_reg = arch_get_irn_register(cg->arch_env, index);
171 out_reg = arch_get_irn_register(cg->arch_env, irn);
/* Shl is two-address, so it only works if out == index register */
173 if (! REGS_ARE_EQUAL(out_reg, index_reg))
176 /* ok, we can transform it */
178 block = get_nodes_block(irn);
/* the scale exponent becomes the Shl's immediate operand */
180 res = new_rd_ia32_Shl(dbg, irg, block, noreg, noreg, index, noreg, nomem);
181 offs = get_ia32_am_scale(irn);
182 tv = new_tarval_from_long(offs, mode_Iu);
183 set_ia32_Immop_tarval(res, tv);
184 arch_set_irn_register(cg->arch_env, res, out_reg);
/* --- case 2: base(+offset)(+index) may become an Add --- */
186 /* only some LEAs can be transformed to an Add */
187 if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_BI)
190 noreg = ia32_new_NoReg_gp(cg);
191 nomem = new_rd_NoMem(cg->irg);
194 base = get_irn_n(irn, 0);
195 index = get_irn_n(irn, 1);
197 if (am_flav & ia32_O) {
198 offs = get_ia32_am_offs_int(irn);
201 out_reg = arch_get_irn_register(cg->arch_env, irn);
202 base_reg = arch_get_irn_register(cg->arch_env, base);
203 index_reg = arch_get_irn_register(cg->arch_env, index);
206 block = get_nodes_block(irn);
/* pick op1/op2 per flavour; case labels are elided in this chunk */
208 switch(get_ia32_am_flavour(irn)) {
210 /* out register must be same as base register */
211 if (! REGS_ARE_EQUAL(out_reg, base_reg))
217 /* out register must be same as base register */
218 if (! REGS_ARE_EQUAL(out_reg, base_reg))
225 /* out register must be same as one in register */
226 if (REGS_ARE_EQUAL(out_reg, base_reg)) {
230 else if (REGS_ARE_EQUAL(out_reg, index_reg)) {
235 /* in registers a different from out -> no Add possible */
245 res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, op1, op2, nomem);
246 arch_set_irn_register(cg->arch_env, res, out_reg);
247 set_ia32_op_type(res, ia32_Normal);
248 set_ia32_commutative(res);
/* fold a constant displacement into the Add as an immediate */
251 tarval *tv = new_tarval_from_long(offs, mode_Iu);
252 set_ia32_Immop_tarval(res, tv);
256 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
258 /* add new ADD/SHL to schedule */
259 sched_add_before(irn, res);
261 DBG_OPT_LEA2ADD(irn, res);
263 /* remove the old LEA */
266 /* exchange the Add and the LEA */
/*
 * Returns non-zero iff @p irn is an ia32 node kind for which the
 * should_be_same constraint handling in ia32_finish_node() must insert
 * a Copy. Lea, the integer Convs and the Cmp/Psi conditional move/set
 * nodes are exempt (presumably handled elsewhere — confirm).
 */
270 static INLINE int need_constraint_copy(ir_node *irn) {
271 return ! is_ia32_Lea(irn) &&
272 ! is_ia32_Conv_I2I(irn) &&
273 ! is_ia32_Conv_I2I8Bit(irn) &&
274 ! is_ia32_CmpCMov(irn) &&
275 ! is_ia32_PsiCondCMov(irn) &&
276 ! is_ia32_CmpSet(irn);
280 * Insert copies for all ia32 nodes where the should_be_same requirement
282 * Transform Sub into Neg -- Add if IN2 == OUT
/*
 * Per-node finishing pass (env is the ia32_code_gen_t*):
 *  1. For every output with a should_be_same requirement whose allocated
 *     register differs from the required input's register, either swap
 *     the operands (commutative op whose other input already matches) or
 *     insert a be_Copy before the node.
 *  2. For xCmp/xCmpCMov/xCmpSet with an unordered predicate, swap the
 *     compare operands and negate the pnc to avoid the unordered form.
 *
 * NOTE(review): elided lines hide early returns/skips, declarations of
 * i/n_res/tmp, closing braces and the xCmpCMov index adjustment.
 */
284 static void ia32_finish_node(ir_node *irn, void *env) {
285 ia32_code_gen_t *cg = env;
286 const arch_register_req_t **reqs;
287 const arch_register_t *out_reg, *in_reg, *in2_reg;
289 ir_node *copy, *in_node, *block, *in2_node;
290 ia32_op_type_t op_tp;
292 if (is_ia32_irn(irn)) {
293 /* AM Dest nodes don't produce any values */
294 op_tp = get_ia32_op_type(irn);
295 if (op_tp == ia32_AddrModeD)
298 reqs = get_ia32_out_req_all(irn);
299 n_res = get_ia32_n_res(irn);
300 block = get_nodes_block(irn);
302 /* check all OUT requirements, if there is a should_be_same */
303 if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn))
305 for (i = 0; i < n_res; i++) {
306 if (arch_register_req_is(reqs[i], should_be_same)) {
307 int same_pos = reqs[i]->other_same;
309 /* get in and out register */
310 out_reg = get_ia32_out_reg(irn, i);
311 in_node = get_irn_n(irn, same_pos);
312 in_reg = arch_get_irn_register(cg->arch_env, in_node);
314 /* don't copy ignore nodes */
315 if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node))
318 /* check if in and out register are equal */
319 if (! REGS_ARE_EQUAL(out_reg, in_reg)) {
320 /* in case of a commutative op: just exchange the in's */
321 /* beware: the current op could be everything, so test for ia32 */
322 /* commutativity first before getting the second in */
323 if (is_ia32_commutative(irn)) {
/* same_pos ^ 1 toggles between the two data operand positions */
324 in2_node = get_irn_n(irn, same_pos ^ 1);
325 in2_reg = arch_get_irn_register(cg->arch_env, in2_node);
/* other operand already sits in the out register: swapping the
 * inputs satisfies the constraint without a copy */
327 if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
328 set_irn_n(irn, same_pos, in2_node);
329 set_irn_n(irn, same_pos ^ 1, in_node);
336 DBG((dbg, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, same_pos));
337 /* create copy from in register */
338 copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);
340 DBG_OPT_2ADDRCPY(copy);
342 /* destination is the out register */
343 arch_set_irn_register(cg->arch_env, copy, out_reg);
345 /* insert copy before the node into the schedule */
346 sched_add_before(irn, copy);
/* the constrained input now reads from the freshly placed copy */
349 set_irn_n(irn, same_pos, copy);
356 /* check xCmp: try to avoid unordered cmp */
357 if ((is_ia32_xCmp(irn) || is_ia32_xCmpCMov(irn) || is_ia32_xCmpSet(irn)) &&
358 op_tp == ia32_Normal &&
359 ! is_ia32_ImmConst(irn) && ! is_ia32_ImmSymConst(irn))
361 long pnc = get_ia32_pncode(irn);
363 if (pnc & pn_Cmp_Uo) {
365 int idx1 = 2, idx2 = 3;
/* xCmpCMov presumably uses different operand indices; the
 * adjustment lines are elided here — confirm */
367 if (is_ia32_xCmpCMov(irn)) {
/* swap compare operands and negate the predicate (mode_E = x87
 * extended float mode) so the unordered bit disappears */
372 tmp = get_irn_n(irn, idx1);
373 set_irn_n(irn, idx1, get_irn_n(irn, idx2));
374 set_irn_n(irn, idx2, tmp);
376 set_ia32_pncode(irn, get_negated_pnc(pnc, mode_E));
385 * We have a source address mode node with base or index register equal to
386 * result register. The constraint handler will insert a copy from the
387 * remaining input operand to the result register -> base or index is
389 * Solution: Turn back this address mode into explicit Load + Operation.
/*
 * Undo source address mode when it would conflict with constraint
 * copies: if a binary AM-source node's result register equals its base
 * or index register, the copy inserted for should_be_same would clobber
 * that address register. Fix: materialise an explicit Load (gp or xmm,
 * by the operand's register class) carrying the node's AM attributes,
 * feed its result Proj in as a plain operand, and downgrade the node to
 * ia32_Normal. env is the ia32_code_gen_t*.
 *
 * NOTE(review): elided lines hide declarations (i, n_res, load, pnres),
 * the full if-condition around the truncated comment below, early
 * returns, else/closing braces.
 */
391 static void fix_am_source(ir_node *irn, void *env) {
392 ia32_code_gen_t *cg = env;
393 ir_node *base, *index, *noreg;
394 const arch_register_t *reg_base, *reg_index;
395 const arch_register_req_t **reqs;
398 /* check only ia32 nodes with source address mode */
399 if (! is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
401 /* no need to fix unary operations */
402 if (get_irn_arity(irn) == 4)
405 base = get_irn_n(irn, 0);
406 index = get_irn_n(irn, 1);
408 reg_base = arch_get_irn_register(cg->arch_env, base);
409 reg_index = arch_get_irn_register(cg->arch_env, index);
410 reqs = get_ia32_out_req_all(irn);
412 noreg = ia32_new_NoReg_gp(cg);
414 n_res = get_ia32_n_res(irn);
416 for (i = 0; i < n_res; i++) {
417 if (arch_register_req_is(reqs[i], should_be_same)) {
418 /* get in and out register */
419 const arch_register_t *out_reg = get_ia32_out_reg(irn, i);
420 int same_pos = reqs[i]->other_same;
/* (truncated condition comment from the original follows) */
423 there is a constraint for the remaining operand
424 and the result register is equal to base or index register
427 (REGS_ARE_EQUAL(out_reg, reg_base) || REGS_ARE_EQUAL(out_reg, reg_index)))
429 /* turn back address mode */
430 ir_node *in_node = get_irn_n(irn, 2);
431 const arch_register_t *in_reg = arch_get_irn_register(cg->arch_env, in_node);
432 ir_node *block = get_nodes_block(irn);
433 ir_mode *ls_mode = get_ia32_ls_mode(irn);
/* choose Load flavour by the non-memory operand's register class;
 * input 4 is presumably the memory dependency — confirm */
437 if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_gp]) {
438 load = new_rd_ia32_Load(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
439 pnres = pn_ia32_Load_res;
441 else if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_xmm]) {
442 load = new_rd_ia32_xLoad(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
443 pnres = pn_ia32_xLoad_res;
446 panic("cannot turn back address mode for this register class");
449 /* copy address mode information to load */
450 set_ia32_ls_mode(load, ls_mode);
451 set_ia32_am_flavour(load, get_ia32_am_flavour(irn));
452 set_ia32_op_type(load, ia32_AddrModeS);
453 set_ia32_am_support(load, ia32_am_Source);
454 set_ia32_am_scale(load, get_ia32_am_scale(irn));
455 set_ia32_am_sc(load, get_ia32_am_sc(irn));
456 add_ia32_am_offs_int(load, get_ia32_am_offs_int(irn));
457 set_ia32_frame_ent(load, get_ia32_frame_ent(irn));
459 if (is_ia32_use_frame(irn))
460 set_ia32_use_frame(load);
462 /* insert the load into schedule */
463 sched_add_before(irn, load);
465 DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
/* 'load' is reused to hold the result Proj, placed in out_reg so the
 * later constraint copy becomes unnecessary */
467 load = new_r_Proj(cg->irg, block, load, ls_mode, pnres);
468 arch_set_irn_register(cg->arch_env, load, out_reg);
470 /* insert the load result proj into schedule */
471 sched_add_before(irn, load);
473 /* set the new input operand */
474 set_irn_n(irn, 3, load);
476 /* this is a normal node now */
477 set_irn_n(irn, 0, noreg);
478 set_irn_n(irn, 1, noreg);
479 set_ia32_op_type(irn, ia32_Normal);
488 * Block walker: finishes a block
/*
 * Finishes one block in three sweeps over its schedule, caching the
 * successor before each call since the callees may reshape the schedule:
 *  1. fix_am_source()                    — turn back conflicting AM source
 *  2. sub->neg/add and LEA->add/shl rewrites
 *  3. ia32_finish_node()                 — constraint copies, xCmp fixup
 * env is the ia32_code_gen_t*.
 * NOTE(review): irn/next declarations and loop closing braces are elided
 * in this chunk.
 */
490 static void ia32_finish_irg_walker(ir_node *block, void *env) {
493 /* first: turn back AM source if necessary */
494 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
495 next = sched_next(irn);
496 fix_am_source(irn, env);
499 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
500 ia32_code_gen_t *cg = env;
502 next = sched_next(irn);
504 /* check if there is a sub which need to be transformed */
505 ia32_transform_sub_to_neg_add(irn, cg);
507 /* transform a LEA into an Add if possible */
508 ia32_transform_lea_to_add_or_shl(irn, cg);
511 /* second: insert copies and finish irg */
512 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
513 next = sched_next(irn);
514 ia32_finish_node(irn, env);
519 * Block walker: pushes all blocks on a wait queue
/*
 * Block walker callback: appends @p block to the wait queue passed via
 * @p env (the cast of env to waitq* is on an elided line — confirm).
 */
521 static void ia32_push_on_queue_walker(ir_node *block, void *env) {
523 waitq_put(wq, block);
528 * Add Copy nodes for not fulfilled should_be_equal constraints
/*
 * Entry point of the finish phase: collects all blocks of @p irg into a
 * wait queue first (the per-block walker starts further walks itself,
 * so a direct block walk would be unsafe), then finishes each block.
 * NOTE(review): the queue is presumably freed on an elided line after
 * the loop — confirm.
 */
530 void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
531 waitq *wq = new_waitq();
533 /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
534 irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
536 while (! waitq_empty(wq)) {
537 ir_node *block = waitq_get(wq);
538 ia32_finish_irg_walker(block, cg);
/* One-time module init: registers the "firm.be.ia32.finish" debug
 * module into the file-local dbg handle. */
543 void ia32_init_finish(void)
545 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.finish");