2 * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements functions to finalize the irg for emit.
23 * @author Christian Wuerdig
39 #include "../bearch_t.h"
40 #include "../besched_t.h"
41 #include "../benode_t.h"
43 #include "bearch_ia32_t.h"
44 #include "ia32_finish.h"
45 #include "ia32_new_nodes.h"
46 #include "ia32_map_regs.h"
47 #include "ia32_common_transform.h"
48 #include "ia32_transform.h"
49 #include "ia32_dbg_stat.h"
50 #include "ia32_optimize.h"
51 #include "gen_ia32_regalloc_if.h"
53 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/*
 * NOTE(review): this span appears to be a sampled/garbled extraction —
 * several original lines are missing and each surviving line carries a
 * stray numeric prefix. Comments below describe only the logic visible
 * here; do not assume the code compiles as shown.
 *
 * Rewrites a two-address Sub whose allocated output register equals the
 * register of its second operand (which the x86 sub would clobber) into
 * an equivalent negate-then-add sequence.
 */
56 * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG.
57 * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
59 static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg)
62 ir_node *in1, *in2, *noreg, *nomem, *res;
63 ir_node *noreg_fp, *block;
65 const arch_register_t *in1_reg, *in2_reg, *out_reg;
67 /* fix_am will solve this for AddressMode variants */
68 if (get_ia32_op_type(irn) != ia32_Normal)
/* gather operands and their post-allocation registers */
71 noreg = ia32_new_NoReg_gp(cg);
72 noreg_fp = ia32_new_NoReg_xmm(cg);
73 nomem = new_rd_NoMem(cg->irg);
74 in1 = get_irn_n(irn, n_ia32_binary_left);
75 in2 = get_irn_n(irn, n_ia32_binary_right);
76 in1_reg = arch_get_irn_register(cg->arch_env, in1);
77 in2_reg = arch_get_irn_register(cg->arch_env, in2);
78 out_reg = get_ia32_out_reg(irn, 0);
81 block = get_nodes_block(irn);
83 /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
84 if (out_reg != in2_reg)
/* presumably an early return sits in the missing lines — only the
 * OUT == SRC2 case is rewritten below. TODO confirm against full source. */
87 dbg = get_irn_dbg_info(irn);
89 /* generate the neg src2 */
90 if (is_ia32_xSub(irn)) {
/* SSE path: there is no xmm negate instruction, so flip the sign bit
 * by XOR-ing with a known sign-mask constant (SSIGN for 32-bit,
 * DSIGN for 64-bit operands). */
93 ir_mode *op_mode = get_ia32_ls_mode(irn);
95 assert(get_irn_mode(irn) != mode_T);
97 res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, nomem, in2, noreg_fp);
98 size = get_mode_size_bits(op_mode);
99 entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
100 set_ia32_am_sc(res, entity);
101 set_ia32_op_type(res, ia32_AddrModeS);
102 set_ia32_ls_mode(res, op_mode);
/* the negated value reuses the register src2 already occupies */
104 arch_set_irn_register(cg->arch_env, res, in2_reg);
106 /* add to schedule */
107 sched_add_before(irn, res);
109 /* generate the add */
110 res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, nomem, res, in1);
111 set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
113 /* exchange the add and the sub */
114 edges_reroute(irn, res, irg);
116 /* add to schedule */
117 sched_add_before(irn, res);
/* integer path: first collect result/flags Projs of a mode_T Sub,
 * because a flags consumer forces the more involved rewrite below */
119 ir_node *res_proj = NULL;
120 ir_node *flags_proj = NULL;
121 const ir_edge_t *edge;
123 if (get_irn_mode(irn) == mode_T) {
124 /* collect the Proj uses */
125 foreach_out_edge(irn, edge) {
126 ir_node *proj = get_edge_src_irn(edge);
127 long pn = get_Proj_proj(proj);
128 if (pn == pn_ia32_Sub_res) {
129 assert(res_proj == NULL);
132 assert(pn == pn_ia32_Sub_flags);
133 assert(flags_proj == NULL);
/* no flags consumer: the plain neg + add rewrite is sufficient */
139 if (flags_proj == NULL) {
140 res = new_rd_ia32_Neg(dbg, irg, block, in2);
141 arch_set_irn_register(cg->arch_env, res, in2_reg);
143 /* add to schedule */
144 sched_add_before(irn, res);
146 /* generate the add */
147 res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, nomem, res, in1);
148 arch_set_irn_register(cg->arch_env, res, out_reg);
149 set_ia32_commutative(res);
151 /* exchange the add and the sub */
152 edges_reroute(irn, res, irg);
154 /* add to schedule */
155 sched_add_before(irn, res);
/* flags consumer exists: neg + add produces the wrong carry flag, so
 * emit not b; stc; adc (= a + ~b + 1); cmc to re-invert the carry so
 * flag users still observe subtraction semantics */
157 ir_node *stc, *cmc, *not, *adc;
161 * ARG, the above technique does NOT set the flags right.
162 * So, we must produce the following code:
164 * t2 = a + ~b + Carry
167 * a + -b = a + (~b + 1) would set the carry flag IF a == b ...
169 not = new_rd_ia32_Not(dbg, irg, block, in2);
170 arch_set_irn_register(cg->arch_env, not, in2_reg);
171 sched_add_before(irn, not);
/* set carry so the adc contributes the +1 of two's complement */
173 stc = new_rd_ia32_Stc(dbg, irg, block);
174 arch_set_irn_register(cg->arch_env, stc,
175 &ia32_flags_regs[REG_EFLAGS]);
176 sched_add_before(irn, stc);
178 adc = new_rd_ia32_Adc(dbg, irg, block, noreg, noreg, nomem, not,
180 arch_set_irn_register(cg->arch_env, adc, out_reg);
181 sched_add_before(irn, adc);
183 set_irn_mode(adc, mode_T);
184 adc_flags = new_r_Proj(irg, block, adc, mode_Iu, pn_ia32_Adc_flags);
185 arch_set_irn_register(cg->arch_env, adc_flags,
186 &ia32_flags_regs[REG_EFLAGS]);
/* complement the carry produced by the adc */
188 cmc = new_rd_ia32_Cmc(dbg, irg, block, adc_flags);
189 arch_set_irn_register(cg->arch_env, cmc,
190 &ia32_flags_regs[REG_EFLAGS]);
191 sched_add_before(irn, cmc);
/* reroute the old Sub's Projs onto the replacement nodes */
193 exchange(flags_proj, cmc);
194 if (res_proj != NULL) {
195 set_Proj_pred(res_proj, adc);
196 set_Proj_proj(res_proj, pn_ia32_Adc_res);
203 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
205 /* remove the old sub */
209 DBG_OPT_SUB2NEGADD(irn, res);
/*
 * Returns non-zero iff the node needs a Copy inserted to satisfy its
 * should_be_same output constraint.
 * NOTE(review): several lines of this switch (including its default
 * path and the Conv case bodies) are missing from this extract.
 */
212 static INLINE int need_constraint_copy(ir_node *irn)
214 /* TODO this should be determined from the node specification */
215 switch (get_ia32_irn_opcode(irn)) {
216 case iro_ia32_IMul: {
217 /* the 3 operand form of IMul needs no constraint copy */
218 ir_node *right = get_irn_n(irn, n_ia32_IMul_right);
219 return !is_ia32_Immediate(right);
/* presumably the Conv cases return 0 (no copy needed) in the missing
 * lines — TODO confirm against the full source */
223 case iro_ia32_Conv_I2I:
224 case iro_ia32_Conv_I2I8Bit:
234 * Returns the index of the "same" register.
235 * On the x86, we should have only one.
/* Scans the other_same bitmask for the first (on ia32: only) set bit
 * and returns its index; asserts when no bit is set at all.
 * NOTE(review): the declaration of the loop index and the function's
 * closing lines are missing from this extract. */
237 static int get_first_same(const arch_register_req_t* req)
239 const unsigned other = req->other_same;
242 for (i = 0; i < 32; ++i) {
243 if (other & (1U << i)) return i;
245 assert(! "same position not found");
/*
 * NOTE(review): sampled extract — several lines (variable declarations,
 * continue statements, closing braces) are missing. Comments describe
 * only the visible logic.
 *
 * For every output with a should_be_same requirement that register
 * allocation left unfulfilled, repair it by (in order of preference):
 * inserting a Copy, swapping commutative operands, or emitting a Perm.
 */
250 * Insert copies for all ia32 nodes where the should_be_same requirement
252 * Transform Sub into Neg -- Add if IN2 == OUT
254 static void assure_should_be_same_requirements(ia32_code_gen_t *cg,
257 ir_graph *irg = cg->irg;
258 const arch_env_t *arch_env = cg->arch_env;
259 const arch_register_req_t **reqs;
260 const arch_register_t *out_reg, *in_reg;
262 ir_node *in_node, *block;
264 reqs = get_ia32_out_req_all(node);
265 n_res = get_ia32_n_res(node);
266 block = get_nodes_block(node);
268 /* check all OUT requirements, if there is a should_be_same */
269 for (i = 0; i < n_res; i++) {
276 ir_node *uses_out_reg;
277 const arch_register_req_t *req = reqs[i];
278 const arch_register_class_t *cls;
279 int uses_out_reg_pos;
/* outputs without a should_be_same constraint need no work */
281 if (!arch_register_req_is(req, should_be_same))
284 same_pos = get_first_same(req);
286 /* get in and out register */
287 out_reg = get_ia32_out_reg(node, i);
288 in_node = get_irn_n(node, same_pos);
289 in_reg = arch_get_irn_register(arch_env, in_node);
291 /* requirement already fulfilled? */
292 if (in_reg == out_reg)
294 /* unknowns can be changed to any register we want on emitting */
295 if (is_unknown_reg(in_reg))
297 cls = arch_register_get_class(in_reg);
298 assert(cls == arch_register_get_class(out_reg));
300 /* check if any other input operands uses the out register */
301 arity = get_irn_arity(node);
303 uses_out_reg_pos = -1;
304 for (i2 = 0; i2 < arity; ++i2) {
305 ir_node *in = get_irn_n(node, i2);
306 const arch_register_t *in_reg;
/* only data inputs occupy registers; skip memory/other inputs */
308 if (!mode_is_data(get_irn_mode(in)))
311 in_reg = arch_get_irn_register(arch_env, in);
313 if (in_reg != out_reg)
/* two distinct input values in the same register is a broken
 * allocation — bail out hard */
316 if (uses_out_reg != NULL && in != uses_out_reg) {
317 panic("invalid register allocation");
320 if (uses_out_reg_pos >= 0)
321 uses_out_reg_pos = -1; /* multiple inputs... */
323 uses_out_reg_pos = i2;
326 /* no-one else is using the out reg, we can simply copy it
327 * (the register can't be live since the operation will override it
329 if (uses_out_reg == NULL) {
330 ir_node *copy = be_new_Copy(cls, irg, block, in_node);
331 DBG_OPT_2ADDRCPY(copy);
333 /* destination is the out register */
334 arch_set_irn_register(arch_env, copy, out_reg);
336 /* insert copy before the node into the schedule */
337 sched_add_before(node, copy);
/* let the constrained input read from the freshly copied value */
340 set_irn_n(node, same_pos, copy);
343 "created copy %+F for should be same argument at input %d of %+F\n",
344 copy, same_pos, node));
348 /* for commutative nodes we can simply swap the left/right */
349 if (uses_out_reg_pos == n_ia32_binary_right && is_ia32_commutative(node)) {
350 ia32_swap_left_right(node);
352 "swapped left/right input of %+F to resolve should be same constraint\n",
/* last resort: permute the two registers with a Perm node */
358 ir_fprintf(stderr, "Note: need perm to resolve should_be_same constraint at %+F (this is unsafe and should not happen in theory...)\n", node);
360 /* the out reg is used as node input: we need to permutate our input
361 * and the other (this is allowed, since the other node can't be live
362 * after! the operation as we will override the register. */
364 in[1] = uses_out_reg;
365 perm = be_new_Perm(cls, irg, block, 2, in);
367 perm_proj0 = new_r_Proj(irg, block, perm, get_irn_mode(in[0]), 0);
368 perm_proj1 = new_r_Proj(irg, block, perm, get_irn_mode(in[1]), 1);
370 arch_set_irn_register(arch_env, perm_proj0, out_reg);
371 arch_set_irn_register(arch_env, perm_proj1, in_reg);
373 sched_add_before(node, perm);
376 "created perm %+F for should be same argument at input %d of %+F (need permutate with %+F)\n",
377 perm, same_pos, node, uses_out_reg));
379 /* use the perm results */
380 for (i2 = 0; i2 < arity; ++i2) {
381 ir_node *in = get_irn_n(node, i2);
/* rewire every input to the matching Perm result */
384 set_irn_n(node, i2, perm_proj0);
385 } else if (in == uses_out_reg) {
386 set_irn_n(node, i2, perm_proj1);
/*
 * NOTE(review): sampled extract — declarations (load, mem, load_res,
 * proj_mode, pnres, pnmem, i, n_res, base, index, noreg) and several
 * statements are missing. Comments describe only the visible logic.
 */
394 * We have a source address mode node with base or index register equal to
395 * result register and unfulfilled should_be_same requirement. The constraint
396 * handler will insert a copy from the remaining input operand to the result
397 * register -> base or index is broken then.
398 * Solution: Turn back this address mode into explicit Load + Operation.
400 static void fix_am_source(ir_node *irn, void *env)
402 ia32_code_gen_t *cg = env;
403 const arch_env_t *arch_env = cg->arch_env;
407 const arch_register_t *reg_base;
408 const arch_register_t *reg_index;
409 const arch_register_req_t **reqs;
412 /* check only ia32 nodes with source address mode */
413 if (! is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
415 /* only need to fix binary operations */
416 if (get_ia32_am_support(irn) != ia32_am_binary)
419 base = get_irn_n(irn, n_ia32_base);
420 index = get_irn_n(irn, n_ia32_index);
422 reg_base = arch_get_irn_register(arch_env, base);
423 reg_index = arch_get_irn_register(arch_env, index);
424 reqs = get_ia32_out_req_all(irn);
426 noreg = ia32_new_NoReg_gp(cg);
428 n_res = get_ia32_n_res(irn);
430 for (i = 0; i < n_res; i++) {
431 if (arch_register_req_is(reqs[i], should_be_same)) {
432 /* get in and out register */
433 const arch_register_t *out_reg = get_ia32_out_reg(irn, i);
434 int same_pos = get_first_same(reqs[i]);
435 ir_node *same_node = get_irn_n(irn, same_pos);
436 const arch_register_t *same_reg
437 = arch_get_irn_register(arch_env, same_node);
438 const arch_register_class_t *same_cls;
439 ir_graph *irg = cg->irg;
440 dbg_info *dbgi = get_irn_dbg_info(irn);
441 ir_node *block = get_nodes_block(irn);
449 /* should_be same constraint is fullfilled, nothing to do */
450 if (out_reg == same_reg)
453 /* we only need to do something if the out reg is the same as base
455 if (out_reg != reg_base && out_reg != reg_index)
458 /* turn back address mode */
459 same_cls = arch_register_get_class(same_reg)
460 mem = get_irn_n(irn, n_ia32_mem);
461 assert(get_irn_mode(mem) == mode_M);
/* materialize the folded memory operand as an explicit Load of the
 * matching register class */
462 if (same_cls == &ia32_reg_classes[CLASS_ia32_gp]) {
463 load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
464 pnres = pn_ia32_Load_res;
465 pnmem = pn_ia32_Load_M;
467 } else if (same_cls == &ia32_reg_classes[CLASS_ia32_xmm]) {
468 load = new_rd_ia32_xLoad(dbgi, irg, block, base, index, mem,
469 get_ia32_ls_mode(irn));
470 pnres = pn_ia32_xLoad_res;
471 pnmem = pn_ia32_xLoad_M;
474 panic("cannot turn back address mode for this register class");
477 /* copy address mode information to load */
478 set_ia32_op_type(load, ia32_AddrModeS);
479 ia32_copy_am_attrs(load, irn);
480 if (is_ia32_is_reload(irn))
481 set_ia32_is_reload(load);
483 /* insert the load into schedule */
484 sched_add_before(irn, load);
486 DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
488 load_res = new_r_Proj(cg->irg, block, load, proj_mode, pnres);
489 arch_set_irn_register(cg->arch_env, load_res, out_reg);
491 /* set the new input operand */
/* an Immediate can only sit on the right, so the loaded value must
 * replace the left operand in that case */
492 if (is_ia32_Immediate(get_irn_n(irn, n_ia32_binary_right)))
493 set_irn_n(irn, n_ia32_binary_left, load_res);
495 set_irn_n(irn, n_ia32_binary_right, load_res);
/* redirect memory Projs of the old node to the new Load */
496 if (get_irn_mode(irn) == mode_T) {
497 const ir_edge_t *edge, *next;
498 foreach_out_edge_safe(irn, edge, next) {
499 ir_node *node = get_edge_src_irn(edge);
500 int pn = get_Proj_proj(node);
501 if (pn == pn_ia32_res) {
503 } else if (pn == pn_ia32_mem) {
504 set_Proj_pred(node, load);
505 set_Proj_proj(node, pnmem);
507 panic("Unexpected Proj");
510 set_irn_mode(irn, mode_Iu);
513 /* this is a normal node now */
514 set_irn_n(irn, n_ia32_base, noreg);
515 set_irn_n(irn, n_ia32_index, noreg);
516 set_ia32_op_type(irn, ia32_Normal);
523 * Block walker: finishes a block
/* Runs three passes over the block's schedule: (1) turn broken
 * source-address-mode nodes back into Load + op, (2) rewrite Sub/xSub
 * whose output clobbers src2, (3) insert copies/swaps/perms for
 * unfulfilled should_be_same constraints.
 * NOTE(review): loop closing braces and some declarations are missing
 * from this extract. */
525 static void ia32_finish_irg_walker(ir_node *block, void *env)
527 ia32_code_gen_t *cg = env;
530 /* first: turn back AM source if necessary */
531 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
532 next = sched_next(irn);
533 fix_am_source(irn, env);
536 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
/* NOTE(review): this inner 'cg' shadows the outer one — harmless but
 * redundant; candidate for removal in the full source */
537 ia32_code_gen_t *cg = env;
539 next = sched_next(irn);
541 /* check if there is a sub which need to be transformed */
542 if (is_ia32_Sub(irn) || is_ia32_xSub(irn)) {
543 ia32_transform_sub_to_neg_add(irn, cg);
547 /* second: insert copies and finish irg */
548 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
549 next = sched_next(irn);
550 if (is_ia32_irn(irn)) {
551 /* some nodes are just a bit less efficient, but need no fixing if the
552 * should be same requirement is not fulfilled */
553 if (need_constraint_copy(irn))
554 assure_should_be_same_requirements(cg, irn);
560 * Block walker: pushes all blocks on a wait queue
/* env is the waitq; the declaration casting it is missing from this
 * extract. Simply enqueues every visited block. */
562 static void ia32_push_on_queue_walker(ir_node *block, void *env)
565 waitq_put(wq, block);
570 * Add Copy nodes for not fulfilled should_be_equal constraints
/* Entry point of the finish phase: collects all blocks first, then
 * processes them, because the per-block walker mutates the graph and
 * must not run inside a live graph walk.
 * NOTE(review): the trailing cleanup (presumably del_waitq(wq)) is
 * missing from this extract — confirm against the full source. */
572 void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg)
574 waitq *wq = new_waitq();
576 /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
577 irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
579 while (! waitq_empty(wq)) {
580 ir_node *block = waitq_get(wq);
581 ia32_finish_irg_walker(block, cg);
586 void ia32_init_finish(void)
588 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.finish");