2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief This file implements functions to finalize the irg for emit.
23 * @author Christian Wuerdig
39 #include "../bearch_t.h"
40 #include "../besched_t.h"
41 #include "../benode_t.h"
43 #include "bearch_ia32_t.h"
44 #include "ia32_finish.h"
45 #include "ia32_new_nodes.h"
46 #include "ia32_map_regs.h"
47 #include "ia32_transform.h"
48 #include "ia32_dbg_stat.h"
49 #include "ia32_optimize.h"
50 #include "gen_ia32_regalloc_if.h"
52 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
55 * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG.
56 * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
58 static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
60 ir_node *in1, *in2, *noreg, *nomem, *res;
61 ir_node *noreg_fp, *block;
62 ir_mode *mode = get_irn_mode(irn);
63 dbg_info *dbg = get_irn_dbg_info(irn);
64 const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots;
67 /* Return if not a Sub or xSub */
68 if (!is_ia32_Sub(irn) && !is_ia32_xSub(irn))
70 /* fix_am will solve this for AddressMode variants */
71 if(get_ia32_op_type(irn) != ia32_Normal)
74 noreg = ia32_new_NoReg_gp(cg);
75 noreg_fp = ia32_new_NoReg_fp(cg);
76 nomem = new_rd_NoMem(cg->irg);
77 in1 = get_irn_n(irn, 2);
78 in2 = get_irn_n(irn, 3);
79 in1_reg = arch_get_irn_register(cg->arch_env, in1);
80 in2_reg = arch_get_irn_register(cg->arch_env, in2);
81 out_reg = get_ia32_out_reg(irn, 0);
83 assert(get_irn_mode(irn) != mode_T);
86 block = get_nodes_block(irn);
88 /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
89 if (out_reg != in2_reg)
92 /* generate the neg src2 */
93 if(mode_is_float(mode)) {
96 ir_mode *op_mode = get_ia32_ls_mode(irn);
98 res = new_rd_ia32_xXor(dbg, irg, block, noreg, noreg, in2, noreg_fp, nomem);
99 size = get_mode_size_bits(op_mode);
100 entity = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
101 set_ia32_am_sc(res, entity);
102 set_ia32_op_type(res, ia32_AddrModeS);
103 set_ia32_ls_mode(res, op_mode);
105 res = new_rd_ia32_Neg(dbg, irg, block, in2);
107 arch_set_irn_register(cg->arch_env, res, in2_reg);
109 /* add to schedule */
110 sched_add_before(irn, res);
112 /* generate the add */
113 if (mode_is_float(mode)) {
114 res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, res, in1, nomem);
115 set_ia32_am_support(res, ia32_am_Source, ia32_am_binary);
116 set_ia32_ls_mode(res, get_ia32_ls_mode(irn));
118 res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, res, in1, nomem);
119 set_ia32_am_support(res, ia32_am_Full, ia32_am_binary);
120 set_ia32_commutative(res);
123 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
125 slots = get_ia32_slots(res);
128 /* exchange the add and the sub */
129 edges_reroute(irn, res, irg);
131 /* add to schedule */
132 sched_add_before(irn, res);
134 /* remove the old sub */
136 arity = get_irn_arity(irn);
137 for(i = 0; i < arity; ++i) {
138 set_irn_n(irn, i, new_Bad());
141 DBG_OPT_SUB2NEGADD(irn, res);
/* Returns non-zero iff @p node is the code generator's GP "NoReg" placeholder. */
144 static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
146 return node == cg->noreg_gp;
/*
 * Builds an ia32 Immediate node for the integer @p val, placed in the start
 * block and assigned the virtual GP_NOREG register (immediates occupy no
 * real register).  NOTE(review): the remaining constructor arguments and the
 * return statement are on lines elided from this chunk.
 */
149 static ir_node *create_immediate_from_int(ia32_code_gen_t *cg, int val)
151 ir_graph *irg = current_ir_graph;
152 ir_node *start_block = get_irg_start_block(irg);
153 ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block, NULL,
155 arch_set_irn_register(cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]);
/*
 * Builds an ia32 Immediate node from the address-mode attributes of @p node
 * (symbolic constant entity, its sign flag, and the integer offset), so the
 * AM constant can be used as an explicit operand.
 */
160 static ir_node *create_immediate_from_am(ia32_code_gen_t *cg,
163 ir_graph *irg = get_irn_irg(node);
164 ir_node *block = get_nodes_block(node);
165 int offset = get_ia32_am_offs_int(node);
166 int sc_sign = is_ia32_am_sc_sign(node);
167 ir_entity *entity = get_ia32_am_sc(node);
170 res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset);
/* immediates occupy no real register -> virtual GP_NOREG */
171 arch_set_irn_register(cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]);
/* Returns non-zero iff the node's address-mode constant is exactly +1
 * (offset 1 and no symbolic entity) — candidate for an Inc. */
175 static int is_am_one(const ir_node *node)
177 int offset = get_ia32_am_offs_int(node);
178 ir_entity *entity = get_ia32_am_sc(node);
180 return offset == 1 && entity == NULL;
/* Returns non-zero iff the node's address-mode constant is exactly -1
 * (offset -1 and no symbolic entity) — candidate for a Dec. */
183 static int is_am_minus_one(const ir_node *node)
185 int offset = get_ia32_am_offs_int(node);
186 ir_entity *entity = get_ia32_am_sc(node);
188 return offset == -1 && entity == NULL;
192 * Transforms a LEA into an Add or SHL if possible.
193 * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
/*
 * Post-RA peephole: turns a Lea whose output register coincides with its
 * base or index register back into a cheaper Add / Inc / Dec / Shl.
 * NOTE(review): this chunk elides many interior lines (early returns, goto
 * labels between the Add/Inc/Dec/Shl construction sites, the exchange of the
 * old Lea); comments describe only the visible logic.
 */
195 static void ia32_transform_lea_to_add_or_shl(ir_node *node, ia32_code_gen_t *cg)
197 const arch_env_t *arch_env = cg->arch_env;
198 ir_graph *irg = current_ir_graph;
201 const arch_register_t *base_reg;
202 const arch_register_t *index_reg;
203 const arch_register_t *out_reg;
214 if(!is_ia32_Lea(node))
217 base = get_irn_n(node, n_ia32_Lea_base);
218 index = get_irn_n(node, n_ia32_Lea_index);
/* normalise NoReg inputs (elided branch bodies presumably NULL them out —
 * TODO confirm, see the base == NULL && index == NULL check below) */
220 if(is_noreg(cg, base)) {
224 base_reg = arch_get_irn_register(arch_env, base);
226 if(is_noreg(cg, index)) {
230 index_reg = arch_get_irn_register(arch_env, index);
233 if(base == NULL && index == NULL) {
234 /* we shouldn't construct these in the first place... */
236 ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
241 out_reg = arch_get_irn_register(arch_env, node);
242 scale = get_ia32_am_scale(node);
243 assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
244 /* check if we have immediate values (frame entities should already be
245 * expressed in the offsets) */
246 if(get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
252 /* we can transform leas where the out register is the same as either the
253 * base or index register back to an Add or Shl */
254 if(out_reg == base_reg) {
257 if(!has_immediates) {
258 ir_fprintf(stderr, "Optimisation warning: found lea which is "
/* out == base: prefer Inc/Dec for +/-1 when the ISA option allows it,
 * otherwise add the AM constant as an immediate operand */
263 if(cg->isa->opt & IA32_OPT_INCDEC) {
264 if(is_am_one(node)) {
267 if(is_am_minus_one(node)) {
271 op2 = create_immediate_from_am(cg, node);
274 if(scale == 0 && !has_immediates) {
279 /* can't create an add */
281 } else if(out_reg == index_reg) {
/* out == index: same Inc/Dec/immediate game; a pure scale becomes a Shl
 * with the scale amount as immediate */
283 if(has_immediates && scale == 0) {
285 if(cg->isa->opt & IA32_OPT_INCDEC) {
286 if(is_am_one(node)) {
289 if(is_am_minus_one(node)) {
293 op2 = create_immediate_from_am(cg, node);
295 } else if(!has_immediates && scale > 0) {
297 op2 = create_immediate_from_int(cg, scale);
299 } else if(!has_immediates) {
301 ir_fprintf(stderr, "Optimisation warning: found lea which is "
305 } else if(scale == 0 && !has_immediates) {
310 /* can't create an add */
313 /* can't create an add */
/* construction sites (reached via elided gotos): Add / Inc / Dec / Shl */
318 dbgi = get_irn_dbg_info(node);
319 block = get_nodes_block(node);
320 noreg = ia32_new_NoReg_gp(cg);
322 res = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, op1, op2, nomem);
323 arch_set_irn_register(arch_env, res, out_reg);
324 set_ia32_commutative(res);
328 dbgi = get_irn_dbg_info(node);
329 block = get_nodes_block(node);
330 res = new_rd_ia32_Inc(dbgi, irg, block, op1);
331 arch_set_irn_register(arch_env, res, out_reg);
335 dbgi = get_irn_dbg_info(node);
336 block = get_nodes_block(node);
337 res = new_rd_ia32_Dec(dbgi, irg, block, op1);
338 arch_set_irn_register(arch_env, res, out_reg);
342 dbgi = get_irn_dbg_info(node);
343 block = get_nodes_block(node);
344 noreg = ia32_new_NoReg_gp(cg);
346 res = new_rd_ia32_Shl(dbgi, irg, block, op1, op2);
347 arch_set_irn_register(arch_env, res, out_reg);
351 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));
353 /* add new ADD/SHL to schedule */
354 sched_add_before(node, res);
356 DBG_OPT_LEA2ADD(node, res);
358 /* remove the old LEA */
361 /* exchange the Add and the LEA */
/*
 * Returns non-zero iff @p irn may need a Copy to satisfy a should_be_same
 * output constraint.  Lea, the Conv variants and the CMov variants are
 * exempt (they are handled differently / tolerate the mismatch).
 */
365 static INLINE int need_constraint_copy(ir_node *irn) {
366 return ! is_ia32_Lea(irn) &&
367 ! is_ia32_Conv_I2I(irn) &&
368 ! is_ia32_Conv_I2I8Bit(irn) &&
369 ! is_ia32_TestCMov(irn) &&
370 ! is_ia32_CmpCMov(irn);
374 * Insert copies for all ia32 nodes where the should_be_same requirement
376 * Transform Sub into Neg -- Add if IN2 == OUT
/*
 * For each output of @p node with a should_be_same register requirement that
 * the allocator did not satisfy, repair it by (in order of preference):
 * inserting a Copy, swapping the operands of a commutative node, or
 * inserting a Perm of the two conflicting inputs.  Finally rewrites
 * unordered xCmp variants by negating the predicate.
 * NOTE(review): this chunk elides interior lines (several declarations,
 * continue statements and goto labels); comments describe visible logic only.
 */
378 static void assure_should_be_same_requirements(ia32_code_gen_t *cg,
381 ir_graph *irg = cg->irg;
382 const arch_env_t *arch_env = cg->arch_env;
383 const arch_register_req_t **reqs;
384 const arch_register_t *out_reg, *in_reg;
386 ir_node *in_node, *block;
387 ia32_op_type_t op_tp;
389 if(!is_ia32_irn(node))
392 /* some nodes are just a bit less efficient, but need no fixing if the
393 * should be same requirement is not fulfilled */
394 if(!need_constraint_copy(node))
397 op_tp = get_ia32_op_type(node);
398 reqs = get_ia32_out_req_all(node);
399 n_res = get_ia32_n_res(node);
400 block = get_nodes_block(node);
402 /* check all OUT requirements, if there is a should_be_same */
403 for (i = 0; i < n_res; i++) {
410 ir_node *uses_out_reg;
411 const arch_register_req_t *req = reqs[i];
412 const arch_register_class_t *class;
413 int uses_out_reg_pos;
415 if (!arch_register_req_is(req, should_be_same))
418 same_pos = req->other_same;
420 /* get in and out register */
421 out_reg = get_ia32_out_reg(node, i);
422 in_node = get_irn_n(node, same_pos);
423 in_reg = arch_get_irn_register(arch_env, in_node);
425 /* requirement already fulfilled? */
426 if (in_reg == out_reg)
428 /* unknowns can be changed to any register we want on emitting */
429 if (is_unknown_reg(in_reg))
431 class = arch_register_get_class(in_reg);
432 assert(class == arch_register_get_class(out_reg));
434 /* check if any other input operands uses the out register */
435 arity = get_irn_arity(node);
437 uses_out_reg_pos = -1;
438 for(i2 = 0; i2 < arity; ++i2) {
439 ir_node *in = get_irn_n(node, i2);
440 const arch_register_t *in_reg = arch_get_irn_register(arch_env, in);
442 if(in_reg != out_reg)
/* two *different* input nodes in the out register is impossible
 * with a correct allocation */
445 if(uses_out_reg != NULL && in != uses_out_reg) {
446 panic("invalid register allocation");
/* remember the unique position; -1 once it occurs more than once */
449 if(uses_out_reg_pos >= 0)
450 uses_out_reg_pos = -1; /* multiple inputs... */
452 uses_out_reg_pos = i2;
455 /* no-one else is using the out reg, we can simply copy it
456 * (the register can't be live since the operation will override it
458 if(uses_out_reg == NULL) {
459 ir_node *copy = be_new_Copy(class, irg, block, in_node);
460 DBG_OPT_2ADDRCPY(copy);
462 /* destination is the out register */
463 arch_set_irn_register(arch_env, copy, out_reg);
465 /* insert copy before the node into the schedule */
466 sched_add_before(node, copy);
469 set_irn_n(node, same_pos, copy);
471 DBG((dbg, LEVEL_1, "created copy %+F for should be same argument "
472 "at input %d of %+F\n", copy, same_pos, node));
476 /* for commutative nodes we can simply swap the left/right */
477 if(is_ia32_commutative(node) && uses_out_reg_pos == 3) {
478 ia32_swap_left_right(node);
479 DBG((dbg, LEVEL_1, "swapped left/right input of %+F to resolve "
480 "should be same constraint\n", node));
485 ir_fprintf(stderr, "Note: need perm to resolve should_be_same constraint at %+F (this is unsafe and should not happen in theory...)\n", node);
487 /* the out reg is used as node input: we need to permute our input
488 * and the other (this is allowed, since the other node can't be live
489 * after! the operation as we will override the register. */
491 in[1] = uses_out_reg;
492 perm = be_new_Perm(class, irg, block, 2, in);
494 perm_proj0 = new_r_Proj(irg, block, perm, get_irn_mode(in[0]), 0);
495 perm_proj1 = new_r_Proj(irg, block, perm, get_irn_mode(in[1]), 1);
497 arch_set_irn_register(arch_env, perm_proj0, out_reg);
498 arch_set_irn_register(arch_env, perm_proj1, in_reg);
500 sched_add_before(node, perm);
502 DBG((dbg, LEVEL_1, "created perm %+F for should be same argument "
503 "at input %d of %+F (need permutate with %+F)\n", perm, same_pos,
504 node, uses_out_reg));
506 /* use the perm results */
507 for(i2 = 0; i2 < arity; ++i2) {
508 ir_node *in = get_irn_n(node, i2);
511 set_irn_n(node, i2, perm_proj0);
512 } else if(in == uses_out_reg) {
513 set_irn_n(node, i2, perm_proj1);
518 /* check xCmp: try to avoid unordered cmp */
519 if ((is_ia32_xCmp(node) || is_ia32_xCmpCMov(node) || is_ia32_xCmpSet(node)) &&
520 op_tp == ia32_Normal)
522 long pnc = get_ia32_pncode(node);
524 if (pnc & pn_Cmp_Uo) {
526 int idx1 = 2, idx2 = 3;
528 if (is_ia32_xCmpCMov(node)) {
533 /** Matze: TODO this looks wrong, I assume we should exchange
534 * the proj numbers and not the inputs... */
/* swap the two compare operands and negate the predicate */
536 tmp = get_irn_n(node, idx1);
537 set_irn_n(node, idx1, get_irn_n(node, idx2));
538 set_irn_n(node, idx2, tmp);
540 set_ia32_pncode(node, get_negated_pnc(pnc, mode_E));
547 * We have a source address mode node with base or index register equal to
548 * result register and unfulfilled should_be_same requirement. The constraint
549 * handler will insert a copy from the remaining input operand to the result
550 * register -> base or index is broken then.
551 * Solution: Turn back this address mode into explicit Load + Operation.
/*
 * Undo source address mode when an unfulfilled should_be_same constraint
 * would make the constraint handler clobber the base/index register: emit an
 * explicit Load carrying the AM attributes, feed its result Proj into the
 * operation, and downgrade the node to ia32_Normal.
 * NOTE(review): interior lines (several declarations, continue statements,
 * the proj_mode selection, parts of the mode_T edge loop) are elided from
 * this chunk; comments describe only the visible logic.
 */
553 static void fix_am_source(ir_node *irn, void *env) {
554 ia32_code_gen_t *cg = env;
555 const arch_env_t *arch_env = cg->arch_env;
559 const arch_register_t *reg_base;
560 const arch_register_t *reg_index;
561 const arch_register_req_t **reqs;
564 /* check only ia32 nodes with source address mode */
565 if (! is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
567 /* only need to fix binary operations */
568 if (get_ia32_am_arity(irn) != ia32_am_binary)
571 base = get_irn_n(irn, 0);
572 index = get_irn_n(irn, 1);
574 reg_base = arch_get_irn_register(arch_env, base);
575 reg_index = arch_get_irn_register(arch_env, index);
576 reqs = get_ia32_out_req_all(irn);
578 noreg = ia32_new_NoReg_gp(cg);
580 n_res = get_ia32_n_res(irn);
582 for (i = 0; i < n_res; i++) {
583 if (arch_register_req_is(reqs[i], should_be_same)) {
584 /* get in and out register */
585 const arch_register_t *out_reg = get_ia32_out_reg(irn, i);
586 int same_pos = reqs[i]->other_same;
587 ir_node *same_node = get_irn_n(irn, same_pos);
588 const arch_register_t *same_reg
589 = arch_get_irn_register(arch_env, same_node);
590 const arch_register_class_t *same_cls;
591 ir_graph *irg = cg->irg;
592 dbg_info *dbgi = get_irn_dbg_info(irn);
593 ir_node *block = get_nodes_block(irn);
599 /* should_be same constraint is fulfilled, nothing to do */
600 if(out_reg == same_reg)
603 /* we only need to do something if the out reg is the same as base
605 if (out_reg != reg_base && out_reg != reg_index)
608 /* turn back address mode */
/* pick the Load flavour by the register class of the conflicting input */
609 same_cls = arch_register_get_class(same_reg);
610 if (same_cls == &ia32_reg_classes[CLASS_ia32_gp]) {
611 load = new_rd_ia32_Load(dbgi, irg, block, base, index,
613 assert(get_irn_mode(get_irn_n(irn,4)) == mode_M);
614 pnres = pn_ia32_Load_res;
616 } else if (same_cls == &ia32_reg_classes[CLASS_ia32_xmm]) {
617 load = new_rd_ia32_xLoad(dbgi, irg, block, base, index,
619 get_ia32_ls_mode(irn));
620 assert(get_irn_mode(get_irn_n(irn,4)) == mode_M);
621 pnres = pn_ia32_xLoad_res;
624 panic("cannot turn back address mode for this register class");
627 /* copy address mode information to load */
628 set_ia32_ls_mode(load, get_ia32_ls_mode(irn));
629 set_ia32_op_type(load, ia32_AddrModeS);
630 set_ia32_am_scale(load, get_ia32_am_scale(irn));
631 set_ia32_am_sc(load, get_ia32_am_sc(irn));
632 if(is_ia32_am_sc_sign(irn))
633 set_ia32_am_sc_sign(load);
634 add_ia32_am_offs_int(load, get_ia32_am_offs_int(irn));
635 set_ia32_frame_ent(load, get_ia32_frame_ent(irn));
636 if (is_ia32_use_frame(irn))
637 set_ia32_use_frame(load);
639 /* insert the load into schedule */
640 sched_add_before(irn, load);
642 DBG((dbg, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
644 load_res = new_r_Proj(cg->irg, block, load, proj_mode, pnres);
645 arch_set_irn_register(cg->arch_env, load_res, out_reg);
647 /* set the new input operand */
648 set_irn_n(irn, 3, load_res);
/* for a mode_T node, reroute its memory Proj(s) to the new Load and
 * collapse the node to a plain data mode */
649 if(get_irn_mode(irn) == mode_T) {
650 const ir_edge_t *edge, *next;
651 foreach_out_edge_safe(irn, edge, next) {
652 ir_node *node = get_edge_src_irn(edge);
653 int pn = get_Proj_proj(node);
658 set_Proj_pred(node, load);
661 set_irn_mode(irn, mode_Iu);
664 /* this is a normal node now */
665 set_irn_n(irn, 0, noreg);
666 set_irn_n(irn, 1, noreg);
667 set_ia32_op_type(irn, ia32_Normal);
674 * Block walker: finishes a block
/*
 * Block walker: runs the finishing passes over one block's schedule in
 * three sweeps.  Each sweep caches sched_next() before transforming, since
 * the transformations insert/replace nodes in the schedule.
 */
676 static void ia32_finish_irg_walker(ir_node *block, void *env) {
677 ia32_code_gen_t *cg = env;
680 /* first: turn back AM source if necessary */
681 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
682 next = sched_next(irn);
683 fix_am_source(irn, env);
686 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
687 ia32_code_gen_t *cg = env;
689 next = sched_next(irn);
691 /* check if there is a sub which needs to be transformed */
692 ia32_transform_sub_to_neg_add(irn, cg);
694 /* transform a LEA into an Add if possible */
695 ia32_transform_lea_to_add_or_shl(irn, cg);
698 /* second: insert copies and finish irg */
699 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
700 next = sched_next(irn);
701 assure_should_be_same_requirements(cg, irn);
706 * Block walker: pushes all blocks on a wait queue
/* Block walker: enqueues @p block on the waitq passed via @p env. */
708 static void ia32_push_on_queue_walker(ir_node *block, void *env) {
710 waitq_put(wq, block);
715 * Add Copy nodes for unfulfilled should_be_equal constraints
/*
 * Entry point of the finishing phase: collect all blocks first, then run
 * ia32_finish_irg_walker on each.  The two-step scheme is needed because the
 * walker itself starts further graph walks.
 */
717 void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
718 waitq *wq = new_waitq();
720 /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
721 irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
723 while (! waitq_empty(wq)) {
724 ir_node *block = waitq_get(wq);
725 ia32_finish_irg_walker(block, cg);
/* One-time module initialisation: registers the debug channel used by the
 * DBG() statements in this file. */
730 void ia32_init_finish(void)
732 FIRM_DBG_REGISTER(dbg, "firm.be.ia32.finish");