2 * This file implements functions to finalize the irg for emit.
3 * @author Christian Wuerdig
18 #include "../bearch.h"
19 #include "../besched_t.h"
20 #include "../benode_t.h"
22 #include "bearch_ia32_t.h"
23 #include "ia32_finish.h"
24 #include "ia32_new_nodes.h"
25 #include "ia32_map_regs.h"
26 #include "ia32_transform.h"
27 #include "ia32_dbg_stat.h"
28 #include "ia32_optimize.h"
29 #include "gen_ia32_regalloc_if.h"
32 * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG.
33 * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
/* NOTE(review): this excerpt elides lines (gaps in the embedded numbering):
 * the early returns after the guard conditions, the else branches, closing
 * braces and the declarations of irg/size/name/arity/i are not visible here.
 * Comments below only describe what the visible lines establish. */
35 static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
37 ir_node *in1, *in2, *noreg, *nomem, *res;
38 ir_node *noreg_fp, *block;
39 ir_mode *mode = get_irn_mode(irn);
40 dbg_info *dbg = get_irn_dbg_info(irn);
41 const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots;
44 /* Return if AM node or not a Sub or xSub */
45 if (!(is_ia32_Sub(irn) || is_ia32_xSub(irn)) || get_ia32_op_type(irn) != ia32_Normal)
48 noreg = ia32_new_NoReg_gp(cg);
49 noreg_fp = ia32_new_NoReg_fp(cg);
50 nomem = new_rd_NoMem(cg->irg);
/* the two data operands of Sub/xSub sit at inputs 2 and 3 */
51 in1 = get_irn_n(irn, 2);
52 in2 = get_irn_n(irn, 3);
53 in1_reg = arch_get_irn_register(cg->arch_env, in1);
54 in2_reg = arch_get_irn_register(cg->arch_env, in2);
55 out_reg = get_ia32_out_reg(irn, 0);
58 block = get_nodes_block(irn);
60 /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
61 if (!REGS_ARE_EQUAL(out_reg, in2_reg))
64 /* generate the neg src2 */
65 if(mode_is_float(mode)) {
/* SSE: negate by XOR-ing with the sign-bit constant (SSIGN for 32 bit,
 * DSIGN for 64 bit), addressed via source address mode */
69 res = new_rd_ia32_xEor(dbg, irg, block, noreg, noreg, in2, noreg_fp, nomem);
70 size = get_mode_size_bits(mode);
71 name = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
72 set_ia32_am_sc(res, name);
73 set_ia32_op_type(res, ia32_AddrModeS);
74 set_ia32_ls_mode(res, mode);
/* integer case (presumably the elided else branch): plain Minus */
76 res = new_rd_ia32_Minus(dbg, irg, block, noreg, noreg, in2, nomem);
/* the negated value reuses src2's register, so the Add below produces out_reg */
78 arch_set_irn_register(cg->arch_env, res, in2_reg);
81 sched_add_before(irn, res);
83 /* generate the add */
84 if (mode_is_float(mode)) {
85 res = new_rd_ia32_xAdd(dbg, irg, block, noreg, noreg, res, in1, nomem);
86 set_ia32_am_support(res, ia32_am_Source);
89 res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, res, in1, nomem);
90 set_ia32_am_support(res, ia32_am_Full);
91 set_ia32_commutative(res);
94 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
96 slots = get_ia32_slots(res);
99 /* exchange the add and the sub */
100 edges_reroute(irn, res, irg);
102 /* add to schedule */
103 sched_add_before(irn, res);
105 /* remove the old sub: detach all inputs so the node becomes dead */
107 arity = get_irn_arity(irn);
108 for(i = 0; i < arity; ++i) {
109 set_irn_n(irn, i, new_Bad());
112 DBG_OPT_SUB2NEGADD(irn, res);
116 * Transforms a LEA into an Add if possible
117 * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
/* NOTE(review): excerpt elides lines (gaps in the embedded numbering): early
 * returns, the case labels of the switch, the op1/op2 assignments inside the
 * cases, and the final exchange of Add and LEA are not fully visible here. */
119 static void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) {
120 ia32_am_flavour_t am_flav;
122 dbg_info *dbg = get_irn_dbg_info(irn);
125 ir_node *nomem, *noreg, *base, *index, *op1, *op2;
127 const char *offs = NULL;
128 const arch_register_t *out_reg, *base_reg, *index_reg;
129 int imm_tp = ia32_ImmConst;
132 if (! is_ia32_Lea(irn))
135 am_flav = get_ia32_am_flavour(irn);
137 if (get_ia32_am_sc(irn))
140 /* only some LEAs can be transformed to an Add */
141 if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_OI && am_flav != ia32_am_BI)
144 noreg = ia32_new_NoReg_gp(cg);
145 nomem = new_rd_NoMem(cg->irg);
/* LEA inputs: 0 = base, 1 = index */
148 base = get_irn_n(irn, 0);
149 index = get_irn_n(irn,1);
/* flavours containing an offset carry it as an immediate string */
151 if (am_flav & ia32_O) {
152 offs = get_ia32_am_offs(irn);
155 ident *id = get_ia32_am_sc(irn);
158 offs = get_id_str(id);
159 imm_tp = ia32_ImmSymConst;
161 /* offset has an explicit sign -> we need to skip + */
162 else if (offs[0] == '+')
166 out_reg = arch_get_irn_register(cg->arch_env, irn);
167 base_reg = arch_get_irn_register(cg->arch_env, base);
168 index_reg = arch_get_irn_register(cg->arch_env, index);
171 block = get_nodes_block(irn);
/* decide per flavour whether the two-address Add can reproduce the LEA:
 * its destination must coincide with one of the input registers */
173 switch(get_ia32_am_flavour(irn)) {
175 /* out register must be same as base register */
176 if (! REGS_ARE_EQUAL(out_reg, base_reg))
182 /* out register must be same as base register */
183 if (! REGS_ARE_EQUAL(out_reg, base_reg))
190 /* out register must be same as index register */
191 if (! REGS_ARE_EQUAL(out_reg, index_reg))
198 /* out register must be same as one in register */
199 if (REGS_ARE_EQUAL(out_reg, base_reg)) {
203 else if (REGS_ARE_EQUAL(out_reg, index_reg)) {
208 /* in registers are different from out -> no Add possible */
/* build the replacement Add; op1/op2 are set in the (elided) switch cases */
215 res = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, op1, op2, nomem);
216 arch_set_irn_register(cg->arch_env, res, out_reg);
217 set_ia32_op_type(res, ia32_Normal);
218 set_ia32_commutative(res);
/* attach the LEA's offset/symconst as immediate operand of the Add */
221 set_ia32_cnst(res, offs);
222 set_ia32_immop_type(res, imm_tp);
225 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
227 /* add Add to schedule */
228 sched_add_before(irn, res);
230 DBG_OPT_LEA2ADD(irn, res);
232 /* remove the old LEA */
235 /* exchange the Add and the LEA */
/* Returns non-zero iff the node may need a should_be_same constraint copy.
 * The listed node kinds are excluded because they are handled elsewhere
 * (LEA is rebuilt, Conv/CMov/Set variants have their own fixups).
 * NOTE(review): the `return` line with the first conjunct is elided from
 * this excerpt (gap in the embedded numbering before 241). */
239 static INLINE int need_constraint_copy(ir_node *irn) {
241 ! is_ia32_Lea(irn) && \
242 ! is_ia32_Conv_I2I(irn) && \
243 ! is_ia32_Conv_I2I8Bit(irn) && \
244 ! is_ia32_CmpCMov(irn) && \
245 ! is_ia32_PsiCondCMov(irn) && \
246 ! is_ia32_CmpSet(irn);
250 * Insert copies for all ia32 nodes where the should_be_same requirement
252 * Transform Sub into Neg -- Add if IN2 == OUT
/* NOTE(review): excerpt elides lines (gaps in the embedded numbering):
 * declarations of i/n_res/tmp, several closing braces, the `continue`s after
 * the guards, and the idx1/idx2 adjustment for xCmpCMov are not visible. */
254 static void ia32_finish_node(ir_node *irn, void *env) {
255 ia32_code_gen_t *cg = env;
256 const ia32_register_req_t **reqs;
257 const arch_register_t *out_reg, *in_reg, *in2_reg;
259 ir_node *copy, *in_node, *block, *in2_node;
260 ia32_op_type_t op_tp;
262 if (is_ia32_irn(irn)) {
263 /* AM Dest nodes don't produce any values */
264 op_tp = get_ia32_op_type(irn);
265 if (op_tp == ia32_AddrModeD)
268 reqs = get_ia32_out_req_all(irn);
269 n_res = get_ia32_n_res(irn);
270 block = get_nodes_block(irn);
272 /* check all OUT requirements, if there is a should_be_same */
273 if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn))
275 for (i = 0; i < n_res; i++) {
276 if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
277 /* get in and out register */
278 out_reg = get_ia32_out_reg(irn, i);
279 in_node = get_irn_n(irn, reqs[i]->same_pos);
280 in_reg = arch_get_irn_register(cg->arch_env, in_node);
282 /* don't copy ignore nodes */
283 if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node))
286 /* check if in and out register are equal */
287 if (! REGS_ARE_EQUAL(out_reg, in_reg)) {
288 /* in case of a commutative op: just exchange the in's */
289 /* beware: the current op could be everything, so test for ia32 */
290 /* commutativity first before getting the second in */
291 if (is_ia32_commutative(irn)) {
/* `same_pos ^ 1` toggles between the two data operand positions */
292 in2_node = get_irn_n(irn, reqs[i]->same_pos ^ 1);
293 in2_reg = arch_get_irn_register(cg->arch_env, in2_node);
295 if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
296 set_irn_n(irn, reqs[i]->same_pos, in2_node);
297 set_irn_n(irn, reqs[i]->same_pos ^ 1, in_node);
/* swapping did not help -> satisfy the constraint with an explicit Copy */
304 DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));
305 /* create copy from in register */
306 copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);
308 DBG_OPT_2ADDRCPY(copy);
310 /* destination is the out register */
311 arch_set_irn_register(cg->arch_env, copy, out_reg);
313 /* insert copy before the node into the schedule */
314 sched_add_before(irn, copy);
/* the constrained input now reads from the copy */
317 set_irn_n(irn, reqs[i]->same_pos, copy);
324 /* check xCmp: try to avoid unordered cmp */
325 if ((is_ia32_xCmp(irn) || is_ia32_xCmpCMov(irn) || is_ia32_xCmpSet(irn)) &&
326 op_tp == ia32_Normal &&
327 ! is_ia32_ImmConst(irn) && ! is_ia32_ImmSymConst(irn))
329 long pnc = get_ia32_pncode(irn);
331 if (pnc & pn_Cmp_Uo) {
/* swap the two compare operands and negate the relation to drop Uo */
333 int idx1 = 2, idx2 = 3;
335 if (is_ia32_xCmpCMov(irn)) {
340 tmp = get_irn_n(irn, idx1);
341 set_irn_n(irn, idx1, get_irn_n(irn, idx2));
342 set_irn_n(irn, idx2, tmp);
344 set_ia32_pncode(irn, get_negated_pnc(pnc, mode_D));
349 If we have a CondJmp/CmpSet/xCmpSet with immediate,
350 we need to check if it's the right operand, otherwise
351 we have to change it, as CMP doesn't support immediate
/* flip the addressing side and invert the relation accordingly */
355 if ((is_ia32_CondJmp(irn) || is_ia32_CmpSet(irn) || is_ia32_xCmpSet(irn)) &&
356 (is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) &&
357 op_tp == ia32_AddrModeS)
359 set_ia32_op_type(irn, ia32_AddrModeD);
360 set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
369 * We have a source address mode node with base or index register equal to
370 * result register. The constraint handler will insert a copy from the
371 * remaining input operand to the result register -> base or index is
373 * Solution: Turn back this address mode into explicit Load + Operation.
/* NOTE(review): excerpt elides lines (gaps in the embedded numbering):
 * the early returns, declarations of i/n_res/load/pnres and several closing
 * braces are not visible here. */
375 static void fix_am_source(ir_node *irn, void *env) {
376 ia32_code_gen_t *cg = env;
377 ir_node *base, *index, *noreg;
378 const arch_register_t *reg_base, *reg_index;
379 const ia32_register_req_t **reqs;
382 /* check only ia32 nodes with source address mode */
383 if (! is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
385 /* no need to fix unary operations */
386 if (get_irn_arity(irn) == 4)
/* AM inputs: 0 = base, 1 = index */
389 base = get_irn_n(irn, 0);
390 index = get_irn_n(irn, 1);
392 reg_base = arch_get_irn_register(cg->arch_env, base);
393 reg_index = arch_get_irn_register(cg->arch_env, index);
394 reqs = get_ia32_out_req_all(irn);
396 noreg = ia32_new_NoReg_gp(cg);
398 n_res = get_ia32_n_res(irn);
400 for (i = 0; i < n_res; i++) {
401 if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
402 /* get in and out register */
403 const arch_register_t *out_reg = get_ia32_out_reg(irn, i);
406 there is a constraint for the remaining operand
407 and the result register is equal to base or index register
409 if (reqs[i]->same_pos == 2 &&
410 (REGS_ARE_EQUAL(out_reg, reg_base) || REGS_ARE_EQUAL(out_reg, reg_index)))
412 /* turn back address mode */
413 ir_node *in_node = get_irn_n(irn, 2);
414 const arch_register_t *in_reg = arch_get_irn_register(cg->arch_env, in_node);
415 ir_node *block = get_nodes_block(irn);
416 ir_mode *ls_mode = get_ia32_ls_mode(irn);
/* pick the Load matching the operand's register class; input 4 is
 * presumably the node's memory input -- TODO confirm against node spec */
420 if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_gp]) {
421 load = new_rd_ia32_Load(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
422 pnres = pn_ia32_Load_res;
424 else if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_xmm]) {
425 load = new_rd_ia32_xLoad(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
426 pnres = pn_ia32_xLoad_res;
429 assert(0 && "cannot turn back address mode for this register class");
432 /* copy address mode information to load */
433 set_ia32_ls_mode(load, ls_mode);
434 set_ia32_am_flavour(load, get_ia32_am_flavour(irn));
435 set_ia32_op_type(load, ia32_AddrModeS);
436 set_ia32_am_support(load, ia32_am_Source);
437 set_ia32_am_scale(load, get_ia32_am_scale(irn));
438 set_ia32_am_sc(load, get_ia32_am_sc(irn));
439 add_ia32_am_offs_int(load, get_ia32_am_offs_int(irn));
440 set_ia32_frame_ent(load, get_ia32_frame_ent(irn));
442 if (is_ia32_use_frame(irn))
443 set_ia32_use_frame(load);
445 /* insert the load into schedule */
446 sched_add_before(irn, load);
448 DBG((cg->mod, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
/* project the load result and place it in the result register */
450 load = new_r_Proj(cg->irg, block, load, ls_mode, pnres);
451 arch_set_irn_register(cg->arch_env, load, out_reg);
453 /* insert the load result proj into schedule */
454 sched_add_before(irn, load);
456 /* set the new input operand */
457 set_irn_n(irn, 3, load);
459 /* this is a normal node now: clear base/index and drop the AM type */
460 set_irn_n(irn, 0, noreg);
461 set_irn_n(irn, 1, noreg);
462 set_ia32_op_type(irn, ia32_Normal);
/* Block walker: runs the three finishing phases over one block's schedule.
 * Each pass re-fetches `next` before modifying, so the transformations may
 * insert/replace nodes without breaking iteration.
 * NOTE(review): declarations of irn/next and the loop closing braces are
 * elided from this excerpt (gaps in the embedded numbering). */
470 static void ia32_finish_irg_walker(ir_node *block, void *env) {
473 /* first: turn back AM source if necessary */
474 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
475 next = sched_next(irn);
476 fix_am_source(irn, env);
/* then: local peephole transformations (Sub and LEA) */
479 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
480 ia32_code_gen_t *cg = env;
482 next = sched_next(irn);
484 /* check if there is a sub which needs to be transformed */
485 ia32_transform_sub_to_neg_add(irn, cg);
487 /* transform a LEA into an Add if possible */
488 ia32_transform_lea_to_add(irn, cg);
491 /* second: insert copies and finish irg */
492 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
493 next = sched_next(irn);
494 ia32_finish_node(irn, env);
/* Block walker: enqueues every block on the waitq passed via env.
 * NOTE(review): the line casting env to the waitq `wq` is elided from this
 * excerpt (gap in the embedded numbering before 500). */
498 static void ia32_push_on_queue_walker(ir_node *block, void *env) {
500 waitq_put(wq, block);
505 * Add Copy nodes for not fulfilled should_be_equal constraints
/* Entry point of the finishing phase: collects all blocks into a work queue
 * first, then processes them one by one, because the per-block walker may
 * itself start further walks.
 * NOTE(review): the end of this function (loop close, waitq cleanup) lies
 * past the visible end of this excerpt. */
507 void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
508 waitq *wq = new_waitq();
510 /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
511 irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
513 while (! waitq_empty(wq)) {
514 ir_node *block = waitq_get(wq);
515 ia32_finish_irg_walker(block, cg);