2 * This file implements functions to finalize the irg for emit.
3 * @author Christian Wuerdig
13 #include "../bearch.h"
14 #include "../besched_t.h"
15 #include "../benode_t.h"
17 #include "bearch_ia32_t.h"
18 #include "ia32_finish.h"
19 #include "ia32_new_nodes.h"
20 #include "ia32_map_regs.h"
21 #include "ia32_transform.h"
22 #include "ia32_dbg_stat.h"
23 #include "ia32_optimize.h"
24 #include "gen_ia32_regalloc_if.h"
27 * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG.
28 * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
30 static void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
31 ia32_transform_env_t tenv;
32 ir_node *in1, *in2, *noreg, *nomem, *res;
33 const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots;
35 /* Return if AM node or not a Sub or xSub */
36 if (get_ia32_op_type(irn) != ia32_Normal || !(is_ia32_Sub(irn) || is_ia32_xSub(irn)))
39 noreg = ia32_new_NoReg_gp(cg);
40 nomem = new_rd_NoMem(cg->irg);
41 in1 = get_irn_n(irn, 2);
42 in2 = get_irn_n(irn, 3);
43 in1_reg = arch_get_irn_register(cg->arch_env, in1);
44 in2_reg = arch_get_irn_register(cg->arch_env, in2);
45 out_reg = get_ia32_out_reg(irn, 0);
47 tenv.block = get_nodes_block(irn);
48 tenv.dbg = get_irn_dbg_info(irn);
51 tenv.mode = get_ia32_res_mode(irn);
53 DEBUG_ONLY(tenv.mod = cg->mod;)
55 /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
56 if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
57 /* generate the neg src2 */
58 res = gen_Minus_ex(&tenv, in2);
59 arch_set_irn_register(cg->arch_env, res, in2_reg);
62 sched_add_before(irn, get_Proj_pred(res));
63 sched_add_before(irn, res);
65 /* generate the add */
66 if (mode_is_float(tenv.mode)) {
67 res = new_rd_ia32_xAdd(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, res, in1, nomem);
68 set_ia32_am_support(res, ia32_am_Source);
71 res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, res, in1, nomem);
72 set_ia32_am_support(res, ia32_am_Full);
73 set_ia32_commutative(res);
75 set_ia32_res_mode(res, tenv.mode);
77 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(tenv.cg, irn));
79 slots = get_ia32_slots(res);
83 sched_add_before(irn, res);
85 /* remove the old sub */
88 DBG_OPT_SUB2NEGADD(irn, res);
90 /* exchange the add and the sub */
96 * Transforms a LEA into an Add if possible
97 * THIS FUNCTION MUST BE CALLED AFTER REGISTER ALLOCATION.
99 static void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) {
100 ia32_am_flavour_t am_flav;
103 ir_node *nomem, *noreg, *base, *index, *op1, *op2;
105 ia32_transform_env_t tenv;
106 const arch_register_t *out_reg, *base_reg, *index_reg;
109 if (! is_ia32_Lea(irn))
112 am_flav = get_ia32_am_flavour(irn);
114 if (get_ia32_am_sc(irn))
117 /* only some LEAs can be transformed to an Add */
118 if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_OI && am_flav != ia32_am_BI)
121 noreg = ia32_new_NoReg_gp(cg);
122 nomem = new_rd_NoMem(cg->irg);
125 base = get_irn_n(irn, 0);
126 index = get_irn_n(irn,1);
128 offs = get_ia32_am_offs(irn);
130 /* offset has a explicit sign -> we need to skip + */
131 if (offs && offs[0] == '+')
134 out_reg = arch_get_irn_register(cg->arch_env, irn);
135 base_reg = arch_get_irn_register(cg->arch_env, base);
136 index_reg = arch_get_irn_register(cg->arch_env, index);
138 tenv.block = get_nodes_block(irn);
139 tenv.dbg = get_irn_dbg_info(irn);
142 DEBUG_ONLY(tenv.mod = cg->mod;)
143 tenv.mode = get_irn_mode(irn);
146 switch(get_ia32_am_flavour(irn)) {
148 /* out register must be same as base register */
149 if (! REGS_ARE_EQUAL(out_reg, base_reg))
155 /* out register must be same as base register */
156 if (! REGS_ARE_EQUAL(out_reg, base_reg))
163 /* out register must be same as index register */
164 if (! REGS_ARE_EQUAL(out_reg, index_reg))
171 /* out register must be same as one in register */
172 if (REGS_ARE_EQUAL(out_reg, base_reg)) {
176 else if (REGS_ARE_EQUAL(out_reg, index_reg)) {
181 /* in registers a different from out -> no Add possible */
188 res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, op1, op2, nomem);
189 arch_set_irn_register(cg->arch_env, res, out_reg);
190 set_ia32_op_type(res, ia32_Normal);
191 set_ia32_commutative(res);
192 set_ia32_res_mode(res, tenv.mode);
195 set_ia32_cnst(res, offs);
196 set_ia32_immop_type(res, ia32_ImmConst);
199 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn));
201 /* add Add to schedule */
202 sched_add_before(irn, res);
204 DBG_OPT_LEA2ADD(irn, res);
206 res = new_rd_Proj(tenv.dbg, tenv.irg, tenv.block, res, tenv.mode, pn_ia32_Add_res);
208 /* add result Proj to schedule */
209 sched_add_before(irn, res);
211 /* remove the old LEA */
214 /* exchange the Add and the LEA */
218 static INLINE int need_constraint_copy(ir_node *irn) {
220 ! is_ia32_Lea(irn) && \
221 ! is_ia32_Conv_I2I(irn) && \
222 ! is_ia32_Conv_I2I8Bit(irn) && \
223 ! is_ia32_CmpCMov(irn) && \
224 ! is_ia32_CmpSet(irn);
228 * Insert copies for all ia32 nodes where the should_be_same requirement
is not already fulfilled by the register allocation.
230 * Transform Sub into Neg -- Add if IN2 == OUT
232 static void ia32_finish_node(ir_node *irn, void *env) {
233 ia32_code_gen_t *cg = env;
234 const ia32_register_req_t **reqs;
235 const arch_register_t *out_reg, *in_reg, *in2_reg;
237 ir_node *copy, *in_node, *block, *in2_node;
238 ia32_op_type_t op_tp;
240 if (is_ia32_irn(irn)) {
241 /* AM Dest nodes don't produce any values */
242 op_tp = get_ia32_op_type(irn);
243 if (op_tp == ia32_AddrModeD)
246 reqs = get_ia32_out_req_all(irn);
247 n_res = get_ia32_n_res(irn);
248 block = get_nodes_block(irn);
250 /* check all OUT requirements, if there is a should_be_same */
251 if ((op_tp == ia32_Normal || op_tp == ia32_AddrModeS) && need_constraint_copy(irn))
253 for (i = 0; i < n_res; i++) {
254 if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
255 /* get in and out register */
256 out_reg = get_ia32_out_reg(irn, i);
257 in_node = get_irn_n(irn, reqs[i]->same_pos);
258 in_reg = arch_get_irn_register(cg->arch_env, in_node);
260 /* don't copy ignore nodes */
261 if (arch_irn_is(cg->arch_env, in_node, ignore) && is_Proj(in_node))
264 /* check if in and out register are equal */
265 if (! REGS_ARE_EQUAL(out_reg, in_reg)) {
266 /* in case of a commutative op: just exchange the in's */
267 /* beware: the current op could be everything, so test for ia32 */
268 /* commutativity first before getting the second in */
269 if (is_ia32_commutative(irn)) {
270 in2_node = get_irn_n(irn, reqs[i]->same_pos ^ 1);
271 in2_reg = arch_get_irn_register(cg->arch_env, in2_node);
273 if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
274 set_irn_n(irn, reqs[i]->same_pos, in2_node);
275 set_irn_n(irn, reqs[i]->same_pos ^ 1, in_node);
282 DBG((cg->mod, LEVEL_1, "inserting copy for %+F in_pos %d\n", irn, reqs[i]->same_pos));
283 /* create copy from in register */
284 copy = be_new_Copy(arch_register_get_class(in_reg), cg->irg, block, in_node);
286 DBG_OPT_2ADDRCPY(copy);
288 /* destination is the out register */
289 arch_set_irn_register(cg->arch_env, copy, out_reg);
291 /* insert copy before the node into the schedule */
292 sched_add_before(irn, copy);
295 set_irn_n(irn, reqs[i]->same_pos, copy);
302 /* check xCmp: try to avoid unordered cmp */
303 if ((is_ia32_xCmp(irn) || is_ia32_xCmpCMov(irn) || is_ia32_xCmpSet(irn)) &&
304 op_tp == ia32_Normal &&
305 ! is_ia32_ImmConst(irn) && ! is_ia32_ImmSymConst(irn))
307 long pnc = get_ia32_pncode(irn);
309 if (pnc & pn_Cmp_Uo) {
311 int idx1 = 2, idx2 = 3;
313 if (is_ia32_xCmpCMov(irn)) {
318 tmp = get_irn_n(irn, idx1);
319 set_irn_n(irn, idx1, get_irn_n(irn, idx2));
320 set_irn_n(irn, idx2, tmp);
322 set_ia32_pncode(irn, get_negated_pnc(pnc, mode_D));
327 If we have a CondJmp/CmpSet/xCmpSet with immediate,
328 we need to check if it's the right operand, otherwise
329 we have to change it, as CMP doesn't support immediate
332 if ((is_ia32_CondJmp(irn) || is_ia32_CmpSet(irn) || is_ia32_xCmpSet(irn)) &&
333 (is_ia32_ImmConst(irn) || is_ia32_ImmSymConst(irn)) &&
334 op_tp == ia32_AddrModeS)
336 set_ia32_op_type(irn, ia32_AddrModeD);
337 set_ia32_pncode(irn, get_inversed_pnc(get_ia32_pncode(irn)));
345 * We have a source address mode node with base or index register equal to
346 * result register. The constraint handler will insert a copy from the
347 * remaining input operand to the result register -> base or index is
349 * Solution: Turn back this address mode into explicit Load + Operation.
351 static void fix_am_source(ir_node *irn, void *env) {
352 ia32_code_gen_t *cg = env;
353 ir_node *base, *index, *noreg;
354 const arch_register_t *reg_base, *reg_index;
355 const ia32_register_req_t **reqs;
358 /* check only ia32 nodes with source address mode */
359 if (! is_ia32_irn(irn) || get_ia32_op_type(irn) != ia32_AddrModeS)
362 base = get_irn_n(irn, 0);
363 index = get_irn_n(irn, 1);
365 reg_base = arch_get_irn_register(cg->arch_env, base);
366 reg_index = arch_get_irn_register(cg->arch_env, index);
367 reqs = get_ia32_out_req_all(irn);
369 noreg = ia32_new_NoReg_gp(cg);
371 n_res = get_ia32_n_res(irn);
373 for (i = 0; i < n_res; i++) {
374 if (arch_register_req_is(&(reqs[i]->req), should_be_same)) {
375 /* get in and out register */
376 const arch_register_t *out_reg = get_ia32_out_reg(irn, i);
379 there is a constraint for the remaining operand
380 and the result register is equal to base or index register
382 if (reqs[i]->same_pos == 2 &&
383 (REGS_ARE_EQUAL(out_reg, reg_base) || REGS_ARE_EQUAL(out_reg, reg_index)))
385 /* turn back address mode */
386 ir_node *in_node = get_irn_n(irn, 2);
387 const arch_register_t *in_reg = arch_get_irn_register(cg->arch_env, in_node);
388 ir_node *block = get_nodes_block(irn);
389 ir_mode *ls_mode = get_ia32_ls_mode(irn);
393 if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_gp]) {
394 load = new_rd_ia32_Load(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
395 pnres = pn_ia32_Load_res;
397 else if (arch_register_get_class(in_reg) == &ia32_reg_classes[CLASS_ia32_xmm]) {
398 load = new_rd_ia32_xLoad(NULL, cg->irg, block, base, index, get_irn_n(irn, 4));
399 pnres = pn_ia32_xLoad_res;
402 assert(0 && "cannot turn back address mode for this register class");
405 /* copy address mode information to load */
406 set_ia32_ls_mode(load, ls_mode);
407 set_ia32_am_flavour(load, get_ia32_am_flavour(irn));
408 set_ia32_op_type(load, ia32_AddrModeS);
409 set_ia32_am_support(load, ia32_am_Source);
410 set_ia32_am_scale(load, get_ia32_am_scale(irn));
411 set_ia32_am_sc(load, get_ia32_am_sc(irn));
412 add_ia32_am_offs(load, get_ia32_am_offs(irn));
413 set_ia32_frame_ent(load, get_ia32_frame_ent(irn));
415 if (is_ia32_use_frame(irn))
416 set_ia32_use_frame(load);
418 /* insert the load into schedule */
419 sched_add_before(irn, load);
421 DBG((cg->mod, LEVEL_3, "irg %+F: build back AM source for node %+F, inserted load %+F\n", cg->irg, irn, load));
423 load = new_r_Proj(cg->irg, block, load, ls_mode, pnres);
424 arch_set_irn_register(cg->arch_env, load, out_reg);
426 /* insert the load result proj into schedule */
427 sched_add_before(irn, load);
429 /* set the new input operand */
430 set_irn_n(irn, 3, load);
432 /* this is a normal node now */
433 set_irn_n(irn, 0, noreg);
434 set_irn_n(irn, 1, noreg);
435 set_ia32_op_type(irn, ia32_Normal);
443 static void ia32_finish_irg_walker(ir_node *block, void *env) {
446 /* first: turn back AM source if necessary */
447 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
448 next = sched_next(irn);
449 fix_am_source(irn, env);
452 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
453 ia32_code_gen_t *cg = env;
454 next = sched_next(irn);
456 if (is_ia32_irn(irn)) {
457 /* check if there is a sub which need to be transformed */
458 ia32_transform_sub_to_neg_add(irn, cg);
460 /* transform a LEA into an Add if possible */
461 ia32_transform_lea_to_add(irn, cg);
463 /* check for peephole optimization */
464 ia32_peephole_optimization(irn, cg);
468 /* second: insert copies and finish irg */
469 for (irn = sched_first(block); ! sched_is_end(irn); irn = next) {
470 next = sched_next(irn);
471 ia32_finish_node(irn, env);
475 static void ia32_push_on_queue_walker(ir_node *block, void *env) {
477 waitq_put(wq, block);
482 * Add Copy nodes for not fulfilled should_be_equal constraints
484 void ia32_finish_irg(ir_graph *irg, ia32_code_gen_t *cg) {
485 waitq *wq = new_waitq();
487 /* Push the blocks on the waitq because ia32_finish_irg_walker starts more walks ... */
488 irg_block_walk_graph(irg, NULL, ia32_push_on_queue_walker, wq);
490 while (! waitq_empty(wq)) {
491 ir_node *block = waitq_get(wq);
492 ia32_finish_irg_walker(block, cg);