2 * This file implements the IR transformation from firm into
14 #include "irgraph_t.h"
19 #include "iredges_t.h"
26 #include "../benode_t.h"
27 #include "../besched.h"
29 #include "bearch_ia32_t.h"
31 #include "ia32_nodes_attr.h"
32 #include "../arch/archop.h" /* we need this for Min and Max nodes */
33 #include "ia32_transform.h"
34 #include "ia32_new_nodes.h"
35 #include "ia32_map_regs.h"
37 #include "gen_ia32_regalloc_if.h"
39 #define SFP_SIGN "0x80000000"
40 #define DFP_SIGN "0x8000000000000000"
41 #define SFP_ABS "0x7FFFFFFF"
42 #define DFP_ABS "0x7FFFFFFFFFFFFFFF"
44 #define TP_SFP_SIGN "ia32_sfp_sign"
45 #define TP_DFP_SIGN "ia32_dfp_sign"
46 #define TP_SFP_ABS "ia32_sfp_abs"
47 #define TP_DFP_ABS "ia32_dfp_abs"
49 #define ENT_SFP_SIGN "IA32_SFP_SIGN"
50 #define ENT_DFP_SIGN "IA32_DFP_SIGN"
51 #define ENT_SFP_ABS "IA32_SFP_ABS"
52 #define ENT_DFP_ABS "IA32_DFP_ABS"
54 extern ir_op *get_op_Mulh(void);
56 typedef ir_node *construct_binop_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \
57 ir_node *op1, ir_node *op2, ir_node *mem, ir_mode *mode);
59 typedef ir_node *construct_unop_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \
60 ir_node *op, ir_node *mem, ir_mode *mode);
63 ia32_SSIGN, ia32_DSIGN, ia32_SABS, ia32_DABS, ia32_known_const_max
66 /****************************************************************************************************
68 * | | | | / _| | | (_)
69 * _ __ ___ __| | ___ | |_ _ __ __ _ _ __ ___| |_ ___ _ __ _ __ ___ __ _| |_ _ ___ _ __
70 * | '_ \ / _ \ / _` |/ _ \ | __| '__/ _` | '_ \/ __| _/ _ \| '__| '_ ` _ \ / _` | __| |/ _ \| '_ \
71 * | | | | (_) | (_| | __/ | |_| | | (_| | | | \__ \ || (_) | | | | | | | | (_| | |_| | (_) | | | |
72 * |_| |_|\___/ \__,_|\___| \__|_| \__,_|_| |_|___/_| \___/|_| |_| |_| |_|\__,_|\__|_|\___/|_| |_|
74 ****************************************************************************************************/
77 * Gets the Proj with number pn from irn.
/* Scans the out edges of the mode_T node irn and yields the Proj with the
 * given proj number pn.
 * NOTE(review): the declaration of `proj` and the return statements are not
 * visible in this excerpt of the file. */
79 static ir_node *get_proj_for_pn(const ir_node *irn, long pn) {
80 const ir_edge_t *edge;
82 assert(get_irn_mode(irn) == mode_T && "need mode_T");
84 foreach_out_edge(irn, edge) {
	/* each out-edge source is a user of irn, expected to be a Proj here */
85 proj = get_edge_src_irn(edge);
87 if (get_Proj_proj(proj) == pn)
94 /* Generates an entity for a known FP const (used for FP Neg + Abs) */
95 static ident *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) {
	/* table mapping each known-const kind to its type name, entity name and
	 * constant bit pattern (see the TP_..., ENT_..., SFP_/DFP_ defines) */
100 } names [ia32_known_const_max] = {
101 { TP_SFP_SIGN, ENT_SFP_SIGN, SFP_SIGN }, /* ia32_SSIGN */
102 { TP_DFP_SIGN, ENT_DFP_SIGN, DFP_SIGN }, /* ia32_DSIGN */
103 { TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS }, /* ia32_SABS */
104 { TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS } /* ia32_DABS */
	/* one cached entity per known-const kind, created lazily below */
106 static struct entity *ent_cache[ia32_known_const_max];
108 const char *tp_name, *ent_name, *cnst_str;
115 ent_name = names[kct].ent_name;
	/* create the entity only on the first request for this kind */
116 if (! ent_cache[kct]) {
117 tp_name = names[kct].tp_name;
118 cnst_str = names[kct].cnst_str;
120 tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode);
121 tp = new_type_primitive(new_id_from_str(tp_name), mode);
122 ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp);
124 set_entity_ld_ident(ent, get_entity_ident(ent));
125 set_entity_visibility(ent, visibility_local);
126 set_entity_variability(ent, variability_constant);
127 set_entity_allocation(ent, allocation_static);
129 /* we create a new entity here: its initialization must reside in the
	/* build the initializer Const inside the const-code irg, then restore
	 * the previously current graph */
131 rem = current_ir_graph;
132 current_ir_graph = get_const_code_irg();
133 cnst = new_Const(mode, tv);
134 current_ir_graph = rem;
136 set_atomic_ent_value(ent, cnst);
138 /* cache the entry */
139 ent_cache[kct] = ent;
142 return get_entity_ident(ent_cache[kct]);
147 * Prints the old node name on cg obst and returns a pointer to it.
149 const char *ia32_get_old_node_name(ia32_transform_env_t *env) {
150 ia32_isa_t *isa = (ia32_isa_t *)env->cg->arch_env->isa;
	/* print the firm node ("%+F") onto the isa's name obstack */
152 lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", env->irn);
	/* NUL-terminate the grown string */
153 obstack_1grow(isa->name_obst, 0);
	/* book-keeping of the total obstack usage */
154 isa->name_obst_size += obstack_object_size(isa->name_obst);
	/* finish the object and hand out a pointer into the obstack */
155 return obstack_finish(isa->name_obst);
159 /* determine if one operator is an Imm */
160 static ir_node *get_immediate_op(ir_node *op1, ir_node *op2) {
	/* NOTE(review): the guard between the two returns (presumably
	 * "if (op1)") is not visible in this excerpt; callers pass op1 == NULL
	 * to restrict the immediate check to op2 (non-commutative case). */
162 return is_ia32_Cnst(op1) ? op1 : (is_ia32_Cnst(op2) ? op2 : NULL);
163 else return is_ia32_Cnst(op2) ? op2 : NULL;
166 /* determine if one operator is not an Imm */
167 static ir_node *get_expr_op(ir_node *op1, ir_node *op2) {
168 return !is_ia32_Cnst(op1) ? op1 : (!is_ia32_Cnst(op2) ? op2 : NULL);
173 * Construct a standard binary operation, set AM and immediate if required.
175 * @param env The transformation environment
176 * @param op1 The first operand
177 * @param op2 The second operand
178 * @param func The node constructor function
179 * @return The constructed ia32 node.
/* Builds an ia32 binop in mode_T via the given constructor function,
 * folds an immediate operand into the node attributes when allowed,
 * configures address-mode support and returns Proj 0 with the result. */
181 static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, construct_binop_func *func) {
182 ir_node *new_op = NULL;
183 ir_mode *mode = env->mode;
184 dbg_info *dbg = env->dbg;
185 ir_graph *irg = env->irg;
186 ir_node *block = env->block;
187 ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
188 ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
189 ir_node *nomem = new_NoMem();
190 ir_node *expr_op, *imm_op;
191 DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
193 /* Check if immediate optimization is on and */
194 /* if it's an operation with immediate. */
	/* immediate optimization disabled: treat both operands as expressions
	 * (that branch body is not visible in this excerpt) */
195 if (! env->cg->opt.immops) {
199 else if (is_op_commutative(get_irn_op(env->irn))) {
	/* commutative: either operand may become the immediate */
200 imm_op = get_immediate_op(op1, op2);
201 expr_op = get_expr_op(op1, op2);
	/* non-commutative: only the second operand may be an immediate */
204 imm_op = get_immediate_op(NULL, op2);
205 expr_op = get_expr_op(op1, op2);
208 assert((expr_op || imm_op) && "invalid operands");
211 /* We have two consts here: not yet supported */
215 if (mode_is_float(mode)) {
216 /* floating point operations */
218 DB((mod, LEVEL_1, "FP with immediate ..."));
	/* immediate is carried in the node attributes, not as an input */
219 new_op = func(dbg, irg, block, noreg_gp, noreg_gp, expr_op, noreg_fp, nomem, mode_T);
220 set_ia32_Immop_attr(new_op, imm_op);
221 set_ia32_am_support(new_op, ia32_am_None);
224 DB((mod, LEVEL_1, "FP binop ..."));
225 new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem, mode_T);
226 set_ia32_am_support(new_op, ia32_am_Source);
230 /* integer operations */
232 /* This is expr + const */
233 DB((mod, LEVEL_1, "INT with immediate ..."));
234 new_op = func(dbg, irg, block, noreg_gp, noreg_gp, expr_op, noreg_gp, nomem, mode_T);
235 set_ia32_Immop_attr(new_op, imm_op);
238 set_ia32_am_support(new_op, ia32_am_Dest);
241 DB((mod, LEVEL_1, "INT binop ..."));
242 /* This is a normal operation */
243 new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem, mode_T);
246 set_ia32_am_support(new_op, ia32_am_Full);
250 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
252 set_ia32_res_mode(new_op, mode);
254 if (is_op_commutative(get_irn_op(env->irn))) {
255 set_ia32_commutative(new_op);
	/* the constructed node is mode_T; the data result is Proj 0 */
258 return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
264 * Construct a shift/rotate binary operation, sets AM and immediate if required.
266 * @param env The transformation environment
267 * @param op1 The first operand
268 * @param op2 The second operand
269 * @param func The node constructor function
270 * @return The constructed ia32 node.
/* Builds an ia32 shift/rotate node; only the right operand (the shift
 * amount) may be an immediate, and large immediates are reduced mod 32. */
272 static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, construct_binop_func *func) {
273 ir_node *new_op = NULL;
274 ir_mode *mode = env->mode;
275 dbg_info *dbg = env->dbg;
276 ir_graph *irg = env->irg;
277 ir_node *block = env->block;
278 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
279 ir_node *nomem = new_NoMem();
280 ir_node *expr_op, *imm_op;
282 DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
284 assert(! mode_is_float(mode) && "Shift/Rotate with float not supported");
286 /* Check if immediate optimization is on and */
287 /* if it's an operation with immediate. */
	/* shifts are never commutative: only op2 can be the immediate */
288 imm_op = env->cg->opt.immops ? get_immediate_op(NULL, op2) : NULL;
289 expr_op = get_expr_op(op1, op2);
291 assert((expr_op || imm_op) && "invalid operands");
294 /* We have two consts here: not yet supported */
298 /* Limit imm_op within range imm8 */
300 tv = get_ia32_Immop_tarval(imm_op);
	/* ia32 only uses the low 5 bits of the count, so reduce modulo 32 */
303 tv = tarval_mod(tv, new_tarval_from_long(32, mode_Iu));
310 /* integer operations */
312 /* This is shift/rot with const */
313 DB((mod, LEVEL_1, "Shift/Rot with immediate ..."));
	/* the count is carried in the node attributes, not as an input */
315 new_op = func(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem, mode_T);
316 set_ia32_Immop_attr(new_op, imm_op);
319 /* This is a normal shift/rot */
320 DB((mod, LEVEL_1, "Shift/Rot binop ..."));
321 new_op = func(dbg, irg, block, noreg, noreg, op1, op2, nomem, mode_T);
325 set_ia32_am_support(new_op, ia32_am_Dest);
327 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
329 set_ia32_res_mode(new_op, mode);
	/* variable shift counts must live in cl on ia32 */
330 set_ia32_emit_cl(new_op);
332 return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
337 * Construct a standard unary operation, set AM and immediate if required.
339 * @param env The transformation environment
340 * @param op The operand
341 * @param func The node constructor function
342 * @return The constructed ia32 node.
/* Builds an ia32 unop in mode_T via the given constructor, configures
 * address-mode support and returns Proj 0 holding the result. */
344 static ir_node *gen_unop(ia32_transform_env_t *env, ir_node *op, construct_unop_func *func) {
345 ir_node *new_op = NULL;
346 ir_mode *mode = env->mode;
347 dbg_info *dbg = env->dbg;
348 ir_graph *irg = env->irg;
349 ir_node *block = env->block;
350 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
351 ir_node *nomem = new_NoMem();
352 DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
354 new_op = func(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
356 if (mode_is_float(mode)) {
357 DB((mod, LEVEL_1, "FP unop ..."));
358 /* floating point operations don't support implicit store */
359 set_ia32_am_support(new_op, ia32_am_None);
362 DB((mod, LEVEL_1, "INT unop ..."));
363 set_ia32_am_support(new_op, ia32_am_Dest);
366 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
368 set_ia32_res_mode(new_op, mode);
	/* result is delivered through Proj 0 of the mode_T node */
370 return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
376 * Creates an ia32 Add with immediate.
378 * @param env The transformation environment
379 * @param expr_op The expression operator
380 * @param const_op The constant
381 * @return the created ia32 Add node
/* Builds an Add with an immediate operand; additions of +1 / -1 are
 * strength-reduced to Inc / Dec when the incdec optimization is on. */
383 static ir_node *gen_imm_Add(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) {
384 ir_node *new_op = NULL;
385 tarval *tv = get_ia32_Immop_tarval(const_op);
386 dbg_info *dbg = env->dbg;
387 ir_graph *irg = env->irg;
388 ir_node *block = env->block;
389 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
390 ir_node *nomem = new_NoMem();
392 tarval_classification_t class_tv, class_negtv;
393 DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
395 /* try to optimize to inc/dec */
396 if (env->cg->opt.incdec && tv) {
397 /* optimize tarvals */
398 class_tv = classify_tarval(tv);
399 class_negtv = classify_tarval(tarval_neg(tv));
401 if (class_tv == TV_CLASSIFY_ONE) { /* + 1 == INC */
402 DB((env->mod, LEVEL_2, "Add(1) to Inc ... "));
403 new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem, mode_T);
406 else if (class_tv == TV_CLASSIFY_ALL_ONE || class_negtv == TV_CLASSIFY_ONE) { /* + (-1) == DEC */
407 DB((mod, LEVEL_2, "Add(-1) to Dec ... "));
408 new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem, mode_T);
	/* fallback: plain Add carrying the constant in its attributes */
414 new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem, mode_T);
415 set_ia32_Immop_attr(new_op, const_op);
422 * Creates an ia32 Add.
424 * @param dbg firm node dbg
425 * @param block the block the new node should belong to
426 * @param op1 first operator
427 * @param op2 second operator
428 * @param mode node mode
429 * @return the created ia32 Add node
/* Transforms a firm Add: SSE2/x87 for floats, LEA folding for symconst
 * combinations, Inc/Dec/immediate-Add otherwise. */
431 static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
432 ir_node *new_op = NULL;
433 dbg_info *dbg = env->dbg;
434 ir_mode *mode = env->mode;
435 ir_graph *irg = env->irg;
436 ir_node *block = env->block;
437 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
438 ir_node *nomem = new_NoMem();
439 ir_node *expr_op, *imm_op;
441 /* Check if immediate optimization is on and */
442 /* if it's an operation with immediate. */
443 imm_op = env->cg->opt.immops ? get_immediate_op(op1, op2) : NULL;
444 expr_op = get_expr_op(op1, op2);
446 assert((expr_op || imm_op) && "invalid operands");
448 if (mode_is_float(mode)) {
449 if (USE_SSE2(env->cg))
450 return gen_binop(env, op1, op2, new_rd_ia32_fAdd);
	/* no SSE2: fall back to the x87 unit */
452 env->cg->used_x87 = 1;
453 return gen_binop(env, op1, op2, new_rd_ia32_vfadd);
459 /* No expr_op means, that we have two const - one symconst and */
460 /* one tarval or another symconst - because this case is not */
461 /* covered by constant folding */
462 /* We need to check for: */
463 /* 1) symconst + const -> becomes a LEA */
464 /* 2) symconst + symconst -> becomes a const + LEA as the elf */
465 /* linker doesn't support two symconsts */
467 if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) {
468 /* this is the 2nd case */
469 new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode);
470 set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
471 set_ia32_am_flavour(new_op, ia32_am_OB);
474 /* this is the 1st case */
475 new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
	/* fold the symconst into the LEA's am_sc and the tarval into its offset */
477 if (get_ia32_op_type(op1) == ia32_SymConst) {
478 set_ia32_am_sc(new_op, get_ia32_id_cnst(op1));
479 add_ia32_am_offs(new_op, get_ia32_cnst(op2));
482 add_ia32_am_offs(new_op, get_ia32_cnst(op1));
483 set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
485 set_ia32_am_flavour(new_op, ia32_am_O);
489 set_ia32_am_support(new_op, ia32_am_Source);
490 set_ia32_op_type(new_op, ia32_AddrModeS);
492 /* Lea doesn't need a Proj */
496 /* This is expr + const */
497 new_op = gen_imm_Add(env, expr_op, imm_op);
500 set_ia32_am_support(new_op, ia32_am_Dest);
503 /* This is a normal add */
504 new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, op1, op2, nomem, mode_T);
507 set_ia32_am_support(new_op, ia32_am_Full);
511 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
513 set_ia32_res_mode(new_op, mode);
515 return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
521 * Creates an ia32 Mul.
523 * @param dbg firm node dbg
524 * @param block the block the new node should belong to
525 * @param op1 first operator
526 * @param op2 second operator
527 * @param mode node mode
528 * @return the created ia32 Mul node
/* Transforms a firm Mul: SSE2 fMul / x87 vfmul for floats, integer Mul
 * otherwise; all three paths delegate to gen_binop.
 * NOTE(review): the else/brace lines between the branches are not visible
 * in this excerpt. */
530 static ir_node *gen_Mul(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
533 if (mode_is_float(env->mode)) {
534 if (USE_SSE2(env->cg))
535 new_op = gen_binop(env, op1, op2, new_rd_ia32_fMul);
	/* no SSE2: fall back to the x87 unit */
537 env->cg->used_x87 = 1;
538 new_op = gen_binop(env, op1, op2, new_rd_ia32_vfmul);
542 new_op = gen_binop(env, op1, op2, new_rd_ia32_Mul);
551 * Creates an ia32 Mulh.
552 * Note: Mul produces a 64Bit result and Mulh returns the upper 32 bit of
553 * this result while Mul returns the lower 32 bit.
555 * @param env The transformation environment
556 * @param op1 The first operator
557 * @param op2 The second operator
558 * @return the created ia32 Mulh node
/* Transforms a Mulh: builds a Mulh (EAX:EDX result pair), returns the
 * EDX proj (upper 32 bit) and keeps the unused EAX result alive via a
 * be_Keep so the register allocator accounts for the clobber. */
560 static ir_node *gen_Mulh(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
561 ir_node *proj_EAX, *proj_EDX, *mulh;
564 assert(!mode_is_float(env->mode) && "Mulh with float not supported");
565 proj_EAX = gen_binop(env, op1, op2, new_rd_ia32_Mulh);
566 mulh = get_Proj_pred(proj_EAX);
567 proj_EDX = new_rd_Proj(env->dbg, env->irg, env->block, mulh, env->mode, pn_EDX);
569 /* to be on the safe side */
570 set_Proj_proj(proj_EAX, pn_EAX);
572 if (is_ia32_ImmConst(mulh) || is_ia32_ImmSymConst(mulh)) {
573 /* Mulh with const cannot have AM */
574 set_ia32_am_support(mulh, ia32_am_None);
577 /* Mulh cannot have AM for destination */
578 set_ia32_am_support(mulh, ia32_am_Source);
	/* keep the EAX proj alive even though only EDX is the Mulh result */
584 be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], env->irg, env->block, 1, in);
592 * Creates an ia32 And.
594 * @param env The transformation environment
595 * @param op1 The first operator
596 * @param op2 The second operator
597 * @return The created ia32 And node
599 static ir_node *gen_And(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
600 assert (! mode_is_float(env->mode));
601 return gen_binop(env, op1, op2, new_rd_ia32_And);
607 * Creates an ia32 Or.
609 * @param env The transformation environment
610 * @param op1 The first operator
611 * @param op2 The second operator
612 * @return The created ia32 Or node
614 static ir_node *gen_Or(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
615 assert (! mode_is_float(env->mode));
616 return gen_binop(env, op1, op2, new_rd_ia32_Or);
622 * Creates an ia32 Eor.
624 * @param env The transformation environment
625 * @param op1 The first operator
626 * @param op2 The second operator
627 * @return The created ia32 Eor node
629 static ir_node *gen_Eor(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
630 assert(! mode_is_float(env->mode));
631 return gen_binop(env, op1, op2, new_rd_ia32_Eor);
637 * Creates an ia32 Max.
639 * @param env The transformation environment
640 * @param op1 The first operator
641 * @param op2 The second operator
642 * @return the created ia32 Max node
/* Transforms a Max: SSE2 fMax for floats, otherwise an ia32 Max pseudo op.
 * NOTE(review): the non-SSE2 float path between lines 651 and 656 is not
 * visible in this excerpt. */
644 static ir_node *gen_Max(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
647 if (mode_is_float(env->mode)) {
648 if (USE_SSE2(env->cg))
649 new_op = gen_binop(env, op1, op2, new_rd_ia32_fMax);
	/* no SSE2: fall back to the x87 unit */
651 env->cg->used_x87 = 1;
656 new_op = new_rd_ia32_Max(env->dbg, env->irg, env->block, op1, op2, env->mode);
657 set_ia32_am_support(new_op, ia32_am_None);
658 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
667 * Creates an ia32 Min.
669 * @param env The transformation environment
670 * @param op1 The first operator
671 * @param op2 The second operator
672 * @return the created ia32 Min node
/* Transforms a Min: SSE2 fMin for floats, otherwise an ia32 Min pseudo op.
 * NOTE(review): the non-SSE2 float path between lines 681 and 686 is not
 * visible in this excerpt. */
674 static ir_node *gen_Min(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
677 if (mode_is_float(env->mode)) {
678 if (USE_SSE2(env->cg))
679 new_op = gen_binop(env, op1, op2, new_rd_ia32_fMin);
	/* no SSE2: fall back to the x87 unit */
681 env->cg->used_x87 = 1;
686 new_op = new_rd_ia32_Min(env->dbg, env->irg, env->block, op1, op2, env->mode);
687 set_ia32_am_support(new_op, ia32_am_None);
688 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
697 * Creates an ia32 Sub with immediate.
699 * @param env The transformation environment
700 * @param op1 The first operator
701 * @param op2 The second operator
702 * @return The created ia32 Sub node
/* Builds a Sub with an immediate operand; subtractions of +1 / -1 are
 * strength-reduced to Dec / Inc when the incdec optimization is on. */
704 static ir_node *gen_imm_Sub(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) {
705 ir_node *new_op = NULL;
706 tarval *tv = get_ia32_Immop_tarval(const_op);
707 dbg_info *dbg = env->dbg;
708 ir_graph *irg = env->irg;
709 ir_node *block = env->block;
710 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
711 ir_node *nomem = new_NoMem();
713 tarval_classification_t class_tv, class_negtv;
714 DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
716 /* try to optimize to inc/dec */
717 if (env->cg->opt.incdec && tv) {
718 /* optimize tarvals */
719 class_tv = classify_tarval(tv);
720 class_negtv = classify_tarval(tarval_neg(tv));
722 if (class_tv == TV_CLASSIFY_ONE) { /* - 1 == DEC */
723 DB((mod, LEVEL_2, "Sub(1) to Dec ... "));
724 new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem, mode_T);
727 else if (class_negtv == TV_CLASSIFY_ONE) { /* - (-1) == INC */
728 DB((mod, LEVEL_2, "Sub(-1) to Inc ... "));
729 new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem, mode_T);
	/* fallback: plain Sub carrying the constant in its attributes */
735 new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem, mode_T);
736 set_ia32_Immop_attr(new_op, const_op);
743 * Creates an ia32 Sub.
745 * @param env The transformation environment
746 * @param op1 The first operator
747 * @param op2 The second operator
748 * @return The created ia32 Sub node
/* Transforms a firm Sub: SSE2/x87 for floats, LEA folding for symconst
 * combinations (with negated symconst/offset), Dec/Inc/immediate-Sub
 * otherwise. */
750 static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
751 ir_node *new_op = NULL;
752 dbg_info *dbg = env->dbg;
753 ir_mode *mode = env->mode;
754 ir_graph *irg = env->irg;
755 ir_node *block = env->block;
756 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
757 ir_node *nomem = new_NoMem();
758 ir_node *expr_op, *imm_op;
760 /* Check if immediate optimization is on and */
761 /* if it's an operation with immediate. */
	/* Sub is not commutative: only op2 may become the immediate */
762 imm_op = env->cg->opt.immops ? get_immediate_op(NULL, op2) : NULL;
763 expr_op = get_expr_op(op1, op2);
765 assert((expr_op || imm_op) && "invalid operands");
767 if (mode_is_float(mode)) {
768 if (USE_SSE2(env->cg))
769 return gen_binop(env, op1, op2, new_rd_ia32_fSub);
	/* no SSE2: fall back to the x87 unit */
771 env->cg->used_x87 = 1;
772 return gen_binop(env, op1, op2, new_rd_ia32_vfsub);
778 /* No expr_op means, that we have two const - one symconst and */
779 /* one tarval or another symconst - because this case is not */
780 /* covered by constant folding */
781 /* We need to check for: */
782 /* 1) symconst + const -> becomes a LEA */
783 /* 2) symconst + symconst -> becomes a const + LEA as the elf */
784 /* linker doesn't support two symconsts */
786 if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) {
787 /* this is the 2nd case */
788 new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode);
	/* the subtrahend symconst is marked negative via am_sc_sign */
789 set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
790 set_ia32_am_sc_sign(new_op);
791 set_ia32_am_flavour(new_op, ia32_am_OB);
794 /* this is the 1st case */
795 new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
797 if (get_ia32_op_type(op1) == ia32_SymConst) {
798 set_ia32_am_sc(new_op, get_ia32_id_cnst(op1));
	/* symconst - const: subtract the tarval from the LEA offset */
799 sub_ia32_am_offs(new_op, get_ia32_cnst(op2));
802 add_ia32_am_offs(new_op, get_ia32_cnst(op1));
803 set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
804 set_ia32_am_sc_sign(new_op);
806 set_ia32_am_flavour(new_op, ia32_am_O);
810 set_ia32_am_support(new_op, ia32_am_Source);
811 set_ia32_op_type(new_op, ia32_AddrModeS);
813 /* Lea doesn't need a Proj */
817 /* This is expr - const */
818 new_op = gen_imm_Sub(env, expr_op, imm_op);
821 set_ia32_am_support(new_op, ia32_am_Dest);
824 /* This is a normal sub */
825 new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, op1, op2, nomem, mode_T);
828 set_ia32_am_support(new_op, ia32_am_Full);
832 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
834 set_ia32_res_mode(new_op, mode);
836 return new_rd_Proj(dbg, irg, block, new_op, mode, 0);
842 * Generates an ia32 DivMod with additional infrastructure for the
843 * register allocator if needed.
845 * @param env The transformation environment
846 * @param dividend -no comment- :)
847 * @param divisor -no comment- :)
848 * @param dm_flav flavour_Div/Mod/DivMod
849 * @return The created ia32 DivMod node
/* Builds an ia32 DivMod: sign-extends the dividend via Cdq (signed) or
 * zeroes edx (unsigned), and keeps the unused result register alive with
 * a be_Keep when only one of div/mod is consumed. */
851 static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir_node *divisor, ia32_op_flavour_t dm_flav) {
853 ir_node *edx_node, *cltd;
855 dbg_info *dbg = env->dbg;
856 ir_graph *irg = env->irg;
857 ir_node *block = env->block;
858 ir_mode *mode = env->mode;
859 ir_node *irn = env->irn;
	/* fetch mem input and result mode from the original Div/Mod/DivMod node */
864 mem = get_Div_mem(irn);
865 mode = get_irn_mode(get_proj_for_pn(irn, pn_Div_res));
868 mem = get_Mod_mem(irn);
869 mode = get_irn_mode(get_proj_for_pn(irn, pn_Mod_res));
872 mem = get_DivMod_mem(irn);
873 mode = get_irn_mode(get_proj_for_pn(irn, pn_DivMod_res_div));
879 if (mode_is_signed(mode)) {
880 /* in signed mode, we need to sign extend the dividend */
881 cltd = new_rd_ia32_Cdq(dbg, irg, block, dividend, mode_T);
882 dividend = new_rd_Proj(dbg, irg, block, cltd, mode_Is, pn_EAX);
883 edx_node = new_rd_Proj(dbg, irg, block, cltd, mode_Is, pn_EDX);
	/* unsigned division: the upper half (edx) is simply zero */
886 edx_node = new_rd_ia32_Const(dbg, irg, block, mode_Iu);
887 set_ia32_Const_type(edx_node, ia32_Const);
888 set_ia32_Immop_tarval(edx_node, get_tarval_null(mode_Iu));
891 res = new_rd_ia32_DivMod(dbg, irg, block, dividend, divisor, edx_node, mem, mode_T);
	/* the node produces both quotient and remainder */
893 set_ia32_flavour(res, dm_flav);
894 set_ia32_n_res(res, 2);
896 /* Only one proj is used -> We must add a second proj and */
897 /* connect this one to a Keep node to eat up the second */
898 /* destroyed register. */
899 if (get_irn_n_edges(irn) == 1) {
900 proj = get_edge_src_irn(get_irn_out_edge_first(irn));
901 assert(is_Proj(proj) && "non-Proj to Div/Mod node");
903 if (get_Proj_proj(proj) == pn_DivMod_res_div) {
904 in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Is, pn_DivMod_res_mod);
907 in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Is, pn_DivMod_res_div);
910 be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
913 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
915 set_ia32_res_mode(res, mode_Is);
922 * Wrapper for generate_DivMod. Sets flavour_Mod.
924 static ir_node *gen_Mod(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
925 return generate_DivMod(env, op1, op2, flavour_Mod);
931 * Wrapper for generate_DivMod. Sets flavour_Div.
933 static ir_node *gen_Div(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
934 return generate_DivMod(env, op1, op2, flavour_Div);
940 * Wrapper for generate_DivMod. Sets flavour_DivMod.
942 static ir_node *gen_DivMod(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
943 return generate_DivMod(env, op1, op2, flavour_DivMod);
949 * Creates an ia32 floating Div.
951 * @param env The transformation environment
952 * @param op1 The first operator
953 * @param op2 The second operator
954 * @return The created ia32 fDiv node
/* Transforms a Quot (float division): SSE2 fDiv (with the divisor folded
 * into the attributes when it is a constant), x87 vfdiv otherwise. */
956 static ir_node *gen_Quot(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
957 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
959 ir_node *nomem = new_rd_NoMem(env->irg);
961 if (USE_SSE2(env->cg)) {
963 if (is_ia32_fConst(op2)) {
	/* constant divisor travels in the node attributes, not as an input */
964 new_op = new_rd_ia32_fDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, noreg, nomem, mode_T);
965 set_ia32_am_support(new_op, ia32_am_None);
966 set_ia32_Immop_attr(new_op, op2);
969 new_op = new_rd_ia32_fDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem, mode_T);
970 set_ia32_am_support(new_op, ia32_am_Source);
	/* no SSE2: use the x87 unit */
974 new_op = new_rd_ia32_vfdiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem, mode_T);
975 set_ia32_am_support(new_op, ia32_am_Source);
	/* the result mode is taken from the original Quot's result Proj */
977 set_ia32_res_mode(new_op, get_irn_mode(get_proj_for_pn(env->irn, pn_Quot_res)));
978 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
986 * Creates an ia32 Shl.
988 * @param env The transformation environment
989 * @param op1 The first operator
990 * @param op2 The second operator
991 * @return The created ia32 Shl node
993 static ir_node *gen_Shl(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
994 return gen_shift_binop(env, op1, op2, new_rd_ia32_Shl);
1000 * Creates an ia32 Shr.
1002 * @param env The transformation environment
1003 * @param op1 The first operator
1004 * @param op2 The second operator
1005 * @return The created ia32 Shr node
1007 static ir_node *gen_Shr(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
1008 return gen_shift_binop(env, op1, op2, new_rd_ia32_Shr);
1014 * Creates an ia32 Shrs.
1016 * @param env The transformation environment
1017 * @param op1 The first operator
1018 * @param op2 The second operator
1019 * @return The created ia32 Shrs node
1021 static ir_node *gen_Shrs(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
1022 return gen_shift_binop(env, op1, op2, new_rd_ia32_Shrs);
1028 * Creates an ia32 RotL.
1030 * @param env The transformation environment
1031 * @param op1 The first operator
1032 * @param op2 The second operator
1033 * @return The created ia32 RotL node
1035 static ir_node *gen_RotL(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
1036 return gen_shift_binop(env, op1, op2, new_rd_ia32_RotL);
1042 * Creates an ia32 RotR.
1043 * NOTE: There is no RotR with immediate because this would always be a RotL
1044 * "imm-mode_size_bits" which can be pre-calculated.
1046 * @param env The transformation environment
1047 * @param op1 The first operator
1048 * @param op2 The second operator
1049 * @return The created ia32 RotR node
1051 static ir_node *gen_RotR(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
1052 return gen_shift_binop(env, op1, op2, new_rd_ia32_RotR);
1058 * Creates an ia32 RotR or RotL (depending on the found pattern).
1060 * @param env The transformation environment
1061 * @param op1 The first operator
1062 * @param op2 The second operator
1063 * @return The created ia32 RotL or RotR node
/* Transforms a firm Rot (a RotL): recognizes the pattern
 * "RotL(x, bits - e)" (already rewritten as an Add of a Minus and the
 * mode size) and emits a RotR(x, e) instead; otherwise a plain RotL. */
1065 static ir_node *gen_Rot(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
1066 ir_node *rotate = NULL;
1068 /* Firm has only Rot (which is a RotL), so we are looking for a right (op2)
1069 operand "-e+mode_size_bits" (it's an already modified "mode_size_bits-e",
1070 that means we can create a RotR instead of an Add and a RotL */
1073 ir_node *pred = get_Proj_pred(op2);
1075 if (is_ia32_Add(pred)) {
	/* input 2 is presumably the Add's left data operand (after the
	 * base/index address inputs) -- TODO confirm against the node layout */
1076 ir_node *pred_pred = get_irn_n(pred, 2);
1077 tarval *tv = get_ia32_Immop_tarval(pred);
1078 long bits = get_mode_size_bits(env->mode);
1080 if (is_Proj(pred_pred)) {
1081 pred_pred = get_Proj_pred(pred_pred);
	/* pattern matched: Add(Minus(e), bits) == bits - e -> RotR by e */
1084 if (is_ia32_Minus(pred_pred) &&
1085 tarval_is_long(tv) &&
1086 get_tarval_long(tv) == bits)
1088 DB((env->mod, LEVEL_1, "RotL into RotR ... "));
1089 rotate = gen_RotR(env, op1, get_irn_n(pred_pred, 2));
	/* no pattern match: emit the plain rotate left */
1096 rotate = gen_RotL(env, op1, op2);
1105 * Transforms a Minus node.
1107 * @param env The transformation environment
1108 * @param op The operator
1109 * @return The created ia32 Minus node
/* Transforms a Minus: floats under SSE2 become an Eor with a sign-bit
 * mask constant (flips only the sign bit), floats under x87 a vfchs,
 * integers a plain ia32 Minus. */
1111 static ir_node *gen_Minus(ia32_transform_env_t *env, ir_node *op) {
1114 ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
1115 ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
1116 ir_node *nomem = new_rd_NoMem(env->irg);
1119 if (mode_is_float(env->mode)) {
1120 if (USE_SSE2(env->cg)) {
1121 new_op = new_rd_ia32_fEor(env->dbg, env->irg, env->block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
	/* pick the 32- or 64-bit sign-mask constant entity */
1123 size = get_mode_size_bits(env->mode);
1124 name = gen_fp_known_const(env->mode, size == 32 ? ia32_SSIGN : ia32_DSIGN);
1126 set_ia32_sc(new_op, name);
1128 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1130 set_ia32_res_mode(new_op, env->mode);
1131 set_ia32_immop_type(new_op, ia32_ImmSymConst);
1133 new_op = new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, 0);
	/* no SSE2: x87 change-sign instruction */
1136 env->cg->used_x87 = 1;
1137 new_op = new_rd_ia32_vfchs(env->dbg, env->irg, env->block, op, env->mode);
1138 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
	/* integer negate */
1142 new_op = gen_unop(env, op, new_rd_ia32_Minus);
1151 * Transforms a Not node.
1153 * @param env The transformation environment
1154 * @param op The operator
1155 * @return The created ia32 Not node
1157 static ir_node *gen_Not(ia32_transform_env_t *env, ir_node *op) {
1158 assert (! mode_is_float(env->mode));
1159 return gen_unop(env, op, new_rd_ia32_Not);
1165 * Transforms an Abs node.
1167 * @param env The transformation environment
1168 * @param op The operator
1169 * @return The created ia32 Abs node
/* Transforms an Abs: floats under SSE2 via And with an abs-mask constant
 * (clears the sign bit), x87 via vfabs; integers via the branch-free
 * Cdq / Eor / Sub sequence: abs(x) = (x ^ (x >> 31)) - (x >> 31). */
1171 static ir_node *gen_Abs(ia32_transform_env_t *env, ir_node *op) {
1172 ir_node *res, *p_eax, *p_edx;
1173 dbg_info *dbg = env->dbg;
1174 ir_mode *mode = env->mode;
1175 ir_graph *irg = env->irg;
1176 ir_node *block = env->block;
1177 ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
1178 ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
1179 ir_node *nomem = new_NoMem();
1183 if (mode_is_float(mode)) {
1184 if (USE_SSE2(env->cg)) {
1185 res = new_rd_ia32_fAnd(dbg,irg, block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode_T);
	/* pick the 32- or 64-bit abs-mask constant entity */
1187 size = get_mode_size_bits(mode);
1188 name = gen_fp_known_const(mode, size == 32 ? ia32_SABS : ia32_DABS);
1190 set_ia32_sc(res, name);
1192 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
1194 set_ia32_res_mode(res, mode);
1195 set_ia32_immop_type(res, ia32_ImmSymConst);
1197 res = new_rd_Proj(dbg, irg, block, res, mode, 0);
	/* no SSE2: x87 absolute-value instruction */
1200 env->cg->used_x87 = 1;
1201 res = new_rd_ia32_vfabs(dbg, irg, block, op, mode);
1202 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
	/* integer: Cdq replicates the sign bit of eax into edx */
1206 res = new_rd_ia32_Cdq(dbg, irg, block, op, mode_T);
1207 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
1208 set_ia32_res_mode(res, mode);
1210 p_eax = new_rd_Proj(dbg, irg, block, res, mode, pn_EAX);
1211 p_edx = new_rd_Proj(dbg, irg, block, res, mode, pn_EDX);
	/* x ^ sign_mask */
1213 res = new_rd_ia32_Eor(dbg, irg, block, noreg_gp, noreg_gp, p_eax, p_edx, nomem, mode_T);
1214 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
1215 set_ia32_res_mode(res, mode);
1217 res = new_rd_Proj(dbg, irg, block, res, mode, 0);
	/* (x ^ sign_mask) - sign_mask */
1219 res = new_rd_ia32_Sub(dbg, irg, block, noreg_gp, noreg_gp, res, p_edx, nomem, mode_T);
1220 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
1221 set_ia32_res_mode(res, mode);
1223 res = new_rd_Proj(dbg, irg, block, res, mode, 0);
1232 * Transforms a Load.
1234 * @param env the transformation environment; env->irn carries the ir Load
1235 *            node, env->block the target block and env->dbg the debug info
1238 * @return the created ia32 Load node
1240 static ir_node *gen_Load(ia32_transform_env_t *env) {
1241 ir_node *node = env->irn;
1242 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1243 ir_node *ptr = get_Load_ptr(node);
1244 ir_node *lptr = ptr;
1245 ir_mode *mode = get_Load_mode(node);
/* default addressing mode flavour: base register only */
1248 ia32_am_flavour_t am_flav = ia32_B;
1250 /* address might be a constant (symconst or absolute address) */
1251 if (is_ia32_Const(ptr)) {
/* select the load variant by mode: SSE float load, x87 load, or plain GP load */
1256 if (mode_is_float(mode)) {
1257 if (USE_SSE2(env->cg))
1258 new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
1260 env->cg->used_x87 = 1;
1261 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
1265 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node), env->mode);
1268 /* base is an constant address */
/* fold the constant address into the load: symconst -> am symconst, else numeric offset */
1270 if (get_ia32_immop_type(ptr) == ia32_ImmSymConst) {
1271 set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr));
1274 add_ia32_am_offs(new_op, get_ia32_cnst(ptr));
/* mark the node as a source address mode operation so later phases can fold it */
1280 set_ia32_am_support(new_op, ia32_am_Source);
1281 set_ia32_op_type(new_op, ia32_AddrModeS);
1282 set_ia32_am_flavour(new_op, am_flav);
1283 set_ia32_ls_mode(new_op, mode);
1285 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1293 * Transforms a Store.
1295 * @param env the transformation environment; env->irn carries the ir Store
1296 *            node, env->block the target block and env->dbg the debug info
1299 * @return the created ia32 Store node
1301 static ir_node *gen_Store(ia32_transform_env_t *env) {
1302 ir_node *node = env->irn;
1303 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1304 ir_node *val = get_Store_value(node);
1305 ir_node *ptr = get_Store_ptr(node);
1306 ir_node *sptr = ptr;
1307 ir_node *mem = get_Store_mem(node);
1308 ir_mode *mode = get_irn_mode(val);
1309 ir_node *sval = val;
/* default: base-register addressing, no immediate operand */
1312 ia32_am_flavour_t am_flav = ia32_B;
1313 ia32_immop_type_t immop = ia32_ImmNone;
1315 if (! mode_is_float(mode)) {
1316 /* in case of storing a const (but not a symconst) -> make it an attribute */
1317 if (is_ia32_Cnst(val)) {
1318 switch (get_ia32_op_type(val)) {
1320 immop = ia32_ImmConst;
1323 immop = ia32_ImmSymConst;
1326 assert(0 && "unsupported Const type");
1332 /* address might be a constant (symconst or absolute address) */
1333 if (is_ia32_Const(ptr)) {
/* select the store variant: SSE float store, x87 store, 8 bit GP store or plain GP store */
1338 if (mode_is_float(mode)) {
1339 if (USE_SSE2(env->cg))
1340 new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
1342 env->cg->used_x87 = 1;
1343 new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
1346 else if (get_mode_size_bits(mode) == 8) {
1347 new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, sptr, noreg, sval, mem, mode_T);
1350 new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, sval, mem, mode_T);
1353 /* stored const is an attribute (saves a register) */
1354 if (! mode_is_float(mode) && is_ia32_Cnst(val)) {
1355 set_ia32_Immop_attr(new_op, val);
1358 /* base is an constant address */
/* fold the constant address into the store, analogous to gen_Load() */
1360 if (get_ia32_immop_type(ptr) == ia32_ImmSymConst) {
1361 set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr));
1364 add_ia32_am_offs(new_op, get_ia32_cnst(ptr));
/* a store is a destination address mode operation */
1370 set_ia32_am_support(new_op, ia32_am_Dest);
1371 set_ia32_op_type(new_op, ia32_AddrModeD);
1372 set_ia32_am_flavour(new_op, am_flav);
1373 set_ia32_ls_mode(new_op, get_irn_mode(val));
1374 set_ia32_immop_type(new_op, immop);
1376 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1384 * Transforms a Cond -> Proj[b] -> Cmp into a CondJmp, CondJmp_i or TestJmp
1386 * @param env The transformation environment
1387 * @return The transformed node.
1389 static ir_node *gen_Cond(ia32_transform_env_t *env) {
1390 dbg_info *dbg = env->dbg;
1391 ir_graph *irg = env->irg;
1392 ir_node *block = env->block;
1393 ir_node *node = env->irn;
1394 ir_node *sel = get_Cond_selector(node);
1395 ir_mode *sel_mode = get_irn_mode(sel);
1396 ir_node *res = NULL;
1397 ir_node *pred = NULL;
1398 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1399 ir_node *cmp_a, *cmp_b, *cnst, *expr;
/* a boolean Proj selector means: conditional jump fed by a Cmp */
1401 if (is_Proj(sel) && sel_mode == mode_b) {
1402 ir_node *nomem = new_NoMem();
1404 pred = get_Proj_pred(sel);
1406 /* get both compare operators */
1407 cmp_a = get_Cmp_left(pred);
1408 cmp_b = get_Cmp_right(pred);
1410 /* check if we can use a CondJmp with immediate */
1411 cnst = env->cg->opt.immops ? get_immediate_op(cmp_a, cmp_b) : NULL;
1412 expr = get_expr_op(cmp_a, cmp_b);
1415 pn_Cmp pnc = get_Proj_proj(sel);
/* integer ==/!= against a constant ... */
1417 if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) {
1418 if (classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) {
1419 /* a Cmp A =/!= 0 */
1420 ir_node *op1 = expr;
1421 ir_node *op2 = expr;
1422 ir_node *and = skip_Proj(expr);
/* NOTE: this local shadows the outer 'cnst' node with a string */
1423 const char *cnst = NULL;
1425 /* check, if expr is an only once used And operation */
1426 if (get_irn_n_edges(expr) == 1 && is_ia32_And(and)) {
1427 op1 = get_irn_n(and, 2);
1428 op2 = get_irn_n(and, 3);
1430 cnst = (is_ia32_ImmConst(and) || is_ia32_ImmSymConst(and)) ? get_ia32_cnst(and) : NULL;
/* ... becomes a TestJmp ("test a, b" sets flags without writing a result) */
1432 res = new_rd_ia32_TestJmp(dbg, irg, block, op1, op2, mode_T);
1433 set_ia32_pncode(res, get_Proj_proj(sel));
1434 set_ia32_res_mode(res, get_irn_mode(op1));
1437 copy_ia32_Immop_attr(res, and);
1440 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
/* compare against an immediate: one register operand + constant attribute */
1445 if (mode_is_float(get_irn_mode(expr))) {
1446 if (USE_SSE2(env->cg))
1447 res = new_rd_ia32_fCondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem, mode_T);
1449 env->cg->used_x87 = 1;
1454 res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem, mode_T);
1456 set_ia32_Immop_attr(res, cnst);
1457 set_ia32_res_mode(res, get_irn_mode(expr));
/* general register/register compare-and-jump */
1460 if (mode_is_float(get_irn_mode(cmp_a))) {
1461 if (USE_SSE2(env->cg))
1462 res = new_rd_ia32_fCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode_T);
1464 env->cg->used_x87 = 1;
1469 res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode_T);
1471 set_ia32_res_mode(res, get_irn_mode(cmp_a));
1474 set_ia32_pncode(res, get_Proj_proj(sel));
1475 set_ia32_am_support(res, ia32_am_Source);
/* non-boolean selector: a switch -> SwitchJmp, remember the default Proj number */
1478 res = new_rd_ia32_SwitchJmp(dbg, irg, block, sel, mode_T);
1479 set_ia32_pncode(res, get_Cond_defaultProj(node));
1480 set_ia32_res_mode(res, get_irn_mode(sel));
1483 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
1490 * Transforms a CopyB node.
1492 * @param env The transformation environment
1493 * @return The transformed node.
1495 static ir_node *gen_CopyB(ia32_transform_env_t *env) {
1496 ir_node *res = NULL;
1497 dbg_info *dbg = env->dbg;
1498 ir_graph *irg = env->irg;
1499 ir_mode *mode = env->mode;
1500 ir_node *block = env->block;
1501 ir_node *node = env->irn;
1502 ir_node *src = get_CopyB_src(node);
1503 ir_node *dst = get_CopyB_dst(node);
1504 ir_node *mem = get_CopyB_mem(node);
1505 int size = get_type_size_bytes(get_CopyB_type(node));
1508 /* If we have to copy more than 16 bytes, we use REP MOVSx and */
1509 /* then we need the size explicitly in ECX. */
/* threshold is 16 dwords (64 bytes), not 16 bytes as the comment above suggests */
1510 if (size >= 16 * 4) {
1511 rem = size & 0x3; /* size % 4 */
/* materialize the copy size as a Const feeding ECX */
1514 res = new_rd_ia32_Const(dbg, irg, block, mode_Is);
1515 set_ia32_op_type(res, ia32_Const);
1516 set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is));
/* the CopyB node itself carries the byte remainder (0..3) as immediate */
1518 res = new_rd_ia32_CopyB(dbg, irg, block, dst, src, res, mem, mode);
1519 set_ia32_Immop_tarval(res, new_tarval_from_long(rem, mode_Is));
/* small copy: CopyB_i variant with the whole size as immediate, no ECX needed */
1522 res = new_rd_ia32_CopyB_i(dbg, irg, block, dst, src, mem, mode);
1523 set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is));
1524 set_ia32_immop_type(res, ia32_ImmConst);
1527 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env));
1535 * Transforms a Mux node into CMov.
1537 * @param env The transformation environment
1538 * @return The transformed node.
1540 static ir_node *gen_Mux(ia32_transform_env_t *env) {
1541 ir_node *node = env->irn;
/* Mux(sel, false, true) maps 1:1 onto a conditional move (CMov) */
1542 ir_node *new_op = new_rd_ia32_CMov(env->dbg, env->irg, env->block, \
1543 get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env->mode);
1545 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1552 * Following conversion rules apply:
1556 * 1) n bit -> m bit n > m (downscale)
1557 * a) target is signed: movsx
1558 * b) target is unsigned: and with lower bits sets
1559 * 2) n bit -> m bit n == m (sign change)
1561 * 3) n bit -> m bit n < m (upscale)
1562 * a) source is signed: movsx
1563 * b) source is unsigned: and with lower bits sets
1567 * SSE(1/2) convert to float or double (cvtsi2ss/sd)
1571 * SSE(1/2) convert from float or double to 32bit int (cvtss/sd2si)
1572 * if target mode < 32bit: additional INT -> INT conversion (see above)
1576 * SSE(1/2) convert from float or double to double or float (cvtss/sd2sd/ss)
1577 * x87 is mode_E internally, conversions happen only at load and store
1578 * in non-strict semantic
1581 //static ir_node *gen_int_downscale_conv(ia32_transform_env_t *env, ir_node *op,
1582 // ir_mode *src_mode, ir_mode *tgt_mode)
1584 // int n = get_mode_size_bits(src_mode);
1585 // int m = get_mode_size_bits(tgt_mode);
1586 // dbg_info *dbg = env->dbg;
1587 // ir_graph *irg = env->irg;
1588 // ir_node *block = env->block;
1589 // ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1590 // ir_node *nomem = new_rd_NoMem(irg);
1591 // ir_node *new_op, *proj;
1592 // assert(n > m && "downscale expected");
1593 // if (mode_is_signed(src_mode) && mode_is_signed(tgt_mode)) {
1594 // /* ASHL Sn, n - m */
1595 // new_op = new_rd_ia32_Shl(dbg, irg, block, noreg, noreg, op, noreg, nomem, mode_T);
1596 // proj = new_rd_Proj(dbg, irg, block, new_op, src_mode, 0);
1597 // set_ia32_Immop_tarval(new_op, new_tarval_from_long(n - m, mode_Is));
1598 // set_ia32_am_support(new_op, ia32_am_Source);
1599 // SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1600 // /* ASHR Sn, n - m */
1601 // new_op = new_rd_ia32_Shrs(dbg, irg, block, noreg, noreg, proj, noreg, nomem, mode_T);
1602 // set_ia32_Immop_tarval(new_op, new_tarval_from_long(n - m, mode_Is));
1605 // new_op = new_rd_ia32_And(dbg, irg, block, noreg, noreg, op, noreg, nomem, mode_T);
1606 // set_ia32_Immop_tarval(new_op, new_tarval_from_long((1 << m) - 1, mode_Is));
1612 * Transforms a Conv node.
1614 * @param env The transformation environment
1615 * @param op The operator
1616 * @return The created ia32 Conv node
1618 static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *op) {
1619 dbg_info *dbg = env->dbg;
1620 ir_graph *irg = env->irg;
1621 ir_mode *src_mode = get_irn_mode(op);
1622 ir_mode *tgt_mode = env->mode;
1623 int src_bits = get_mode_size_bits(src_mode);
1624 int tgt_bits = get_mode_size_bits(tgt_mode);
1625 ir_node *block = env->block;
1626 ir_node *new_op = NULL;
1627 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1628 ir_node *nomem = new_rd_NoMem(irg);
1630 DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* identical modes: the Conv is a no-op, redirect all users to the operand */
1632 if (src_mode == tgt_mode) {
1633 /* this can happen when changing mode_P to mode_Is */
1634 DB((mod, LEVEL_1, "killed Conv(mode, mode) ..."));
1635 edges_reroute(env->irn, op, irg);
1637 else if (mode_is_float(src_mode)) {
1638 /* we convert from float ... */
1639 if (mode_is_float(tgt_mode)) {
/* float -> float: cvtss2sd/cvtsd2ss with SSE2; x87 works in mode_E so no code needed */
1641 if (USE_SSE2(env->cg)) {
1642 DB((mod, LEVEL_1, "create Conv(float, float) ..."));
1643 new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
1646 DB((mod, LEVEL_1, "killed Conv(float, float) ..."));
1647 edges_reroute(env->irn, op, irg);
/* float -> int: cvtss/sd2si always yields 32 bit ... */
1652 DB((mod, LEVEL_1, "create Conv(float, int) ..."));
1653 new_op = new_rd_ia32_Conv_FP2I(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
1654 /* if target mode is not int: add an additional downscale convert */
1655 if (tgt_bits < 32) {
1656 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1657 set_ia32_am_support(new_op, ia32_am_Source);
1658 set_ia32_tgt_mode(new_op, tgt_mode);
1659 set_ia32_src_mode(new_op, src_mode);
1661 proj = new_rd_Proj(dbg, irg, block, new_op, mode_Is, 0);
/* ... so chain an int->int conversion for sub-32-bit targets (8 bit needs its own variant) */
1663 if (tgt_bits == 8 || src_bits == 8) {
1664 new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, proj, nomem, mode_T);
1667 new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, proj, nomem, mode_T);
1673 /* we convert from int ... */
1674 if (mode_is_float(tgt_mode)) {
/* int -> float: cvtsi2ss/sd */
1676 DB((mod, LEVEL_1, "create Conv(int, float) ..."));
1677 new_op = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
/* int -> int of equal size is a pure mode change, kill the Conv */
1681 if (get_mode_size_bits(src_mode) == tgt_bits) {
1682 DB((mod, LEVEL_1, "omitting equal size Conv(%+F, %+F) ...", src_mode, tgt_mode));
1683 edges_reroute(env->irn, op, irg);
1686 DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode));
1687 if (tgt_bits == 8 || src_bits == 8) {
1688 new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
1691 new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem, mode_T);
/* common attribute setup for every Conv node that was actually created */
1698 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1699 set_ia32_tgt_mode(new_op, tgt_mode);
1700 set_ia32_src_mode(new_op, src_mode);
1702 set_ia32_am_support(new_op, ia32_am_Source);
1704 new_op = new_rd_Proj(dbg, irg, block, new_op, tgt_mode, 0);
1712 /********************************************
1715 * | |__ ___ _ __ ___ __| | ___ ___
1716 * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __|
1717 * | |_) | __/ | | | (_) | (_| | __/\__ \
1718 * |_.__/ \___|_| |_|\___/ \__,_|\___||___/
1720 ********************************************/
/* Transforms a be_StackParam into a frame-relative ia32 Load and returns the value Proj. */
1722 static ir_node *gen_StackParam(ia32_transform_env_t *env) {
1723 ir_node *new_op = NULL;
1724 ir_node *node = env->irn;
1725 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1726 ir_node *mem = new_rd_NoMem(env->irg);
1727 ir_node *ptr = get_irn_n(node, 0);
1728 entity *ent = be_get_frame_entity(node);
1729 ir_mode *mode = env->mode;
1731 // /* If the StackParam has only one user -> */
1732 // /* put it in the Block where the user resides */
1733 // if (get_irn_n_edges(node) == 1) {
1734 // env->block = get_nodes_block(get_edge_src_irn(get_irn_out_edge_first(node)));
/* pick the load variant: SSE float load, x87 load, or plain GP load */
1737 if (mode_is_float(mode)) {
1738 if (USE_SSE2(env->cg))
1739 new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
1741 env->cg->used_x87 = 1;
1742 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
1746 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
/* address the parameter through its frame entity */
1749 set_ia32_frame_ent(new_op, ent);
1750 set_ia32_use_frame(new_op);
1752 set_ia32_am_support(new_op, ia32_am_Source);
1753 set_ia32_op_type(new_op, ia32_AddrModeS);
1754 set_ia32_am_flavour(new_op, ia32_B);
1755 set_ia32_ls_mode(new_op, mode);
1757 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1759 return new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, 0);
1763 * Transforms a FrameAddr into an ia32 Add.
1765 static ir_node *gen_FrameAddr(ia32_transform_env_t *env) {
1766 ir_node *new_op = NULL;
1767 ir_node *node = env->irn;
1768 ir_node *op = get_irn_n(node, 0);
1769 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1770 ir_node *nomem = new_rd_NoMem(env->irg);
/* frame address = frame pointer + entity offset, expressed as an Add
   whose immediate is filled in from the frame entity after frame layout */
1772 new_op = new_rd_ia32_Add(env->dbg, env->irg, env->block, noreg, noreg, op, noreg, nomem, mode_T);
1773 set_ia32_frame_ent(new_op, be_get_frame_entity(node));
1774 set_ia32_am_support(new_op, ia32_am_Full);
1775 set_ia32_use_frame(new_op);
1776 set_ia32_immop_type(new_op, ia32_ImmConst);
1778 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1780 return new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, 0);
1784 * Transforms a FrameLoad into an ia32 Load.
1786 static ir_node *gen_FrameLoad(ia32_transform_env_t *env) {
1787 ir_node *new_op = NULL;
1788 ir_node *node = env->irn;
1789 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1790 ir_node *mem = get_irn_n(node, 0);
1791 ir_node *ptr = get_irn_n(node, 1);
1792 entity *ent = be_get_frame_entity(node);
/* the load mode comes from the frame entity's type, not from the node */
1793 ir_mode *mode = get_type_mode(get_entity_type(ent));
/* pick the load variant: SSE float load, x87 load, or plain GP load */
1795 if (mode_is_float(mode)) {
1796 if (USE_SSE2(env->cg))
1797 new_op = new_rd_ia32_fLoad(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
1799 env->cg->used_x87 = 1;
1800 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
1804 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem, mode_T);
1807 set_ia32_frame_ent(new_op, ent);
1808 set_ia32_use_frame(new_op);
1810 set_ia32_am_support(new_op, ia32_am_Source);
1811 set_ia32_op_type(new_op, ia32_AddrModeS);
1812 set_ia32_am_flavour(new_op, ia32_B);
1813 set_ia32_ls_mode(new_op, mode);
1815 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1822 * Transforms a FrameStore into an ia32 Store.
1824 static ir_node *gen_FrameStore(ia32_transform_env_t *env) {
1825 ir_node *new_op = NULL;
1826 ir_node *node = env->irn;
1827 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1828 ir_node *mem = get_irn_n(node, 0);
1829 ir_node *ptr = get_irn_n(node, 1);
1830 ir_node *val = get_irn_n(node, 2);
1831 entity *ent = be_get_frame_entity(node);
1832 ir_mode *mode = get_irn_mode(val);
/* pick the store variant: SSE float store, x87 store, 8 bit or plain GP store */
1834 if (mode_is_float(mode)) {
1835 if (USE_SSE2(env->cg))
1836 new_op = new_rd_ia32_fStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
1838 env->cg->used_x87 = 1;
1839 new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
1842 else if (get_mode_size_bits(mode) == 8) {
1843 new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
1846 new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, mem, mode_T);
1849 set_ia32_frame_ent(new_op, ent);
1850 set_ia32_use_frame(new_op);
1852 set_ia32_am_support(new_op, ia32_am_Dest);
1853 set_ia32_op_type(new_op, ia32_AddrModeD);
1854 set_ia32_am_flavour(new_op, ia32_B);
1855 set_ia32_ls_mode(new_op, mode);
1857 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env));
1864 /*********************************************************
1867 * _ __ ___ __ _ _ _ __ __| |_ __ ___ _____ _ __
1868 * | '_ ` _ \ / _` | | '_ \ / _` | '__| \ \ / / _ \ '__|
1869 * | | | | | | (_| | | | | | | (_| | | | |\ V / __/ |
1870 * |_| |_| |_|\__,_|_|_| |_| \__,_|_| |_| \_/ \___|_|
1872 *********************************************************/
1875 * Transforms a Sub or fSub into Neg--Add iff OUT_REG == SRC2_REG.
1876 * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
1878 void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) {
1879 ia32_transform_env_t tenv;
1880 ir_node *in1, *in2, *noreg, *nomem, *res;
1881 const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots;
1883 /* Return if AM node or not a Sub or fSub */
1884 if (get_ia32_op_type(irn) != ia32_Normal || !(is_ia32_Sub(irn) || is_ia32_fSub(irn)))
1887 noreg = ia32_new_NoReg_gp(cg);
1888 nomem = new_rd_NoMem(cg->irg);
/* inputs 0/1 are the base/index placeholders, 2/3 the real operands */
1889 in1 = get_irn_n(irn, 2);
1890 in2 = get_irn_n(irn, 3);
1891 in1_reg = arch_get_irn_register(cg->arch_env, in1);
1892 in2_reg = arch_get_irn_register(cg->arch_env, in2);
1893 out_reg = get_ia32_out_reg(irn, 0);
1895 tenv.block = get_nodes_block(irn);
1896 tenv.dbg = get_irn_dbg_info(irn);
1899 DEBUG_ONLY(tenv.mod = cg->mod;)
1900 tenv.mode = get_ia32_res_mode(irn);
1903 /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */
1904 if (REGS_ARE_EQUAL(out_reg, in2_reg)) {
1905 /* generate the neg src2 */
1906 res = gen_Minus(&tenv, in2);
/* the negated value must live in src2's register so the Add can overwrite it */
1907 arch_set_irn_register(cg->arch_env, res, in2_reg);
1909 /* add to schedule */
1910 sched_add_before(irn, res);
1912 /* generate the add */
1913 if (mode_is_float(tenv.mode)) {
1914 res = new_rd_ia32_fAdd(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, res, in1, nomem, mode_T);
1915 set_ia32_am_support(res, ia32_am_Source);
1918 res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, res, in1, nomem, mode_T);
1919 set_ia32_am_support(res, ia32_am_Full);
1922 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(&tenv));
/* carry the register assignment of the old Sub over to the new Add */
1924 slots = get_ia32_slots(res);
1927 /* add to schedule */
1928 sched_add_before(irn, res);
1930 /* remove the old sub */
1933 /* exchange the add and the sub */
1939 * Transforms a LEA into an Add if possible
1940 * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION.
1942 void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) {
1943 ia32_am_flavour_t am_flav;
1945 ir_node *res = NULL;
1946 ir_node *nomem, *noreg, *base, *index, *op1, *op2;
1948 ia32_transform_env_t tenv;
1949 const arch_register_t *out_reg, *base_reg, *index_reg;
1952 if (! is_ia32_Lea(irn))
1955 am_flav = get_ia32_am_flavour(irn);
1957 /* only some LEAs can be transformed to an Add */
/* i.e. those with at most two address components: base+offset, offset+index, base+index */
1958 if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_OI && am_flav != ia32_am_BI)
1961 noreg = ia32_new_NoReg_gp(cg);
1962 nomem = new_rd_NoMem(cg->irg);
1965 base = get_irn_n(irn, 0);
1966 index = get_irn_n(irn,1);
1968 offs = get_ia32_am_offs(irn);
1970 /* offset has a explicit sign -> we need to skip + */
1971 if (offs && offs[0] == '+')
1974 out_reg = arch_get_irn_register(cg->arch_env, irn);
1975 base_reg = arch_get_irn_register(cg->arch_env, base);
1976 index_reg = arch_get_irn_register(cg->arch_env, index);
1978 tenv.block = get_nodes_block(irn);
1979 tenv.dbg = get_irn_dbg_info(irn);
1982 DEBUG_ONLY(tenv.mod = cg->mod;)
1983 tenv.mode = get_irn_mode(irn);
/* an Add writes its first operand, so the LEA's output register must match
   one of the used inputs; otherwise the transformation is impossible */
1986 switch(get_ia32_am_flavour(irn)) {
1988 /* out register must be same as base register */
1989 if (! REGS_ARE_EQUAL(out_reg, base_reg))
1995 /* out register must be same as base register */
1996 if (! REGS_ARE_EQUAL(out_reg, base_reg))
2003 /* out register must be same as index register */
2004 if (! REGS_ARE_EQUAL(out_reg, index_reg))
2011 /* out register must be same as one in register */
2012 if (REGS_ARE_EQUAL(out_reg, base_reg)) {
2016 else if (REGS_ARE_EQUAL(out_reg, index_reg)) {
2021 /* in registers a different from out -> no Add possible */
2028 res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, op1, op2, nomem, mode_T);
2029 arch_set_irn_register(cg->arch_env, res, out_reg);
2030 set_ia32_op_type(res, ia32_Normal);
/* carry the LEA's offset over as the Add's immediate operand */
2033 set_ia32_cnst(res, offs);
2034 set_ia32_immop_type(res, ia32_ImmConst);
2037 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(&tenv));
2039 /* add Add to schedule */
2040 sched_add_before(irn, res);
2042 res = new_rd_Proj(tenv.dbg, tenv.irg, tenv.block, res, tenv.mode, 0);
2044 /* add result Proj to schedule */
2045 sched_add_before(irn, res);
2047 /* remove the old LEA */
2050 /* exchange the Add and the LEA */
2055 * Transforms the given firm node (and maybe some other related nodes)
2056 * into one or more assembler nodes.
2058 * @param node the firm node
2059 * @param env the debug module
2061 void ia32_transform_node(ir_node *node, void *env) {
2062 ia32_code_gen_t *cgenv = (ia32_code_gen_t *)env;
2064 ir_node *asm_node = NULL;
2065 ia32_transform_env_t tenv;
/* collect the per-node transformation context once, the gen_* helpers read it */
2070 tenv.block = get_nodes_block(node);
2071 tenv.dbg = get_irn_dbg_info(node);
2072 tenv.irg = current_ir_graph;
2074 DEBUG_ONLY(tenv.mod = cgenv->mod;)
2075 tenv.mode = get_irn_mode(node);
/* dispatch helpers: UNOP/BINOP unpack the firm operands, GEN passes only the env,
   IGN silently keeps the node, BAD reports an unimplemented opcode */
2078 #define UNOP(a) case iro_##a: asm_node = gen_##a(&tenv, get_##a##_op(node)); break
2079 #define BINOP(a) case iro_##a: asm_node = gen_##a(&tenv, get_##a##_left(node), get_##a##_right(node)); break
2080 #define GEN(a) case iro_##a: asm_node = gen_##a(&tenv); break
2081 #define IGN(a) case iro_##a: break
2082 #define BAD(a) case iro_##a: goto bad
/* OTHER_BIN handles ops registered at runtime (e.g. Mulh) that have no iro_ constant */
2083 #define OTHER_BIN(a) \
2084 if (get_irn_op(node) == get_op_##a()) { \
2085 asm_node = gen_##a(&tenv, get_irn_n(node, 0), get_irn_n(node, 1)); \
2089 if (be_is_##a(node)) { \
2090 asm_node = gen_##a(&tenv); \
2094 DBG((tenv.mod, LEVEL_1, "check %+F ... ", node));
2096 code = get_irn_opcode(node);
2142 /* constant transformation happens earlier */
2172 fprintf(stderr, "Not implemented: %s\n", get_irn_opname(node));
2176 /* exchange nodes if a new one was generated */
2178 exchange(node, asm_node);
2179 DB((tenv.mod, LEVEL_1, "created node %+F[%p]\n", asm_node, asm_node));
2182 DB((tenv.mod, LEVEL_1, "ignored\n"));