/**
 * This file implements the IR transformation from firm into ia32-Firm.
 * @author Christian Wuerdig
 */
15 #include "irgraph_t.h"
20 #include "iredges_t.h"
30 #include "archop.h" /* we need this for Min and Max nodes */
33 #include "../benode_t.h"
34 #include "../besched.h"
37 #include "bearch_ia32_t.h"
38 #include "ia32_nodes_attr.h"
39 #include "ia32_transform.h"
40 #include "ia32_new_nodes.h"
41 #include "ia32_map_regs.h"
42 #include "ia32_dbg_stat.h"
43 #include "ia32_optimize.h"
44 #include "ia32_util.h"
46 #include "gen_ia32_regalloc_if.h"
48 #define SFP_SIGN "0x80000000"
49 #define DFP_SIGN "0x8000000000000000"
50 #define SFP_ABS "0x7FFFFFFF"
51 #define DFP_ABS "0x7FFFFFFFFFFFFFFF"
53 #define TP_SFP_SIGN "ia32_sfp_sign"
54 #define TP_DFP_SIGN "ia32_dfp_sign"
55 #define TP_SFP_ABS "ia32_sfp_abs"
56 #define TP_DFP_ABS "ia32_dfp_abs"
58 #define ENT_SFP_SIGN "IA32_SFP_SIGN"
59 #define ENT_DFP_SIGN "IA32_DFP_SIGN"
60 #define ENT_SFP_ABS "IA32_SFP_ABS"
61 #define ENT_DFP_ABS "IA32_DFP_ABS"
63 extern ir_op *get_op_Mulh(void);
65 typedef ir_node *construct_binop_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \
66 ir_node *op1, ir_node *op2, ir_node *mem, ir_mode *mode);
68 typedef ir_node *construct_unop_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \
69 ir_node *op, ir_node *mem, ir_mode *mode);
72 ia32_SSIGN, ia32_DSIGN, ia32_SABS, ia32_DABS, ia32_known_const_max
75 /****************************************************************************************************
77 * | | | | / _| | | (_)
78 * _ __ ___ __| | ___ | |_ _ __ __ _ _ __ ___| |_ ___ _ __ _ __ ___ __ _| |_ _ ___ _ __
79 * | '_ \ / _ \ / _` |/ _ \ | __| '__/ _` | '_ \/ __| _/ _ \| '__| '_ ` _ \ / _` | __| |/ _ \| '_ \
80 * | | | | (_) | (_| | __/ | |_| | | (_| | | | \__ \ || (_) | | | | | | | | (_| | |_| | (_) | | | |
81 * |_| |_|\___/ \__,_|\___| \__|_| \__,_|_| |_|___/_| \___/|_| |_| |_| |_|\__,_|\__|_|\___/|_| |_|
83 ****************************************************************************************************/
/**
 * Returns 1 if irn is a Const representing 0, 0 otherwise.
 */
static INLINE int is_ia32_Const_0(ir_node *irn) {
/* only ia32 nodes whose op type is ia32_Const are inspected; all others yield 0 */
return (is_ia32_irn(irn) && get_ia32_op_type(irn) == ia32_Const) ?
classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_NULL : 0;
/**
 * Returns 1 if irn is a Const representing 1, 0 otherwise.
 */
static INLINE int is_ia32_Const_1(ir_node *irn) {
/* only ia32 nodes whose op type is ia32_Const are inspected; all others yield 0 */
return (is_ia32_irn(irn) && get_ia32_op_type(irn) == ia32_Const) ?
classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_ONE : 0;
/**
 * Returns the Proj representing the UNKNOWN register for given mode.
 */
static ir_node *be_get_unknown_for_mode(ia32_code_gen_t *cg, ir_mode *mode) {
be_abi_irg_t *babi = cg->birg->abi;
const arch_register_t *unknwn_reg = NULL;
if (mode_is_float(mode)) {
/* float: XMM register under SSE2, virtual x87 fp register otherwise */
unknwn_reg = USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_UKNWN] : &ia32_vfp_regs[REG_VFP_UKNWN];
/* NOTE(review): the matching else-header is not visible in this fragment;
   the assignment below presumably covers the integer (gp) case — confirm */
unknwn_reg = &ia32_gp_regs[REG_GP_UKNWN];
return be_abi_get_callee_save_irn(babi, unknwn_reg);
/**
 * Gets the Proj with number pn from irn, by scanning its out edges.
 */
static ir_node *get_proj_for_pn(const ir_node *irn, long pn) {
const ir_edge_t *edge;
/* only mode_T nodes have Projs */
assert(get_irn_mode(irn) == mode_T && "need mode_T");
foreach_out_edge(irn, edge) {
proj = get_edge_src_irn(edge);
if (get_Proj_proj(proj) == pn)
/**
 * Collects all Projs of a node into the node array. Index is the projnum.
 * BEWARE: The caller has to assure the appropriate array size!
 */
static void ia32_collect_Projs(ir_node *irn, ir_node **projs, int size) {
const ir_edge_t *edge;
assert(get_irn_mode(irn) == mode_T && "need mode_T");
/* clear first: slots without a corresponding Proj stay NULL */
memset(projs, 0, size * sizeof(projs[0]));
foreach_out_edge(irn, edge) {
proj = get_edge_src_irn(edge);
projs[get_Proj_proj(proj)] = proj;
/**
 * Renumbers the proj having pn_old in the array to pn_new
 * and removes the proj from the array.
 */
static INLINE void ia32_renumber_Proj(ir_node **projs, long pn_old, long pn_new) {
set_Proj_proj(projs[pn_old], pn_new);
projs[pn_old] = NULL;
/**
 * SSE convert of an integer node into a floating point node.
 * Builds a Conv_I2FP converting in to tgt_mode; debug info is taken from
 * old_node for statistics/naming.
 */
static ir_node *gen_sse_conv_int2float(ia32_code_gen_t *cg, dbg_info *dbg, ir_graph *irg, ir_node *block,
ir_node *in, ir_node *old_node, ir_mode *tgt_mode)
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *nomem = new_rd_NoMem(irg);
/* the conversion may take its operand from memory (source address mode) */
ir_node *conv = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, in, nomem, tgt_mode);
set_ia32_am_support(conv, ia32_am_Source);
SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node));
/**
 * SSE convert of a float node into a double node.
 * Builds a Conv_FP2FP with fixed target mode mode_D.
 */
static ir_node *gen_sse_conv_f2d(ia32_code_gen_t *cg, dbg_info *dbg, ir_graph *irg, ir_node *block,
ir_node *in, ir_node *old_node)
ir_node *noreg = ia32_new_NoReg_gp(cg);
ir_node *nomem = new_rd_NoMem(irg);
/* the conversion may take its operand from memory (source address mode) */
ir_node *conv = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, in, nomem, mode_D);
set_ia32_am_support(conv, ia32_am_Source);
SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node));
/**
 * Generates an entity for a known FP const (used for FP Neg + Abs).
 * The entity is created lazily, cached per constant kind, and its
 * ident is returned.
 */
static ident *gen_fp_known_const(ia32_known_const_t kct) {
/* per-constant table: type name, entity name and constant bit pattern.
   NOTE(review): a tp_name field is read below but its declaration is not
   visible in this fragment — confirm against the full struct */
static const struct {
const char *ent_name;
const char *cnst_str;
} names [ia32_known_const_max] = {
{ TP_SFP_SIGN, ENT_SFP_SIGN, SFP_SIGN }, /* ia32_SSIGN */
{ TP_DFP_SIGN, ENT_DFP_SIGN, DFP_SIGN }, /* ia32_DSIGN */
{ TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS }, /* ia32_SABS */
{ TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS } /* ia32_DABS */
/* one cached entity per known constant kind */
static ir_entity *ent_cache[ia32_known_const_max];
const char *tp_name, *ent_name, *cnst_str;
ent_name = names[kct].ent_name;
if (! ent_cache[kct]) {
tp_name = names[kct].tp_name;
cnst_str = names[kct].cnst_str;
/* single precision constants are 32 bit, double precision 64 bit */
mode = kct == ia32_SSIGN || kct == ia32_SABS ? mode_Iu : mode_Lu;
tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode);
tp = new_type_primitive(new_id_from_str(tp_name), mode);
ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp);
set_entity_ld_ident(ent, get_entity_ident(ent));
set_entity_visibility(ent, visibility_local);
set_entity_variability(ent, variability_constant);
set_entity_allocation(ent, allocation_static);
/* we create a new entity here: its initialization must reside on the
   const code irg */
rem = current_ir_graph;
current_ir_graph = get_const_code_irg();
cnst = new_Const(mode, tv);
current_ir_graph = rem;
set_atomic_ent_value(ent, cnst);
/* cache the entry */
ent_cache[kct] = ent;
return get_entity_ident(ent_cache[kct]);
/**
 * Prints the old node name on cg obst and returns a pointer to it.
 * The string lives on the isa's name obstack and is NUL-terminated.
 */
const char *ia32_get_old_node_name(ia32_code_gen_t *cg, ir_node *irn) {
ia32_isa_t *isa = (ia32_isa_t *)cg->arch_env->isa;
/* format the node ("%+F") onto the obstack, then terminate and finish */
lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", irn);
obstack_1grow(isa->name_obst, 0);
return obstack_finish(isa->name_obst);
/* determine if one operator is an Imm: returns the constant operand or NULL.
   NOTE(review): the condition line guarding the two returns below is not
   visible in this fragment — callers pass op1 == NULL for non-commutative
   operations, so the guard is presumably on op1; confirm */
static ir_node *get_immediate_op(ir_node *op1, ir_node *op2) {
return is_ia32_Cnst(op1) ? op1 : (is_ia32_Cnst(op2) ? op2 : NULL);
else return is_ia32_Cnst(op2) ? op2 : NULL;
/* determine if one operator is not an Imm: returns the first non-constant
   operand (preferring op1), or NULL if both are constants */
static ir_node *get_expr_op(ir_node *op1, ir_node *op2) {
return !is_ia32_Cnst(op1) ? op1 : (!is_ia32_Cnst(op2) ? op2 : NULL);
/**
 * Tries to fold a constant operand of node into its Immop attribute.
 * in1/in2 are the input indices of the left/right operand; only the right
 * operand can become an immediate, so commutative nodes may swap first.
 */
static void fold_immediate(ia32_transform_env_t *env, ir_node *node, int in1, int in2) {
/* immediate folding must be enabled */
if(! (env->cg->opt & IA32_OPT_IMMOPS))
left = get_irn_n(node, in1);
right = get_irn_n(node, in2);
if(!is_ia32_Cnst(right) && is_ia32_Cnst(left)) {
/* we can only set right operand to immediate */
if(!is_ia32_commutative(node))
/* exchange left/right */
set_irn_n(node, in1, right);
set_irn_n(node, in2, ia32_get_admissible_noreg(env->cg, node, in2));
set_ia32_Immop_attr(node, left);
} else if(is_ia32_Cnst(right)) {
/* right operand already constant: fold it directly */
set_irn_n(node, in2, ia32_get_admissible_noreg(env->cg, node, in2));
set_ia32_Immop_attr(node, right);
/* a folded immediate can no longer come from a source address mode */
set_ia32_am_support(node, get_ia32_am_support(node) & ~ia32_am_Source);
/**
 * Construct a standard binary operation, set AM and immediate if required.
 *
 * @param env   The transformation environment
 * @param op1   The first operand
 * @param op2   The second operand
 * @param func  The node constructor function
 * @return The constructed ia32 node.
 */
static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, construct_binop_func *func) {
ir_node *new_op = NULL;
ir_mode *mode = env->mode;
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
if(mode_is_float(mode)) {
/* float ops can only read their operand from memory, not write */
new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem, mode);
set_ia32_am_support(new_op, ia32_am_Source);
new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem, mode);
/* integer Mul supports only source AM; other int ops support full AM */
if(func == new_rd_ia32_Mul) {
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_am_support(new_op, ia32_am_Full);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
set_ia32_res_mode(new_op, mode);
/* inherit commutativity from the Firm op */
if (is_op_commutative(get_irn_op(env->irn))) {
set_ia32_commutative(new_op);
/* vfmul takes no immediates, so skip folding for it */
if(func != new_rd_ia32_vfmul) {
fold_immediate(env, new_op, 2, 3);
/**
 * Construct a shift/rotate binary operation, sets AM and immediate if required.
 *
 * @param env   The transformation environment
 * @param op1   The first operand
 * @param op2   The second operand (shift amount)
 * @param func  The node constructor function
 * @return The constructed ia32 node.
 */
static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, construct_binop_func *func) {
ir_node *new_op = NULL;
ir_mode *mode = env->mode;
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
ir_node *expr_op, *imm_op;
DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
assert(! mode_is_float(mode) && "Shift/Rotate with float not supported");
/* Check if immediate optimization is on and */
/* if it's an operation with immediate. */
/* op1 is passed as NULL: shifts are never commutative, only the shift
   amount (op2) may become an immediate */
imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(NULL, op2) : NULL;
expr_op = get_expr_op(op1, op2);
assert((expr_op || imm_op) && "invalid operands");
/* We have two consts here: not yet supported */
/* Limit imm_op within range imm8 */
tv = get_ia32_Immop_tarval(imm_op);
/* x86 only uses the low 5 bits of the count: reduce mod 32 */
tv = tarval_mod(tv, new_tarval_from_long(32, get_tarval_mode(tv)));
set_ia32_Immop_tarval(imm_op, tv);
/* integer operations */
/* This is shift/rot with const */
DB((mod, LEVEL_1, "Shift/Rot with immediate ..."));
new_op = func(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem, mode);
set_ia32_Immop_attr(new_op, imm_op);
/* This is a normal shift/rot */
DB((mod, LEVEL_1, "Shift/Rot binop ..."));
new_op = func(dbg, irg, block, noreg, noreg, op1, op2, nomem, mode);
set_ia32_am_support(new_op, ia32_am_Dest);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
set_ia32_res_mode(new_op, mode);
/* variable shift amounts must live in register cl */
set_ia32_emit_cl(new_op);
/**
 * Construct a standard unary operation, set AM and immediate if required.
 *
 * @param env   The transformation environment
 * @param op    The operand
 * @param func  The node constructor function
 * @return The constructed ia32 node.
 */
static ir_node *gen_unop(ia32_transform_env_t *env, ir_node *op, construct_unop_func *func) {
ir_node *new_op = NULL;
ir_mode *mode = env->mode;
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
new_op = func(dbg, irg, block, noreg, noreg, op, nomem, mode);
if (mode_is_float(mode)) {
DB((mod, LEVEL_1, "FP unop ..."));
/* floating point operations don't support implicit store */
set_ia32_am_support(new_op, ia32_am_None);
DB((mod, LEVEL_1, "INT unop ..."));
/* integer unops may operate directly on a memory destination */
set_ia32_am_support(new_op, ia32_am_Dest);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
set_ia32_res_mode(new_op, mode);
/**
 * Creates an ia32 Add with immediate.
 *
 * @param env      The transformation environment
 * @param expr_op  The expression operator
 * @param const_op The constant
 * @return the created ia32 Add node
 */
static ir_node *gen_imm_Add(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) {
ir_node *new_op = NULL;
tarval *tv = get_ia32_Immop_tarval(const_op);
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
ir_mode *mode = env->mode;
tarval_classification_t class_tv, class_negtv;
DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* try to optimize to inc/dec */
if ((env->cg->opt & IA32_OPT_INCDEC) && tv && (get_ia32_op_type(const_op) == ia32_Const)) {
/* optimize tarvals */
class_tv = classify_tarval(tv);
class_negtv = classify_tarval(tarval_neg(tv));
if (class_tv == TV_CLASSIFY_ONE) { /* + 1 == INC */
DB((env->mod, LEVEL_2, "Add(1) to Inc ... "));
new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem, mode);
else if (class_tv == TV_CLASSIFY_ALL_ONE || class_negtv == TV_CLASSIFY_ONE) { /* + (-1) == DEC */
DB((mod, LEVEL_2, "Add(-1) to Dec ... "));
new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem, mode);
/* general case: Add with the constant folded into the Immop attribute */
new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem, mode);
set_ia32_Immop_attr(new_op, const_op);
set_ia32_commutative(new_op);
/**
 * Creates an ia32 Add.
 *
 * @param env The transformation environment
 * @return the created ia32 Add node
 */
static ir_node *gen_Add(ia32_transform_env_t *env) {
ir_node *new_op = NULL;
dbg_info *dbg = env->dbg;
ir_mode *mode = env->mode;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
ir_node *expr_op, *imm_op;
ir_node *op1 = get_Add_left(env->irn);
ir_node *op2 = get_Add_right(env->irn);
/* Check if immediate optimization is on and */
/* if it's an operation with immediate. */
imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(op1, op2) : NULL;
expr_op = get_expr_op(op1, op2);
assert((expr_op || imm_op) && "invalid operands");
/* float add: handled entirely by gen_binop (SSE2 or x87) */
if (mode_is_float(mode)) {
if (USE_SSE2(env->cg))
return gen_binop(env, op1, op2, new_rd_ia32_xAdd);
return gen_binop(env, op1, op2, new_rd_ia32_vfadd);
/* No expr_op means, that we have two const - one symconst and */
/* one tarval or another symconst - because this case is not */
/* covered by constant folding */
/* We need to check for: */
/* 1) symconst + const -> becomes a LEA */
/* 2) symconst + symconst -> becomes a const + LEA as the elf */
/* linker doesn't support two symconsts */
if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) {
/* this is the 2nd case */
new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode);
set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
set_ia32_am_flavour(new_op, ia32_am_OB);
DBG_OPT_LEA3(op1, op2, env->irn, new_op);
/* this is the 1st case */
new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
DBG_OPT_LEA3(op1, op2, env->irn, new_op);
/* put the symconst into the LEA's sc attribute, the tarval into its offset */
if (get_ia32_op_type(op1) == ia32_SymConst) {
set_ia32_am_sc(new_op, get_ia32_id_cnst(op1));
add_ia32_am_offs(new_op, get_ia32_cnst(op2));
add_ia32_am_offs(new_op, get_ia32_cnst(op1));
set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
set_ia32_am_flavour(new_op, ia32_am_O);
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_op_type(new_op, ia32_AddrModeS);
/* Lea doesn't need a Proj */
/* This is expr + const */
new_op = gen_imm_Add(env, expr_op, imm_op);
set_ia32_am_support(new_op, ia32_am_Dest);
/* This is a normal add */
new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, op1, op2, nomem, mode);
set_ia32_am_support(new_op, ia32_am_Full);
set_ia32_commutative(new_op);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
set_ia32_res_mode(new_op, mode);
/**
 * Creates an ia32 Mul.
 *
 * @param env The transformation environment
 * @return the created ia32 Mul node
 */
static ir_node *gen_Mul(ia32_transform_env_t *env) {
ir_node *op1 = get_Mul_left(env->irn);
ir_node *op2 = get_Mul_right(env->irn);
/* float multiply: SSE2 xMul when available, x87 vfmul otherwise */
if (mode_is_float(env->mode)) {
if (USE_SSE2(env->cg))
new_op = gen_binop(env, op1, op2, new_rd_ia32_xMul);
new_op = gen_binop(env, op1, op2, new_rd_ia32_vfmul);
/* integer multiply */
new_op = gen_binop(env, op1, op2, new_rd_ia32_Mul);
/**
 * Creates an ia32 Mulh.
 * Note: Mul produces a 64Bit result and Mulh returns the upper 32 bit of
 * this result while Mul returns the lower 32 bit.
 *
 * @param env The transformation environment
 * @return the created ia32 Mulh node
 */
static ir_node *gen_Mulh(ia32_transform_env_t *env) {
ir_node *op1 = get_irn_n(env->irn, 0);
ir_node *op2 = get_irn_n(env->irn, 1);
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *proj_EAX, *proj_EDX, *mulh;
assert(!mode_is_float(env->mode) && "Mulh with float not supported");
mulh = new_rd_ia32_Mulh(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, new_NoMem());
set_ia32_commutative(mulh);
set_ia32_res_mode(mulh, env->mode);
set_ia32_am_support(mulh, ia32_am_Source);
/* imediates are not supported, so no fold_immediate */
/* mul writes both EAX (low part) and EDX (high part) */
proj_EAX = new_rd_Proj(env->dbg, env->irg, env->block, mulh, mode_Is, pn_EAX);
proj_EDX = new_rd_Proj(env->dbg, env->irg, env->block, mulh, mode_Is, pn_EDX);
/* keep the unused low-part result alive so its register gets clobbered */
be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], env->irg, env->block, 1, in);
669 * Creates an ia32 And.
671 * @param env The transformation environment
672 * @return The created ia32 And node
674 static ir_node *gen_And(ia32_transform_env_t *env) {
675 ir_node *op1 = get_And_left(env->irn);
676 ir_node *op2 = get_And_right(env->irn);
678 assert (! mode_is_float(env->mode));
679 return gen_binop(env, op1, op2, new_rd_ia32_And);
685 * Creates an ia32 Or.
687 * @param env The transformation environment
688 * @return The created ia32 Or node
690 static ir_node *gen_Or(ia32_transform_env_t *env) {
691 ir_node *op1 = get_Or_left(env->irn);
692 ir_node *op2 = get_Or_right(env->irn);
694 assert (! mode_is_float(env->mode));
695 return gen_binop(env, op1, op2, new_rd_ia32_Or);
701 * Creates an ia32 Eor.
703 * @param env The transformation environment
704 * @return The created ia32 Eor node
706 static ir_node *gen_Eor(ia32_transform_env_t *env) {
707 ir_node *op1 = get_Eor_left(env->irn);
708 ir_node *op2 = get_Eor_right(env->irn);
710 assert(! mode_is_float(env->mode));
711 return gen_binop(env, op1, op2, new_rd_ia32_Eor);
/**
 * Creates an ia32 Max.
 *
 * @param env The transformation environment
 * @return the created ia32 Max node
 */
static ir_node *gen_Max(ia32_transform_env_t *env) {
ir_node *op1 = get_irn_n(env->irn, 0);
ir_node *op2 = get_irn_n(env->irn, 1);
/* float max: SSE2 provides a dedicated xMax */
if (mode_is_float(env->mode)) {
if (USE_SSE2(env->cg))
new_op = gen_binop(env, op1, op2, new_rd_ia32_xMax);
/* integer max: dedicated ia32 Max pseudo node, no address mode */
new_op = new_rd_ia32_Max(env->dbg, env->irg, env->block, op1, op2, env->mode);
set_ia32_am_support(new_op, ia32_am_None);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
/**
 * Creates an ia32 Min.
 *
 * @param env The transformation environment
 * @return the created ia32 Min node
 */
static ir_node *gen_Min(ia32_transform_env_t *env) {
ir_node *op1 = get_irn_n(env->irn, 0);
ir_node *op2 = get_irn_n(env->irn, 1);
/* float min: SSE2 provides a dedicated xMin */
if (mode_is_float(env->mode)) {
if (USE_SSE2(env->cg))
new_op = gen_binop(env, op1, op2, new_rd_ia32_xMin);
/* integer min: dedicated ia32 Min pseudo node, no address mode */
new_op = new_rd_ia32_Min(env->dbg, env->irg, env->block, op1, op2, env->mode);
set_ia32_am_support(new_op, ia32_am_None);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
/**
 * Creates an ia32 Sub with immediate.
 *
 * @param env      The transformation environment
 * @param expr_op  The first operator
 * @param const_op The constant operator
 * @return The created ia32 Sub node
 */
static ir_node *gen_imm_Sub(ia32_transform_env_t *env, ir_node *expr_op, ir_node *const_op) {
ir_node *new_op = NULL;
tarval *tv = get_ia32_Immop_tarval(const_op);
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
ir_mode *mode = env->mode;
tarval_classification_t class_tv, class_negtv;
DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
/* try to optimize to inc/dec */
if ((env->cg->opt & IA32_OPT_INCDEC) && tv && (get_ia32_op_type(const_op) == ia32_Const)) {
/* optimize tarvals */
class_tv = classify_tarval(tv);
class_negtv = classify_tarval(tarval_neg(tv));
if (class_tv == TV_CLASSIFY_ONE) { /* - 1 == DEC */
DB((mod, LEVEL_2, "Sub(1) to Dec ... "));
new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem, mode);
else if (class_negtv == TV_CLASSIFY_ONE) { /* - (-1) == INC */
DB((mod, LEVEL_2, "Sub(-1) to Inc ... "));
new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem, mode);
/* general case: Sub with the constant folded into the Immop attribute */
new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem, mode);
set_ia32_Immop_attr(new_op, const_op);
/**
 * Creates an ia32 Sub.
 *
 * @param env The transformation environment
 * @return The created ia32 Sub node
 */
static ir_node *gen_Sub(ia32_transform_env_t *env) {
ir_node *new_op = NULL;
dbg_info *dbg = env->dbg;
ir_mode *mode = env->mode;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_NoMem();
ir_node *op1 = get_Sub_left(env->irn);
ir_node *op2 = get_Sub_right(env->irn);
ir_node *expr_op, *imm_op;
/* Check if immediate optimization is on and */
/* if it's an operation with immediate. */
/* Sub is not commutative: only op2 may become an immediate */
imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(NULL, op2) : NULL;
expr_op = get_expr_op(op1, op2);
assert((expr_op || imm_op) && "invalid operands");
/* float sub: handled entirely by gen_binop (SSE2 or x87) */
if (mode_is_float(mode)) {
if (USE_SSE2(env->cg))
return gen_binop(env, op1, op2, new_rd_ia32_xSub);
return gen_binop(env, op1, op2, new_rd_ia32_vfsub);
/* No expr_op means, that we have two const - one symconst and */
/* one tarval or another symconst - because this case is not */
/* covered by constant folding */
/* We need to check for: */
/* 1) symconst - const -> becomes a LEA */
/* 2) symconst - symconst -> becomes a const - LEA as the elf */
/* linker doesn't support two symconsts */
if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) {
/* this is the 2nd case */
new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode);
set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
set_ia32_am_sc_sign(new_op);
set_ia32_am_flavour(new_op, ia32_am_OB);
DBG_OPT_LEA3(op1, op2, env->irn, new_op);
/* this is the 1st case */
new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode);
DBG_OPT_LEA3(op1, op2, env->irn, new_op);
/* symconst operand goes into sc (negated when subtracted), the tarval
   into the offset */
if (get_ia32_op_type(op1) == ia32_SymConst) {
set_ia32_am_sc(new_op, get_ia32_id_cnst(op1));
sub_ia32_am_offs(new_op, get_ia32_cnst(op2));
add_ia32_am_offs(new_op, get_ia32_cnst(op1));
set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
set_ia32_am_sc_sign(new_op);
set_ia32_am_flavour(new_op, ia32_am_O);
set_ia32_am_support(new_op, ia32_am_Source);
set_ia32_op_type(new_op, ia32_AddrModeS);
/* Lea doesn't need a Proj */
/* This is expr - const */
new_op = gen_imm_Sub(env, expr_op, imm_op);
set_ia32_am_support(new_op, ia32_am_Dest);
/* This is a normal sub */
new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, op1, op2, nomem, mode);
set_ia32_am_support(new_op, ia32_am_Full);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
set_ia32_res_mode(new_op, mode);
/**
 * Generates an ia32 DivMod with additional infrastructure for the
 * register allocator if needed.
 *
 * @param env      The transformation environment
 * @param dividend -no comment- :)
 * @param divisor  -no comment- :)
 * @param dm_flav  flavour_Div/Mod/DivMod
 * @return The created ia32 DivMod node
 */
static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir_node *divisor, ia32_op_flavour_t dm_flav) {
ir_node *res, *proj_div, *proj_mod;
ir_node *edx_node, *cltd;
dbg_info *dbg = env->dbg;
ir_graph *irg = env->irg;
ir_node *block = env->block;
ir_mode *mode = env->mode;
ir_node *irn = env->irn;
ir_node *projs[pn_DivMod_max];
ia32_collect_Projs(irn, projs, pn_DivMod_max);
/* fetch mem input and result mode per opcode.
   NOTE(review): the switch/case headers selecting these three variants
   are not visible in this fragment */
mem = get_Div_mem(irn);
mode = get_irn_mode(get_proj_for_pn(irn, pn_Div_res));
mem = get_Mod_mem(irn);
mode = get_irn_mode(get_proj_for_pn(irn, pn_Mod_res));
mem = get_DivMod_mem(irn);
proj_div = get_proj_for_pn(irn, pn_DivMod_res_div);
proj_mod = get_proj_for_pn(irn, pn_DivMod_res_mod);
mode = proj_div ? get_irn_mode(proj_div) : get_irn_mode(proj_mod);
if (mode_is_signed(mode)) {
/* in signed mode, we need to sign extend the dividend */
cltd = new_rd_ia32_Cdq(dbg, irg, block, dividend);
dividend = new_rd_Proj(dbg, irg, block, cltd, mode_Is, pn_ia32_Cdq_EAX);
edx_node = new_rd_Proj(dbg, irg, block, cltd, mode_Is, pn_ia32_Cdq_EDX);
/* unsigned: EDX is simply zero */
edx_node = new_rd_ia32_Const(dbg, irg, block, mode_Iu);
add_irn_dep(edx_node, be_abi_get_start_barrier(env->cg->birg->abi));
set_ia32_Const_type(edx_node, ia32_Const);
set_ia32_Immop_tarval(edx_node, get_tarval_null(mode_Iu));
res = new_rd_ia32_DivMod(dbg, irg, block, dividend, divisor, edx_node, mem, dm_flav);
set_ia32_n_res(res, 2);
/* Only one proj is used -> We must add a second proj and */
/* connect this one to a Keep node to eat up the second */
/* destroyed register. */
/* We also renumber the Firm projs into ia32 projs. */
switch (get_irn_opcode(irn)) {
ia32_renumber_Proj(projs, pn_Div_M, pn_ia32_DivMod_M);
ia32_renumber_Proj(projs, pn_Div_res, pn_ia32_DivMod_div_res);
/* add Proj-Keep for mod res */
in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_DivMod_mod_res);
be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
ia32_renumber_Proj(projs, pn_Mod_M, pn_ia32_DivMod_M);
ia32_renumber_Proj(projs, pn_Mod_res, pn_ia32_DivMod_mod_res);
/* add Proj-Keep for div res */
in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_DivMod_div_res);
be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
/* check, which Proj-Keep, we need to add */
proj_div = get_proj_for_pn(irn, pn_DivMod_res_div);
proj_mod = get_proj_for_pn(irn, pn_DivMod_res_mod);
/* BEWARE: renumber after getting original projs */
ia32_renumber_Proj(projs, pn_DivMod_M, pn_ia32_DivMod_M);
if (proj_div && proj_mod) {
/* we have both results used: simply renumber */
ia32_renumber_Proj(projs, pn_DivMod_res_div, pn_ia32_DivMod_div_res);
ia32_renumber_Proj(projs, pn_DivMod_res_mod, pn_ia32_DivMod_mod_res);
else if (! proj_div && ! proj_mod) {
assert(0 && "Missing DivMod result proj");
else if (! proj_div) {
/* We have only mod result: add div res Proj-Keep */
ia32_renumber_Proj(projs, pn_DivMod_res_mod, pn_ia32_DivMod_mod_res);
in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_DivMod_div_res);
be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
/* We have only div result: add mod res Proj-Keep */
ia32_renumber_Proj(projs, pn_DivMod_res_div, pn_ia32_DivMod_div_res);
in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_DivMod_mod_res);
be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
assert(0 && "Div, Mod, or DivMod expected.");
SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
set_ia32_res_mode(res, mode);
1046 * Wrapper for generate_DivMod. Sets flavour_Mod.
1048 * @param env The transformation environment
1050 static ir_node *gen_Mod(ia32_transform_env_t *env) {
1051 return generate_DivMod(env, get_Mod_left(env->irn), get_Mod_right(env->irn), flavour_Mod);
1055 * Wrapper for generate_DivMod. Sets flavour_Div.
1057 * @param env The transformation environment
1059 static ir_node *gen_Div(ia32_transform_env_t *env) {
1060 return generate_DivMod(env, get_Div_left(env->irn), get_Div_right(env->irn), flavour_Div);
1064 * Wrapper for generate_DivMod. Sets flavour_DivMod.
1066 static ir_node *gen_DivMod(ia32_transform_env_t *env) {
1067 return generate_DivMod(env, get_DivMod_left(env->irn), get_DivMod_right(env->irn), flavour_DivMod);
/**
 * Creates an ia32 floating Div.
 *
 * @param env The transformation environment
 * @return The created ia32 xDiv node
 */
static ir_node *gen_Quot(ia32_transform_env_t *env) {
ir_node *noreg = ia32_new_NoReg_gp(env->cg);
ir_node *nomem = new_rd_NoMem(env->irg);
ir_node *op1 = get_Quot_left(env->irn);
ir_node *op2 = get_Quot_right(env->irn);
ir_mode *mode = get_irn_mode(get_proj_for_pn(env->irn, pn_Quot_res));
ir_node *projs[pn_Quot_max];
/* BEWARE: Projs will be renumbered, so retrieve res Proj here */
ia32_collect_Projs(env->irn, projs, pn_Quot_max);
if (USE_SSE2(env->cg)) {
/* constant divisor: fold it into the Immop attribute */
if (is_ia32_xConst(op2)) {
new_op = new_rd_ia32_xDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, noreg, nomem);
set_ia32_am_support(new_op, ia32_am_None);
set_ia32_Immop_attr(new_op, op2);
new_op = new_rd_ia32_xDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem);
set_ia32_am_support(new_op, ia32_am_Source);
ia32_renumber_Proj(projs, pn_Quot_M, pn_ia32_xDiv_M);
ia32_renumber_Proj(projs, pn_Quot_res, pn_ia32_xDiv_res);
/* no SSE2: use the x87 vfdiv */
new_op = new_rd_ia32_vfdiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem);
set_ia32_am_support(new_op, ia32_am_Source);
ia32_renumber_Proj(projs, pn_Quot_M, pn_ia32_vfdiv_M);
ia32_renumber_Proj(projs, pn_Quot_res, pn_ia32_vfdiv_res);
set_ia32_res_mode(new_op, mode);
SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
1118 * Creates an ia32 Shl.
1120 * @param env The transformation environment
1121 * @return The created ia32 Shl node
1123 static ir_node *gen_Shl(ia32_transform_env_t *env) {
1124 return gen_shift_binop(env, get_Shl_left(env->irn), get_Shl_right(env->irn), new_rd_ia32_Shl);
1130 * Creates an ia32 Shr.
1132 * @param env The transformation environment
1133 * @return The created ia32 Shr node
1135 static ir_node *gen_Shr(ia32_transform_env_t *env) {
1136 return gen_shift_binop(env, get_Shr_left(env->irn), get_Shr_right(env->irn), new_rd_ia32_Shr);
1142 * Creates an ia32 Shrs.
1144 * @param env The transformation environment
1145 * @return The created ia32 Shrs node
1147 static ir_node *gen_Shrs(ia32_transform_env_t *env) {
1148 return gen_shift_binop(env, get_Shrs_left(env->irn), get_Shrs_right(env->irn), new_rd_ia32_Shrs);
/**
 * Creates an ia32 RotL.
 *
 * @param env The transformation environment
 * @param op1 The first operator
 * @param op2 The second operator (rotate amount)
 * @return The created ia32 RotL node
 */
static ir_node *gen_RotL(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
return gen_shift_binop(env, op1, op2, new_rd_ia32_RotL);
1168 * Creates an ia32 RotR (rotate right).
1169 * NOTE: There is no RotR with immediate because this would always be a RotL
1170 * "imm-mode_size_bits" which can be pre-calculated.
1172 * @param env The transformation environment
1173 * @param op1 The first operator (value to rotate)
1174 * @param op2 The second operator (rotate count)
1175 * @return The created ia32 RotR node
1177 static ir_node *gen_RotR(ia32_transform_env_t *env, ir_node *op1, ir_node *op2) {
1178 return gen_shift_binop(env, op1, op2, new_rd_ia32_RotR);
1184 * Creates an ia32 RotR or RotL (depending on the found pattern).
1186 * @param env The transformation environment
1187 * @return The created ia32 RotL or RotR node
1189 static ir_node *gen_Rot(ia32_transform_env_t *env) {
1190 ir_node *rotate = NULL;
1191 ir_node *op1 = get_Rot_left(env->irn);
1192 ir_node *op2 = get_Rot_right(env->irn);
1194 /* Firm has only Rot (which is a RotL), so we are looking for a right (op2)
1195 operand "-e+mode_size_bits" (it's an already modified "mode_size_bits-e",
1196 that means we can create a RotR instead of an Add and a RotL */
1199 ir_node *pred = get_Proj_pred(op2);
1201 if (is_ia32_Add(pred)) {
/* input 2 of an already-transformed ia32 Add is its left operand */
1202 ir_node *pred_pred = get_irn_n(pred, 2);
1203 tarval *tv = get_ia32_Immop_tarval(pred);
1204 long bits = get_mode_size_bits(env->mode);
1205 /* skip a Proj wrapper to reach the producing node */
1206 if (is_Proj(pred_pred)) {
1207 pred_pred = get_Proj_pred(pred_pred);
/* pattern matched: rot count is "mode_size_bits - e", i.e. Minus(e) + bits */
1210 if (is_ia32_Minus(pred_pred) &&
1211 tarval_is_long(tv) &&
1212 get_tarval_long(tv) == bits)
1214 DB((env->mod, LEVEL_1, "RotL into RotR ... "));
/* rotate right by the original (un-negated) count e */
1215 rotate = gen_RotR(env, op1, get_irn_n(pred_pred, 2));
/* fallback: plain rotate left */
1222 rotate = gen_RotL(env, op1, op2);
1231 * Transforms a Minus node.
1233 * @param env The transformation environment
1234 * @param op The Minus operand
1235 * @return The created ia32 Minus node
1237 ir_node *gen_Minus_ex(ia32_transform_env_t *env, ir_node *op) {
1240 ir_mode *mode = env->mode;
1243 if (mode_is_float(mode)) {
1245 if (USE_SSE2(env->cg)) {
/* SSE2 float negate: XOR with the sign-bit constant (0x800... from
 * a known-const entity), loaded via source address mode */
1246 ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
1247 ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
1248 ir_node *nomem = new_rd_NoMem(env->irg);
1250 new_op = new_rd_ia32_xEor(env->dbg, env->irg, env->block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode);
/* pick single- or double-precision sign mask by mode size */
1252 size = get_mode_size_bits(mode);
1253 name = gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);
1255 set_ia32_am_sc(new_op, name);
1257 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
1259 set_ia32_res_mode(new_op, mode);
1260 set_ia32_op_type(new_op, ia32_AddrModeS);
1261 set_ia32_ls_mode(new_op, mode);
/* x87: dedicated change-sign instruction */
1264 new_op = new_rd_ia32_vfchs(env->dbg, env->irg, env->block, op, mode);
1265 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
/* integer negate */
1269 new_op = gen_unop(env, op, new_rd_ia32_Minus);
1276 * Transforms a Minus node (thin wrapper around gen_Minus_ex).
1278 * @param env The transformation environment
1279 * @return The created ia32 Minus node
1281 static ir_node *gen_Minus(ia32_transform_env_t *env) {
1282 return gen_Minus_ex(env, get_Minus_op(env->irn));
1287 * Transforms a Not node.
1289 * @param env The transformation environment
1290 * @return The created ia32 Not node
1292 static ir_node *gen_Not(ia32_transform_env_t *env) {
/* bitwise Not is only defined for integer modes here */
1293 assert (! mode_is_float(env->mode));
1294 return gen_unop(env, get_Not_op(env->irn), new_rd_ia32_Not);
1300 * Transforms an Abs node.
1302 * @param env The transformation environment
1303 * @return The created ia32 Abs node
1305 static ir_node *gen_Abs(ia32_transform_env_t *env) {
1306 ir_node *res, *p_eax, *p_edx;
1307 dbg_info *dbg = env->dbg;
1308 ir_mode *mode = env->mode;
1309 ir_graph *irg = env->irg;
1310 ir_node *block = env->block;
1311 ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
1312 ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
1313 ir_node *nomem = new_NoMem();
1314 ir_node *op = get_Abs_op(env->irn);
1318 if (mode_is_float(mode)) {
1320 if (USE_SSE2(env->cg)) {
/* SSE2 float abs: AND with the all-but-sign-bit mask (0x7FF... known const) */
1321 res = new_rd_ia32_xAnd(dbg,irg, block, noreg_gp, noreg_gp, op, noreg_fp, nomem, mode);
1323 size = get_mode_size_bits(mode);
1324 name = gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS);
1326 set_ia32_am_sc(res, name);
1328 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
1330 set_ia32_res_mode(res, mode);
1331 set_ia32_op_type(res, ia32_AddrModeS);
/* NOTE(review): env->mode here, while the Minus path uses the local
 * `mode` alias of the same value -- cosmetic inconsistency only */
1332 set_ia32_ls_mode(res, env->mode);
/* x87: dedicated fabs instruction */
1335 res = new_rd_ia32_vfabs(dbg, irg, block, op, mode);
1336 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
/* integer abs via the branch-free cdq/xor/sub sequence:
 * edx = sign-extension of eax; res = (eax ^ edx) - edx */
1340 // Matze: does this work with other modes? I'm not sure...
1341 // someone should check and remove this assert then
1342 assert(get_mode_size_bits(mode) == 32);
1344 res = new_rd_ia32_Cdq(dbg, irg, block, op);
1345 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
1346 set_ia32_res_mode(res, mode);
1348 p_eax = new_rd_Proj(dbg, irg, block, res, mode, pn_EAX);
1349 p_edx = new_rd_Proj(dbg, irg, block, res, mode, pn_EDX);
1351 res = new_rd_ia32_Eor(dbg, irg, block, noreg_gp, noreg_gp, p_eax, p_edx, nomem, mode);
1352 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
1353 set_ia32_res_mode(res, mode);
1355 res = new_rd_ia32_Sub(dbg, irg, block, noreg_gp, noreg_gp, res, p_edx, nomem, mode);
1356 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
1357 set_ia32_res_mode(res, mode);
1366 * Transforms a Load.
1368 * @param env The transformation environment
1369 * @return the created ia32 Load node
1371 static ir_node *gen_Load(ia32_transform_env_t *env) {
1372 ir_node *node = env->irn;
1373 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1374 ir_node *ptr = get_Load_ptr(node);
1375 ir_node *lptr = ptr;
1376 ir_mode *mode = get_Load_mode(node);
1379 ia32_am_flavour_t am_flav = ia32_am_B;
1380 ir_node *projs[pn_Load_max];
1382 ia32_collect_Projs(env->irn, projs, pn_Load_max);
1385 check for special case: the loaded value might not be used (optimized, volatile, ...)
1386 we add a Proj + Keep for volatile loads and ignore all other cases
1388 if (! get_proj_for_pn(node, pn_Load_res) && get_Load_volatility(node) == volatility_is_volatile) {
1389 /* add a result proj and a Keep to produce a pseudo use */
1390 ir_node *proj = new_r_Proj(env->irg, env->block, node, mode, pn_ia32_Load_res);
1391 be_new_Keep(arch_get_irn_reg_class(env->cg->arch_env, proj, -1), env->irg, env->block, 1, &proj);
1394 /* address might be a constant (symconst or absolute address) */
1395 if (is_ia32_Const(ptr)) {
/* pick the load variant: SSE2 xLoad, x87 vfld, or plain integer Load;
 * in all cases the old Load's Projs are renumbered to the new node */
1400 if (mode_is_float(mode)) {
1402 if (USE_SSE2(env->cg)) {
1403 new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node));
1404 ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_xLoad_M);
1405 ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_xLoad_res);
1408 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node));
1409 ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_vfld_M);
1410 ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_vfld_res);
1414 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node));
1415 ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_Load_M);
1416 ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_Load_res);
1419 /* base is a constant address: encode it in the address mode instead
 * of occupying a register */
1421 if (get_ia32_op_type(ptr) == ia32_SymConst) {
1422 set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr));
1423 am_flav = ia32_am_N;
1426 add_ia32_am_offs(new_op, get_ia32_cnst(ptr));
1427 am_flav = ia32_am_O;
1429 /* add dependency to barrier, if we are in start block */
1430 if (get_irg_start_block(env->irg) == env->block)
1431 add_irn_dep(new_op, be_abi_get_start_barrier(env->cg->birg->abi));
1434 set_ia32_am_support(new_op, ia32_am_Source);
1435 set_ia32_op_type(new_op, ia32_AddrModeS);
1436 set_ia32_am_flavour(new_op, am_flav);
1437 set_ia32_ls_mode(new_op, mode);
1439 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
1447 * Transforms a Store.
1449 * @param env The transformation environment
1450 * @return the created ia32 Store node
1452 static ir_node *gen_Store(ia32_transform_env_t *env) {
1453 ir_node *node = env->irn;
1454 ir_graph *irg = env->irg;
1455 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1456 ir_node *val = get_Store_value(node);
1457 ir_node *ptr = get_Store_ptr(node);
1458 ir_node *sptr = ptr;
1459 ir_node *mem = get_Store_mem(node);
1460 ir_mode *mode = get_irn_mode(val);
1461 ir_node *sval = val;
1464 ia32_am_flavour_t am_flav = ia32_am_B;
1465 ia32_immop_type_t immop = ia32_ImmNone;
1467 if (! mode_is_float(mode)) {
1468 /* in case of storing a const (but not a symconst) -> make it an attribute */
1469 if (is_ia32_Cnst(val)) {
1470 switch (get_ia32_op_type(val)) {
1472 immop = ia32_ImmConst;
1475 immop = ia32_ImmSymConst;
1478 assert(0 && "unsupported Const type");
1484 /* address might be a constant (symconst or absolute address) */
1485 if (is_ia32_Const(ptr)) {
/* pick store variant: SSE2 xStore, x87 vfst, 8-bit Store8Bit (byte
 * stores need byte-addressable registers), or the generic Store */
1490 if (mode_is_float(mode)) {
1492 if (USE_SSE2(env->cg)) {
1493 new_op = new_rd_ia32_xStore(env->dbg, irg, env->block, sptr, noreg, sval, mem);
1496 new_op = new_rd_ia32_vfst(env->dbg, irg, env->block, sptr, noreg, sval, mem);
1499 else if (get_mode_size_bits(mode) == 8) {
1500 new_op = new_rd_ia32_Store8Bit(env->dbg, irg, env->block, sptr, noreg, sval, mem);
1503 new_op = new_rd_ia32_Store(env->dbg, irg, env->block, sptr, noreg, sval, mem);
1506 /* stored const is an attribute (saves a register) */
1507 if (! mode_is_float(mode) && is_ia32_Cnst(val)) {
1508 set_ia32_Immop_attr(new_op, val);
1511 /* base is a constant address: encode it in the address mode */
1513 if (get_ia32_op_type(ptr) == ia32_SymConst) {
1514 set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr));
1515 am_flav = ia32_am_N;
1518 add_ia32_am_offs(new_op, get_ia32_cnst(ptr));
1519 am_flav = ia32_am_O;
1523 set_ia32_am_support(new_op, ia32_am_Dest);
1524 set_ia32_op_type(new_op, ia32_AddrModeD);
1525 set_ia32_am_flavour(new_op, am_flav);
1526 set_ia32_ls_mode(new_op, mode);
1527 set_ia32_immop_type(new_op, immop);
1529 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
1537 * Transforms a Cond -> Proj[b] -> Cmp into a CondJmp, CondJmp_i or TestJmp
1539 * @param env The transformation environment
1540 * @return The transformed node.
1542 static ir_node *gen_Cond(ia32_transform_env_t *env) {
1543 dbg_info *dbg = env->dbg;
1544 ir_graph *irg = env->irg;
1545 ir_node *block = env->block;
1546 ir_node *node = env->irn;
1547 ir_node *sel = get_Cond_selector(node);
1548 ir_mode *sel_mode = get_irn_mode(sel);
1549 ir_node *res = NULL;
1550 ir_node *pred = NULL;
1551 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1552 ir_node *cmp_a, *cmp_b, *cnst, *expr;
/* conditional jump: selector is a boolean Proj of a Cmp */
1554 if (is_Proj(sel) && sel_mode == mode_b) {
1555 ir_node *nomem = new_NoMem();
1556 pn_Cmp pnc = get_Proj_proj(sel);
1558 pred = get_Proj_pred(sel);
1560 /* get both compare operators */
1561 cmp_a = get_Cmp_left(pred);
1562 cmp_b = get_Cmp_right(pred);
1564 /* check if we can use a CondJmp with immediate */
1565 cnst = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(cmp_a, cmp_b) : NULL;
1566 expr = get_expr_op(cmp_a, cmp_b);
1569 /* immop has to be the right operand, we might need to flip pnc */
1571 pnc = get_inversed_pnc(pnc);
/* special case: integer ==/!= 0 can become a TEST jump */
1574 if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) {
1575 if (get_ia32_op_type(cnst) == ia32_Const &&
1576 classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL)
1578 /* a Cmp A =/!= 0 */
1579 ir_node *op1 = expr;
1580 ir_node *op2 = expr;
1581 ir_node *and = skip_Proj(expr);
1582 const char *cnst = NULL;
1584 /* check, if expr is an only once used And operation */
1585 if (get_irn_n_edges(expr) == 1 && is_ia32_And(and)) {
/* fold the single-use And into the TestJmp: test its two operands */
1586 op1 = get_irn_n(and, 2);
1587 op2 = get_irn_n(and, 3);
1589 cnst = (is_ia32_ImmConst(and) || is_ia32_ImmSymConst(and)) ? get_ia32_cnst(and) : NULL;
1591 res = new_rd_ia32_TestJmp(dbg, irg, block, op1, op2);
1592 set_ia32_pncode(res, pnc);
1593 set_ia32_res_mode(res, get_irn_mode(op1));
1596 copy_ia32_Immop_attr(res, and);
1599 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
/* compare against immediate operand */
1604 if (mode_is_float(get_irn_mode(expr))) {
1606 if (USE_SSE2(env->cg))
1607 res = new_rd_ia32_xCondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem);
1613 res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem);
1615 set_ia32_Immop_attr(res, cnst);
1616 set_ia32_res_mode(res, get_irn_mode(expr));
/* general two-operand compare */
1619 if (mode_is_float(get_irn_mode(cmp_a))) {
1621 if (USE_SSE2(env->cg))
1622 res = new_rd_ia32_xCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem);
/* x87 compare clobbers eax (fnstsw); keep the temp-reg Proj alive */
1625 res = new_rd_ia32_vfCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem);
1626 proj_eax = new_r_Proj(irg, block, res, mode_Is, pn_ia32_vfCondJmp_temp_reg_eax);
1627 be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, &proj_eax);
1631 res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem);
1632 set_ia32_commutative(res);
1634 set_ia32_res_mode(res, get_irn_mode(cmp_a));
1637 set_ia32_pncode(res, pnc);
1638 //set_ia32_am_support(res, ia32_am_Source);
/* otherwise: switch jump -- normalize case values to start at 0 */
1641 /* determine the smallest switch case value */
1642 int switch_min = INT_MAX;
1643 const ir_edge_t *edge;
1646 foreach_out_edge(node, edge) {
1647 int pn = get_Proj_proj(get_edge_src_irn(edge));
1648 switch_min = pn < switch_min ? pn : switch_min;
1652 /* if smallest switch case is not 0 we need an additional sub */
1653 snprintf(buf, sizeof(buf), "%d", switch_min);
1654 res = new_rd_ia32_Lea(dbg, irg, block, sel, noreg, mode_Is);
1655 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
1656 sub_ia32_am_offs(res, buf);
1657 set_ia32_am_flavour(res, ia32_am_OB);
1658 set_ia32_am_support(res, ia32_am_Source);
1659 set_ia32_op_type(res, ia32_AddrModeS);
/* feed the (possibly adjusted) selector into the jump table */
1662 res = new_rd_ia32_SwitchJmp(dbg, irg, block, switch_min ? res : sel, mode_T);
1663 set_ia32_pncode(res, get_Cond_defaultProj(node));
1664 set_ia32_res_mode(res, get_irn_mode(sel));
1667 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
1674 * Transforms a CopyB node.
1676 * @param env The transformation environment
1677 * @return The transformed node.
1679 static ir_node *gen_CopyB(ia32_transform_env_t *env) {
1680 ir_node *res = NULL;
1681 dbg_info *dbg = env->dbg;
1682 ir_graph *irg = env->irg;
1683 ir_node *block = env->block;
1684 ir_node *node = env->irn;
1685 ir_node *src = get_CopyB_src(node);
1686 ir_node *dst = get_CopyB_dst(node);
1687 ir_node *mem = get_CopyB_mem(node);
1688 int size = get_type_size_bytes(get_CopyB_type(node));
1689 ir_mode *dst_mode = get_irn_mode(dst);
1690 ir_mode *src_mode = get_irn_mode(src);
1693 ir_node *projs[pn_CopyB_max];
1695 ia32_collect_Projs(env->irn, projs, pn_CopyB_max);
1697 /* If we have to copy more than 32 bytes, we use REP MOVSx and */
1698 /* then we need the size explicitly in ECX. */
1699 if (size >= 32 * 4) {
1700 rem = size & 0x3; /* size % 4 */
/* materialize the byte count as a Const (goes into ECX) */
1703 res = new_rd_ia32_Const(dbg, irg, block, mode_Is);
1704 add_irn_dep(res, be_abi_get_start_barrier(env->cg->birg->abi));
1705 set_ia32_op_type(res, ia32_Const);
1706 set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is));
/* rep movsd copies size/4 dwords; the remainder (size%4 bytes) is
 * stored as an immediate attribute on the CopyB node */
1708 res = new_rd_ia32_CopyB(dbg, irg, block, dst, src, res, mem);
1709 set_ia32_Immop_tarval(res, new_tarval_from_long(rem, mode_Is));
1711 /* ok: now attach Proj's because rep movsd will destroy esi, edi and ecx */
1712 in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_DST);
1713 in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_SRC);
1714 in[2] = new_r_Proj(irg, block, res, mode_Is, pn_ia32_CopyB_CNT);
1715 be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 3, in);
1717 ia32_renumber_Proj(projs, pn_CopyB_M_regular, pn_ia32_CopyB_M);
/* small copy: unrolled movsd sequence, size as immediate */
1720 res = new_rd_ia32_CopyB_i(dbg, irg, block, dst, src, mem);
1721 set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is));
1722 set_ia32_immop_type(res, ia32_ImmConst);
1724 /* ok: now attach Proj's because movsd will destroy esi and edi */
1725 in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_i_DST);
1726 in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_i_SRC);
1727 be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 2, in);
1729 ia32_renumber_Proj(projs, pn_CopyB_M_regular, pn_ia32_CopyB_i_M);
1732 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn));
1740 * Transforms a Mux node into CMov.
1742 * @param env The transformation environment
1743 * @return The transformed node.
1745 static ir_node *gen_Mux(ia32_transform_env_t *env) {
1747 ir_node *node = env->irn;
/* note the operand order: CMov(sel, false-value, true-value) */
1748 ir_node *new_op = new_rd_ia32_CMov(env->dbg, env->irg, env->block, \
1749 get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env->mode);
1751 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
/* constructor signature shared by the CmpCMov node variants below */
1758 typedef ir_node *cmov_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *cmp_a, ir_node *cmp_b, \
1759 ir_node *psi_true, ir_node *psi_default, ir_mode *mode);
1762 * Transforms a Psi node into CMov.
1764 * @param env The transformation environment
1765 * @return The transformed node.
1767 static ir_node *gen_Psi(ia32_transform_env_t *env) {
1768 ia32_code_gen_t *cg = env->cg;
1769 dbg_info *dbg = env->dbg;
1770 ir_graph *irg = env->irg;
1771 ir_mode *mode = env->mode;
1772 ir_node *block = env->block;
1773 ir_node *node = env->irn;
1774 ir_node *cmp_proj = get_Mux_sel(node);
1775 ir_node *psi_true = get_Psi_val(node, 0);
1776 ir_node *psi_default = get_Psi_default(node);
1777 ir_node *noreg = ia32_new_NoReg_gp(cg);
1778 ir_node *nomem = new_rd_NoMem(irg);
1779 ir_node *cmp, *cmp_a, *cmp_b, *and1, *and2, *new_op = NULL;
1782 assert(get_irn_mode(cmp_proj) == mode_b && "Condition for Psi must have mode_b");
1784 cmp = get_Proj_pred(cmp_proj);
1785 cmp_a = get_Cmp_left(cmp);
1786 cmp_b = get_Cmp_right(cmp);
1787 pnc = get_Proj_proj(cmp_proj);
1789 if (mode_is_float(mode)) {
1790 /* floating point psi */
1793 /* 1st case: compare operands are float too */
1795 /* psi(cmp(a, b), t, f) can be done as: */
1796 /* tmp = cmp a, b */
1797 /* tmp2 = t and tmp */
1798 /* tmp3 = f and not tmp */
1799 /* res = tmp2 or tmp3 */
1801 /* in case the compare operands are int, we move them into xmm register */
1802 if (! mode_is_float(get_irn_mode(cmp_a))) {
1803 cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, node, mode_D);
1804 cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, node, mode_D);
1806 pnc |= 8; /* transform integer compare to fp compare */
/* xCmp produces an all-ones/all-zeros mask selected by the AND/ANDN/OR
 * sequence below */
1809 new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode);
1810 set_ia32_pncode(new_op, pnc);
1811 set_ia32_am_support(new_op, ia32_am_Source);
1812 set_ia32_res_mode(new_op, mode);
1813 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
1815 and1 = new_rd_ia32_xAnd(dbg, irg, block, noreg, noreg, psi_true, new_op, nomem, mode);
1816 set_ia32_am_support(and1, ia32_am_None);
1817 set_ia32_res_mode(and1, mode);
1818 set_ia32_commutative(and1);
1819 SET_IA32_ORIG_NODE(and1, ia32_get_old_node_name(cg, node));
1821 and2 = new_rd_ia32_xAndNot(dbg, irg, block, noreg, noreg, new_op, psi_default, nomem, mode);
1822 set_ia32_am_support(and2, ia32_am_None);
1823 set_ia32_res_mode(and2, mode);
1824 set_ia32_commutative(and2);
1825 SET_IA32_ORIG_NODE(and2, ia32_get_old_node_name(cg, node));
1827 new_op = new_rd_ia32_xOr(dbg, irg, block, noreg, noreg, and1, and2, nomem, mode);
1828 set_ia32_am_support(new_op, ia32_am_None);
1829 set_ia32_res_mode(new_op, mode);
1830 set_ia32_commutative(new_op);
1831 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
/* x87: conditional move on the fp stack */
1835 new_op = new_rd_ia32_vfCMov(dbg, irg, block, cmp_a, cmp_b, psi_true, psi_default, mode);
1836 set_ia32_pncode(new_op, pnc);
1837 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
/* integer psi: choose Set/CMov constructors by the compare-operand mode */
1842 construct_binop_func *set_func = NULL;
1843 cmov_func_t *cmov_func = NULL;
1845 if (mode_is_float(get_irn_mode(cmp_a))) {
1846 /* 1st case: compare operands are floats */
1851 set_func = new_rd_ia32_xCmpSet;
1852 cmov_func = new_rd_ia32_xCmpCMov;
1856 set_func = new_rd_ia32_vfCmpSet;
1857 cmov_func = new_rd_ia32_vfCmpCMov;
1860 pnc &= 7; /* fp compare -> int compare */
1863 /* 2nd case: compare operand are integer too */
1864 set_func = new_rd_ia32_CmpSet;
1865 cmov_func = new_rd_ia32_CmpCMov;
1868 /* create the nodes */
1870 /* check for special case first: And/Or -- Cmp with 0 -- Psi */
1871 if (is_ia32_Const_0(cmp_b) && is_Proj(cmp_a) && (is_ia32_And(get_Proj_pred(cmp_a)) || is_ia32_Or(get_Proj_pred(cmp_a)))) {
1872 if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) {
1873 /* first case for SETcc: default is 0, set to 1 iff condition is true */
1874 new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, cmp_a, mode);
1875 set_ia32_pncode(new_op, pnc);
1877 else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) {
1878 /* second case for SETcc: default is 1, set to 0 iff condition is true: */
1879 /* we invert condition and set default to 0 */
1880 new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, cmp_a, mode);
1881 set_ia32_pncode(new_op, get_inversed_pnc(pnc));
1884 /* otherwise: use CMOVcc */
1885 new_op = new_rd_ia32_PsiCondCMov(dbg, irg, block, cmp_a, psi_true, psi_default, mode);
1886 set_ia32_pncode(new_op, pnc);
1889 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
/* general case: explicit Cmp feeding SETcc or CMOVcc */
1893 if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) {
1894 /* first case for SETcc: default is 0, set to 1 iff condition is true */
1895 new_op = gen_binop(env, cmp_a, cmp_b, set_func);
1896 set_ia32_pncode(new_op, pnc);
1897 set_ia32_am_support(new_op, ia32_am_Source);
1899 else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) {
1900 /* second case for SETcc: default is 1, set to 0 iff condition is true: */
1901 /* we invert condition and set default to 0 */
1902 new_op = gen_binop(env, cmp_a, cmp_b, set_func);
1903 set_ia32_pncode(new_op, get_inversed_pnc(pnc));
1904 set_ia32_am_support(new_op, ia32_am_Source);
1907 /* otherwise: use CMOVcc */
1908 new_op = cmov_func(dbg, irg, block, cmp_a, cmp_b, psi_true, psi_default, mode);
1909 set_ia32_pncode(new_op, pnc);
1910 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
1920 * Following conversion rules apply:
1924 * 1) n bit -> m bit n > m (downscale)
1925 * a) target is signed: movsx
1926 * b) target is unsigned: and with lower bits sets
1927 * 2) n bit -> m bit n == m (sign change)
1929 * 3) n bit -> m bit n < m (upscale)
1930 * a) source is signed: movsx
1931 * b) source is unsigned: and with lower bits sets
1935 * SSE(1/2) convert to float or double (cvtsi2ss/sd)
1939 * SSE(1/2) convert from float or double to 32bit int (cvtss/sd2si)
1940 * if target mode < 32bit: additional INT -> INT conversion (see above)
1944 * SSE(1/2) convert from float or double to double or float (cvtss/sd2sd/ss)
1945 * x87 is mode_E internally, conversions happen only at load and store
1946 * in non-strict semantic
1950 * Create a conversion from x87 state register to general purpose.
/* Implemented as a spill round-trip through a stack slot: fist stores the
 * value as an integer, a normal Load reads it back in tgt_mode. */
1952 static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_mode *tgt_mode) {
1953 ia32_code_gen_t *cg = env->cg;
1954 ir_entity *ent = cg->fp_to_gp;
1955 ir_graph *irg = env->irg;
1956 ir_node *irn = env->irn;
1957 ir_node *block = env->block;
1958 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1959 ir_node *op = get_Conv_op(env->irn);
1960 ir_node *fist, *mem, *load;
/* lazily allocate one shared frame slot for all fp->gp conversions */
1964 int size = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_vfp].mode);
1965 ent = cg->fp_to_gp =
1966 frame_alloc_area(get_irg_frame_type(env->irg), size, 16, 0);
1968 panic("Couldn't allocate space on stack for fp conversion");
/* store the fp value as integer into the frame slot */
1974 fist = new_rd_ia32_vfist(env->dbg, irg, block, get_irg_frame(irg), noreg, op, get_irg_no_mem(irg));
1976 set_ia32_frame_ent(fist, ent);
1977 set_ia32_use_frame(fist);
1978 set_ia32_am_support(fist, ia32_am_Dest);
1979 set_ia32_op_type(fist, ia32_AddrModeD);
1980 set_ia32_am_flavour(fist, ia32_B);
1981 set_ia32_ls_mode(fist, mode_F);
1982 SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(cg, irn));
/* reload it as an integer of the target mode */
1987 load = new_rd_ia32_Load(env->dbg, irg, block, get_irg_frame(irg), noreg, mem);
1989 set_ia32_frame_ent(load, ent);
1990 set_ia32_use_frame(load);
1991 set_ia32_am_support(load, ia32_am_Source);
1992 set_ia32_op_type(load, ia32_AddrModeS);
1993 set_ia32_am_flavour(load, ia32_B);
1994 set_ia32_ls_mode(load, tgt_mode);
1995 SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(cg, irn));
1997 return new_r_Proj(irg, block, load, tgt_mode, pn_ia32_Load_res);
2001 * Create a conversion from general purpose to x87 register
/* Mirror of gen_x87_fp_to_gp: store the integer to a stack slot (after
 * widening to 32 bit if necessary), then fild loads it onto the fp stack. */
2003 static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_mode *src_mode) {
2004 ia32_code_gen_t *cg = env->cg;
2005 ir_entity *ent = cg->gp_to_fp;
2006 ir_node *irn = env->irn;
2007 ir_graph *irg = env->irg;
2008 ir_node *block = env->block;
2009 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2010 ir_node *nomem = get_irg_no_mem(irg);
2011 ir_node *op = get_Conv_op(env->irn);
2012 ir_node *fild, *store;
/* lazily allocate one shared frame slot for all gp->fp conversions */
2016 int size = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_gp].mode);
2017 ent = cg->gp_to_fp =
2018 frame_alloc_area(get_irg_frame_type(env->irg), size, size, 0);
2020 panic("Couldn't allocate space on stack for fp conversion");
2024 /* first convert to 32 bit */
2025 src_bits = get_mode_size_bits(src_mode);
2026 if (src_bits == 8) {
2027 op = new_rd_ia32_Conv_I2I8Bit(env->dbg, irg, block, noreg, noreg, op, nomem, mode_Is);
2028 set_ia32_am_support(op, ia32_am_Source);
2029 SET_IA32_ORIG_NODE(op, ia32_get_old_node_name(cg, irn));
2031 else if (src_bits < 32) {
2032 op = new_rd_ia32_Conv_I2I(env->dbg, irg, block, noreg, noreg, op, nomem, mode_Is);
2033 set_ia32_am_support(op, ia32_am_Source);
2034 SET_IA32_ORIG_NODE(op, ia32_get_old_node_name(cg, irn));
/* store the (now 32 bit) integer into the frame slot */
2038 store = new_rd_ia32_Store(env->dbg, irg, block, get_irg_frame(irg), noreg, op, nomem);
2040 set_ia32_frame_ent(store, ent);
2041 set_ia32_use_frame(store);
2042 set_ia32_am_support(store, ia32_am_Dest);
2043 set_ia32_op_type(store, ia32_AddrModeD);
2044 set_ia32_am_flavour(store, ia32_am_OB);
2045 set_ia32_ls_mode(store, mode_Is);
/* load it onto the x87 stack as a float */
2048 fild = new_rd_ia32_vfild(env->dbg, irg, block, get_irg_frame(irg), noreg, store);
2050 set_ia32_frame_ent(fild, ent);
2051 set_ia32_use_frame(fild);
2052 set_ia32_am_support(fild, ia32_am_Source);
2053 set_ia32_op_type(fild, ia32_AddrModeS);
2054 set_ia32_am_flavour(fild, ia32_am_OB);
2055 set_ia32_ls_mode(fild, mode_Is);
2057 return new_r_Proj(irg, block, fild, mode_F, pn_ia32_vfild_res);
2061 * Transforms a Conv node.
2063 * @param env The transformation environment
2064 * @return The created ia32 Conv node
2066 static ir_node *gen_Conv(ia32_transform_env_t *env) {
2067 dbg_info *dbg = env->dbg;
2068 ir_graph *irg = env->irg;
2069 ir_node *op = get_Conv_op(env->irn);
2070 ir_mode *src_mode = get_irn_mode(op);
2071 ir_mode *tgt_mode = env->mode;
2072 int src_bits = get_mode_size_bits(src_mode);
2073 int tgt_bits = get_mode_size_bits(tgt_mode);
2075 ir_node *block = env->block;
2076 ir_node *new_op = NULL;
2077 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2078 ir_node *nomem = new_rd_NoMem(irg);
2079 DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
2081 if (src_mode == tgt_mode) {
2082 /* this can happen when changing mode_P to mode_Is */
2083 DB((mod, LEVEL_1, "killed Conv(mode, mode) ..."));
2084 exchange(env->irn, op);
2086 else if (mode_is_float(src_mode)) {
2087 /* we convert from float ... */
2088 if (mode_is_float(tgt_mode)) {
/* float -> float */
2090 if (USE_SSE2(env->cg)) {
2091 DB((mod, LEVEL_1, "create Conv(float, float) ..."));
2092 new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem, tgt_mode);
2095 DB((mod, LEVEL_1, "killed Conv(float, float) ..."));
2097 remark: we create a intermediate conv here, so modes will be spread correctly
2098 these convs will be killed later
2100 new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem, tgt_mode);
/* float -> int: SSE2 converts directly, x87 goes through the stack slot */
2106 DB((mod, LEVEL_1, "create Conv(float, int) ..."));
2107 if (USE_SSE2(env->cg)) {
2108 new_op = new_rd_ia32_Conv_FP2I(dbg, irg, block, noreg, noreg, op, nomem, mode_Is);
2111 return gen_x87_fp_to_gp(env, mode_Is);
2113 /* if target mode is not int: add an additional downscale convert */
2114 if (tgt_mode != mode_Is) {
2115 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
/* 8-bit conversions need the byte-register variant */
2117 if (tgt_bits == 8 || src_bits == 8) {
2118 new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, new_op, nomem, tgt_mode);
2121 new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, new_op, nomem, tgt_mode);
2128 /* we convert from int ... */
2129 if (mode_is_float(tgt_mode)) {
/* int -> float: SSE2 converts directly, x87 goes through the stack slot */
2132 DB((mod, LEVEL_1, "create Conv(int, float) ..."));
2133 if (USE_SSE2(env->cg)) {
2134 new_op = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, op, nomem, tgt_mode);
2137 return gen_x87_gp_to_fp(env, src_mode);
/* int -> int */
2141 if (get_mode_size_bits(src_mode) == tgt_bits) {
2142 DB((mod, LEVEL_1, "omitting equal size Conv(%+F, %+F) ...", src_mode, tgt_mode));
2144 remark: we create a intermediate conv here, so modes will be spread correctly
2145 these convs will be killed later
2147 new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem, tgt_mode);
2151 DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode));
2152 if (tgt_bits == 8 || src_bits == 8) {
2153 new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, op, nomem, tgt_mode);
2156 new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem, tgt_mode);
2163 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
/* only widening converts may fold a memory operand */
2165 if(tgt_bits >= src_bits)
2166 set_ia32_am_support(new_op, ia32_am_Source);
/* remember this conv as a candidate for later elimination */
2169 nodeset_insert(env->cg->kill_conv, new_op);
2177 /********************************************
2180 * | |__ ___ _ __ ___ __| | ___ ___
2181 * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __|
2182 * | |_) | __/ | | | (_) | (_| | __/\__ \
2183 * |_.__/ \___|_| |_|\___/ \__,_|\___||___/
2185 ********************************************/
/* Transforms a be_StackParam into a frame-relative load of the parameter.
 * The load is flagged rematerializable: it can be re-executed instead of
 * spilled, since the stack parameter never changes. */
2187 static ir_node *gen_be_StackParam(ia32_transform_env_t *env) {
2188 ir_node *new_op = NULL;
2189 ir_node *node = env->irn;
2190 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2191 ir_node *mem = new_rd_NoMem(env->irg);
2192 ir_node *ptr = get_irn_n(node, 0);
2193 ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node);
2194 ir_mode *mode = env->mode;
/* choose load variant (SSE2 / x87 / integer) and matching result Proj number */
2197 if (mode_is_float(mode)) {
2199 if (USE_SSE2(env->cg)) {
2200 new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
2201 pn_res = pn_ia32_xLoad_res;
2204 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
2205 pn_res = pn_ia32_vfld_res;
2209 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
2210 pn_res = pn_ia32_Load_res;
2213 set_ia32_frame_ent(new_op, ent);
2214 set_ia32_use_frame(new_op);
2216 set_ia32_am_support(new_op, ia32_am_Source);
2217 set_ia32_op_type(new_op, ia32_AddrModeS);
2218 set_ia32_am_flavour(new_op, ia32_B);
2219 set_ia32_ls_mode(new_op, mode);
2220 set_ia32_flags(new_op, get_ia32_flags(new_op) | arch_irn_flags_rematerializable);
2222 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
2224 return new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_res);
2228 * Transforms a FrameAddr into an ia32 Add (frame pointer + entity offset).
2230 static ir_node *gen_be_FrameAddr(ia32_transform_env_t *env) {
2231 ir_node *new_op = NULL;
2232 ir_node *node = env->irn;
2233 ir_node *op = get_irn_n(node, 0);
2234 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2235 ir_node *nomem = new_rd_NoMem(env->irg);
2236 ir_mode *mode = env->mode;
/* Add of the base pointer with an immediate; the entity's offset is
 * resolved later from the frame entity attached below */
2238 new_op = new_rd_ia32_Add(env->dbg, env->irg, env->block, noreg, noreg, op, noreg, nomem, mode);
2239 set_ia32_frame_ent(new_op, arch_get_frame_entity(env->cg->arch_env, node));
2240 set_ia32_am_support(new_op, ia32_am_Full);
2241 set_ia32_use_frame(new_op);
2242 set_ia32_immop_type(new_op, ia32_ImmConst);
2243 set_ia32_commutative(new_op);
2245 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
2251 * Transforms a FrameLoad into an ia32 Load from a frame entity.
2253 static ir_node *gen_be_FrameLoad(ia32_transform_env_t *env) {
2254 ir_node *new_op = NULL;
2255 ir_node *node = env->irn;
2256 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2257 ir_node *mem = get_irn_n(node, 0);
2258 ir_node *ptr = get_irn_n(node, 1);
2259 ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node);
/* the load mode comes from the entity's type, not from the node */
2260 ir_mode *mode = get_type_mode(get_entity_type(ent));
2261 ir_node *projs[pn_Load_max];
2263 ia32_collect_Projs(env->irn, projs, pn_Load_max);
/* choose load variant (SSE2 / x87 / integer) and renumber the old Projs */
2265 if (mode_is_float(mode)) {
2267 if (USE_SSE2(env->cg)) {
2268 new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem);
2269 ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_xLoad_M);
2270 ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_xLoad_res);
2273 new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
2274 ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_vfld_M);
2275 ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_vfld_res);
2279 new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem);
2280 ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_Load_M);
2281 ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_Load_res);
2284 set_ia32_frame_ent(new_op, ent);
2285 set_ia32_use_frame(new_op);
2287 set_ia32_am_support(new_op, ia32_am_Source);
2288 set_ia32_op_type(new_op, ia32_AddrModeS);
2289 set_ia32_am_flavour(new_op, ia32_B);
2290 set_ia32_ls_mode(new_op, mode);
2292 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
2299 * Transforms a FrameStore into an ia32 Store.
/**
 * Transforms a be_FrameStore into an ia32 Store (xStore for SSE2 floats,
 * vfst for x87 floats, Store8Bit for 8-bit values, Store otherwise)
 * targeting a frame entity.
 * NOTE(review): excerpt is elided — else branches, closing braces and the
 * return statement are not visible here.
 */
2301 static ir_node *gen_be_FrameStore(ia32_transform_env_t *env) {
2302 ir_node *new_op = NULL;
2303 ir_node *node = env->irn;
2304 ir_graph *irg = env->irg;
2305 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2306 ir_node *mem = get_irn_n(node, 0);
2307 ir_node *ptr = get_irn_n(node, 1);
2308 ir_node *val = get_irn_n(node, 2);
2309 ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node);
2310 ir_mode *mode = get_irn_mode(val);
2311 ir_node *projs[pn_Store_max];
2313 ia32_collect_Projs(env->irn, projs, pn_Store_max);
2315 if (mode_is_float(mode)) {
2317 if (USE_SSE2(env->cg)) {
2318 new_op = new_rd_ia32_xStore(env->dbg, irg, env->block, ptr, noreg, val, mem);
/* x87 float store (else branch; delimiters elided in excerpt) */
2321 new_op = new_rd_ia32_vfst(env->dbg, irg, env->block, ptr, noreg, val, mem);
2324 else if (get_mode_size_bits(mode) == 8) {
2325 new_op = new_rd_ia32_Store8Bit(env->dbg, irg, env->block, ptr, noreg, val, mem);
/* regular integer store (final else; delimiters elided in excerpt) */
2328 new_op = new_rd_ia32_Store(env->dbg, irg, env->block, ptr, noreg, val, mem);
2331 set_ia32_frame_ent(new_op, ent);
2332 set_ia32_use_frame(new_op);
/* destination address mode, base-only flavour */
2334 set_ia32_am_support(new_op, ia32_am_Dest);
2335 set_ia32_op_type(new_op, ia32_AddrModeD);
2336 set_ia32_am_flavour(new_op, ia32_B);
2337 set_ia32_ls_mode(new_op, mode);
2339 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
2345 * In case SSE is used we need to copy the result from FPU TOS.
/**
 * Post-processes a be_Call when SSE2 is in use: a float result is returned
 * on the x87 FPU stack (st(0)), so it is spilled to the frame (GetST0) and
 * re-loaded into an SSE register (xLoad). All users of the call's result and
 * memory Projs are rerouted to the new load, and the caller-save Keep node
 * is rebuilt to include the original result Proj.
 * NOTE(review): excerpt is elided — several declarations (mode, mproj,
 * in_keep, keep_arity, i), early returns, braces and the final return are
 * not visible here.
 */
2347 static ir_node *gen_be_Call(ia32_transform_env_t *env) {
2348 ir_node *call_res = get_proj_for_pn(env->irn, pn_be_Call_first_res);
2349 ir_node *call_mem = get_proj_for_pn(env->irn, pn_be_Call_M_regular);
2351 ir_node *nomem = new_NoMem();
2352 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
/* nothing to do without a result or without SSE2 */
2354 if (! call_res || ! USE_SSE2(env->cg))
2357 mode = get_irn_mode(call_res);
2359 /* in case there is no memory output: create one to serialize the copy FPU -> SSE */
2361 call_mem = new_r_Proj(env->irg, env->block, env->irn, mode_M, pn_be_Call_M_regular)
2363 if (mode_is_float(mode)) {
2364 /* store st(0) onto stack */
2365 ir_node *frame = get_irg_frame(env->irg);
2366 ir_node *fstp = new_rd_ia32_GetST0(env->dbg, env->irg, env->block, frame, noreg, nomem);
2367 ir_entity *ent = frame_alloc_area(get_irg_frame_type(env->irg), get_mode_size_bytes(mode), 16, 0);
2368 ir_node *sse_load, *p, *bad, *keep;
2373 set_ia32_ls_mode(fstp, mode);
2374 set_ia32_op_type(fstp, ia32_AddrModeD);
2375 set_ia32_use_frame(fstp);
2376 set_ia32_frame_ent(fstp, ent);
2377 set_ia32_am_flavour(fstp, ia32_B);
2378 set_ia32_am_support(fstp, ia32_am_Dest);
2380 /* load into SSE register */
2381 sse_load = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, frame, ia32_new_NoReg_gp(env->cg), fstp);
2382 set_ia32_ls_mode(sse_load, mode);
2383 set_ia32_op_type(sse_load, ia32_AddrModeS);
2384 set_ia32_use_frame(sse_load);
2385 set_ia32_frame_ent(sse_load, ent);
2386 set_ia32_am_flavour(sse_load, ia32_B);
2387 set_ia32_am_support(sse_load, ia32_am_Source);
2388 mproj = new_r_Proj(env->irg, env->block, sse_load, mode_M, pn_ia32_xLoad_M);
2389 sse_load = new_r_Proj(env->irg, env->block, sse_load, mode, pn_ia32_xLoad_res);
2391 /* reroute all users of the result proj to the sse load */
2392 edges_reroute(call_res, sse_load, env->irg);
2393 edges_reroute_kind(call_res, sse_load, EDGE_KIND_DEP, env->irg);
2395 /* reroute all users of the old call memory to the sse load memory */
2396 edges_reroute(call_mem, mproj, env->irg);
2397 edges_reroute_kind(call_mem, mproj, EDGE_KIND_DEP, env->irg);
2399 /* now, we can set the old call mem as input of GetST0 */
2400 set_irn_n(fstp, 1, call_mem);
2402 /* now: create new Keep with all former ins and one additional in - the result Proj */
2404 /* get a Proj representing a caller save register */
2405 p = get_proj_for_pn(env->irn, pn_be_Call_first_res + 1);
2406 assert(is_Proj(p) && "Proj expected.");
2408 /* user of the proj is the Keep */
2409 p = get_edge_src_irn(get_irn_out_edge_first(p));
2410 assert(be_is_Keep(p) && "Keep expected.");
2412 /* copy in array of the old keep and set the result proj as additional in */
2413 keep_arity = get_irn_arity(p) + 1;
2414 NEW_ARR_A(ir_node *, in_keep, keep_arity);
2415 in_keep[keep_arity - 1] = call_res;
2416 for (i = 0; i < keep_arity - 1; ++i)
2417 in_keep[i] = get_irn_n(p, i);
2419 /* create new keep and set the in class requirements properly */
2420 keep = be_new_Keep(NULL, env->irg, env->block, keep_arity, in_keep);
2421 for(i = 0; i < keep_arity; ++i) {
2422 const arch_register_class_t *cls = arch_get_irn_reg_class(env->cg->arch_env, in_keep[i], -1);
2423 be_node_set_reg_class(keep, i, cls);
2426 /* kill the old keep */
2427 bad = get_irg_bad(env->irg);
2428 for (i = 0; i < keep_arity - 1; i++)
2429 set_irn_n(p, i, bad);
2430 remove_End_keepalive(get_irg_end(env->irg), p);
2437 * In case SSE is used we need to copy the result from XMM0 to FPU TOS before return.
/**
 * Post-processes a be_Return when SSE2 is in use: a float return value lives
 * in an XMM register but must be returned in st(0), so it is stored to the
 * frame (xStoreSimple) and re-loaded onto the FPU stack (SetST0). The
 * Barrier's value/memory inputs are redirected to the new nodes.
 * NOTE(review): excerpt is elided — declarations (mode, frame), early
 * returns, closing braces and the final return are not visible here.
 */
2439 static ir_node *gen_be_Return(ia32_transform_env_t *env) {
2440 ir_node *ret_val = get_irn_n(env->irn, be_pos_Return_val);
2441 ir_node *ret_mem = get_irn_n(env->irn, be_pos_Return_mem);
2442 ir_entity *ent = get_irg_entity(get_irn_irg(ret_val));
2443 ir_type *tp = get_entity_type(ent);
/* nothing to do without a return value or without SSE2 */
2445 if (be_Return_get_n_rets(env->irn) < 1 || ! ret_val || ! USE_SSE2(env->cg))
2448 if (get_method_n_ress(tp) == 1) {
2449 ir_type *res_type = get_method_res_type(tp, 0);
2452 if (is_Primitive_type(res_type)) {
2453 mode = get_type_mode(res_type);
2454 if (mode_is_float(mode)) {
2457 ir_node *sse_store, *fld, *mproj, *barrier;
2458 int pn_ret_val = get_Proj_proj(ret_val);
2459 int pn_ret_mem = get_Proj_proj(ret_mem);
2461 /* get the Barrier */
2462 barrier = get_Proj_pred(ret_val);
2464 /* get result input of the Barrier */
2465 ret_val = get_irn_n(barrier, pn_ret_val);
2467 /* get memory input of the Barrier */
2468 ret_mem = get_irn_n(barrier, pn_ret_mem);
/* 16-byte aligned spill slot on the frame for the XMM value */
2470 frame = get_irg_frame(env->irg);
2471 ent = frame_alloc_area(get_irg_frame_type(env->irg), get_mode_size_bytes(mode), 16, 0);
2473 /* store xmm0 onto stack */
2474 sse_store = new_rd_ia32_xStoreSimple(env->dbg, env->irg, env->block, frame, ret_val, ret_mem);
2475 set_ia32_ls_mode(sse_store, mode);
2476 set_ia32_op_type(sse_store, ia32_AddrModeD);
2477 set_ia32_use_frame(sse_store);
2478 set_ia32_frame_ent(sse_store, ent);
2479 set_ia32_am_flavour(sse_store, ia32_B);
2480 set_ia32_am_support(sse_store, ia32_am_Dest);
/* load the stored value back onto the x87 stack */
2483 fld = new_rd_ia32_SetST0(env->dbg, env->irg, env->block, frame, sse_store);
2484 set_ia32_ls_mode(fld, mode);
2485 set_ia32_op_type(fld, ia32_AddrModeS);
2486 set_ia32_use_frame(fld);
2487 set_ia32_frame_ent(fld, ent);
2488 set_ia32_am_flavour(fld, ia32_B);
2489 set_ia32_am_support(fld, ia32_am_Source);
2490 mproj = new_r_Proj(env->irg, env->block, fld, mode_M, pn_ia32_SetST0_M);
2491 fld = new_r_Proj(env->irg, env->block, fld, mode, pn_ia32_SetST0_res);
/* pin the result to virtual fp register vf0 */
2492 arch_set_irn_register(env->cg->arch_env, fld, &ia32_vfp_regs[REG_VF0]);
2494 /* set new return value */
2495 set_irn_n(barrier, pn_ret_val, fld);
2496 set_irn_n(barrier, pn_ret_mem, mproj);
2505 * Transform a be_AddSP into an ia32_AddSP. Eat up const sizes.
/**
 * Transform a be_AddSP into an ia32_AddSP. A constant (or symconst-Load)
 * size operand is folded into the node as an immediate, and the old node's
 * Projs are renumbered/assigned to the new node's outputs.
 * NOTE(review): excerpt is elided — the new_op declaration, some closing
 * braces and the return statement are not visible here.
 */
2507 static ir_node *gen_be_AddSP(ia32_transform_env_t *env) {
2509 const ir_edge_t *edge;
2510 ir_node *sz = get_irn_n(env->irn, be_pos_AddSP_size);
2511 ir_node *sp = get_irn_n(env->irn, be_pos_AddSP_old_sp);
2513 new_op = new_rd_ia32_AddSP(env->dbg, env->irg, env->block, sp, sz);
/* fold a constant size into the immediate and drop the register operand */
2515 if (is_ia32_Const(sz)) {
2516 set_ia32_Immop_attr(new_op, sz);
2517 set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg));
/* a symconst-only Load can be folded as a symbolic immediate */
2519 else if (is_ia32_Load(sz) && get_ia32_am_flavour(sz) == ia32_O) {
2520 set_ia32_immop_type(new_op, ia32_ImmSymConst);
2521 set_ia32_op_type(new_op, ia32_AddrModeS);
2522 set_ia32_am_sc(new_op, get_ia32_am_sc(sz));
2523 add_ia32_am_offs(new_op, get_ia32_am_offs(sz));
2524 set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg));
/* rewire the old node's Projs onto the new node's pin numbers */
2528 foreach_out_edge(env->irn, edge) {
2529 ir_node *proj = get_edge_src_irn(edge);
2531 assert(is_Proj(proj));
2533 if (get_Proj_proj(proj) == pn_be_AddSP_res) {
2534 /* the node is not yet exchanged: we need to set the register manually */
2535 ia32_attr_t *attr = get_ia32_attr(new_op);
2536 attr->slots[pn_ia32_AddSP_stack] = &ia32_gp_regs[REG_ESP];
2537 set_Proj_proj(proj, pn_ia32_AddSP_stack);
2539 else if (get_Proj_proj(proj) == pn_be_AddSP_M) {
2540 set_Proj_proj(proj, pn_ia32_AddSP_M);
2547 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
2553 * Transform a be_SubSP into an ia32_SubSP. Eat up const sizes.
/**
 * Transform a be_SubSP into an ia32_SubSP. Mirror image of gen_be_AddSP:
 * constant/symconst sizes are folded into the immediate and the old Projs
 * are renumbered to the new node's outputs.
 * NOTE(review): excerpt is elided — the new_op declaration, some closing
 * braces and the return statement are not visible here.
 */
2555 static ir_node *gen_be_SubSP(ia32_transform_env_t *env) {
2557 const ir_edge_t *edge;
2558 ir_node *sz = get_irn_n(env->irn, be_pos_SubSP_size);
2559 ir_node *sp = get_irn_n(env->irn, be_pos_SubSP_old_sp);
2561 new_op = new_rd_ia32_SubSP(env->dbg, env->irg, env->block, sp, sz);
/* fold a constant size into the immediate and drop the register operand */
2563 if (is_ia32_Const(sz)) {
2564 set_ia32_Immop_attr(new_op, sz);
2565 set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg));
/* a symconst-only Load can be folded as a symbolic immediate */
2567 else if (is_ia32_Load(sz) && get_ia32_am_flavour(sz) == ia32_O) {
2568 set_ia32_immop_type(new_op, ia32_ImmSymConst);
2569 set_ia32_op_type(new_op, ia32_AddrModeS);
2570 set_ia32_am_sc(new_op, get_ia32_am_sc(sz));
2571 add_ia32_am_offs(new_op, get_ia32_am_offs(sz));
2572 set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg));
/* rewire the old node's Projs onto the new node's pin numbers */
2576 foreach_out_edge(env->irn, edge) {
2577 ir_node *proj = get_edge_src_irn(edge);
2579 assert(is_Proj(proj));
2581 if (get_Proj_proj(proj) == pn_be_SubSP_res) {
2582 /* the node is not yet exchanged: we need to set the register manually */
2583 ia32_attr_t *attr = get_ia32_attr(new_op);
2584 attr->slots[pn_ia32_SubSP_stack] = &ia32_gp_regs[REG_ESP];
2585 set_Proj_proj(proj, pn_ia32_SubSP_stack);
2587 else if (get_Proj_proj(proj) == pn_be_SubSP_M) {
2588 set_Proj_proj(proj, pn_ia32_SubSP_M);
2595 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
2601 * This function just sets the register for the Unknown node
2602 * as this is not done during register allocation because Unknown
2603 * is an "ignore" node.
/**
 * Sets the register for an Unknown node according to its mode: XMM-unknown
 * for SSE2 floats, vfp-unknown for x87 floats, gp-unknown for
 * int/reference/character modes. Done here because Unknown is an "ignore"
 * node skipped by register allocation.
 * NOTE(review): excerpt is elided — else keywords, braces and the return
 * statement are not visible here.
 */
2605 static ir_node *gen_Unknown(ia32_transform_env_t *env) {
2606 ir_mode *mode = env->mode;
2607 ir_node *irn = env->irn;
2609 if (mode_is_float(mode)) {
2610 if (USE_SSE2(env->cg))
2611 arch_set_irn_register(env->cg->arch_env, irn, &ia32_xmm_regs[REG_XMM_UKNWN]);
2613 arch_set_irn_register(env->cg->arch_env, irn, &ia32_vfp_regs[REG_VFP_UKNWN]);
2615 else if (mode_is_int(mode) || mode_is_reference(mode) || mode_is_character(mode)) {
2616 arch_set_irn_register(env->cg->arch_env, irn, &ia32_gp_regs[REG_GP_UKNWN]);
/* any other mode is unexpected here */
2619 assert(0 && "unsupported Unknown-Mode");
/**
 * Transforms selected Projs by delegating to the transformer of their
 * predecessor (Store / be_FrameStore visible here).
 * NOTE(review): this excerpt shows only a fragment of the function — large
 * parts of the dispatch (original lines 2631-2647) are elided.
 */
2625 static ir_node *gen_Proj(ia32_transform_env_t *env) {
2626 ir_node *pred = get_Proj_pred(env->irn);
2627 int proj = get_Proj_proj(env->irn);
2629 if(is_Store(pred)) {
2630 if(proj == pn_Store_M) {
2632 return gen_Store(env);
2636 } else if(be_is_FrameStore(pred)) {
2637 if(proj == pn_Store_M) {
2639 return gen_be_FrameStore(env);
2648 /**********************************************************************
2651 * | | _____ _____ _ __ ___ __| | _ __ ___ __| | ___ ___
2652 * | |/ _ \ \ /\ / / _ \ '__/ _ \/ _` | | '_ \ / _ \ / _` |/ _ \/ __|
2653 * | | (_) \ V V / __/ | | __/ (_| | | | | | (_) | (_| | __/\__ \
2654 * |_|\___/ \_/\_/ \___|_| \___|\__,_| |_| |_|\___/ \__,_|\___||___/
2656 **********************************************************************/
2658 /* These nodes are created in intrinsic lowering (64bit -> 32bit) */
2660 typedef ir_node *construct_load_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \
2663 typedef ir_node *construct_store_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \
2664 ir_node *val, ir_node *mem);
2667 * Transforms a lowered Load into a "real" one.
/**
 * Transforms a lowered (intrinsic 64bit->32bit) Load into a "real" one by
 * calling the given constructor and copying all address-mode attributes
 * from the lowered node.
 *
 * @param env      the transformation environment
 * @param func     constructor for the concrete ia32 load node
 * @param fp_unit  fp unit the node was lowered for (fp_x87 forces simulation)
 * NOTE(review): excerpt is elided — the new_op declaration, the FORCE_x87
 * call and the return statement are not visible here.
 */
2669 static ir_node *gen_lowered_Load(ia32_transform_env_t *env, construct_load_func func, char fp_unit) {
2670 ir_node *node = env->irn;
2671 ir_mode *mode = get_ia32_ls_mode(node);
2675 Could be that we have SSE2 unit, but due to 64Bit Div/Conv
2676 lowering we have x87 nodes, so we need to enforce simulation.
2678 if (mode_is_float(mode)) {
2680 if (fp_unit == fp_x87)
2684 new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), get_irn_n(node, 1), get_irn_n(node, 2));
/* copy address-mode attributes from the lowered node */
2686 set_ia32_am_support(new_op, ia32_am_Source);
2687 set_ia32_op_type(new_op, ia32_AddrModeS);
2688 set_ia32_am_flavour(new_op, get_ia32_am_flavour(node));
2689 set_ia32_am_offs_int(new_op, get_ia32_am_offs_int(node));
2690 set_ia32_am_sc(new_op, get_ia32_am_sc(node));
2691 if(is_ia32_am_sc_sign(node))
2692 set_ia32_am_sc_sign(new_op);
2693 set_ia32_am_scale(new_op, get_ia32_am_scale(node));
2694 set_ia32_ls_mode(new_op, get_ia32_ls_mode(node));
2695 if(is_ia32_use_frame(node)) {
2696 set_ia32_frame_ent(new_op, get_ia32_frame_ent(node));
2697 set_ia32_use_frame(new_op);
2700 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
2706 * Transforms a lowered Store into a "real" one.
/**
 * Transforms a lowered (intrinsic 64bit->32bit) Store into a "real" one by
 * calling the given constructor and copying offset/frame attributes from
 * the lowered node.
 *
 * @param env      the transformation environment
 * @param func     constructor for the concrete ia32 store node
 * @param fp_unit  fp unit the node was lowered for (fp_x87 forces simulation)
 * NOTE(review): excerpt is elided — declarations (new_op, am_offs), the
 * FORCE_x87 call, am_flav update and the return are not visible here.
 */
2708 static ir_node *gen_lowered_Store(ia32_transform_env_t *env, construct_store_func func, char fp_unit) {
2709 ir_node *node = env->irn;
2710 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2711 ir_mode *mode = get_ia32_ls_mode(node);
2714 ia32_am_flavour_t am_flav = ia32_B;
2717 Could be that we have SSE2 unit, but due to 64Bit Div/Conv
2718 lowering we have x87 nodes, so we need to enforce simulation.
2720 if (mode_is_float(mode)) {
2722 if (fp_unit == fp_x87)
2726 new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), noreg, get_irn_n(node, 1), get_irn_n(node, 2));
/* carry over an existing address-mode offset, if any */
2728 if ((am_offs = get_ia32_am_offs(node)) != NULL) {
2730 add_ia32_am_offs(new_op, am_offs);
2733 set_ia32_am_support(new_op, ia32_am_Dest);
2734 set_ia32_op_type(new_op, ia32_AddrModeD);
2735 set_ia32_am_flavour(new_op, am_flav);
2736 set_ia32_ls_mode(new_op, mode);
2737 set_ia32_frame_ent(new_op, get_ia32_frame_ent(node));
2738 set_ia32_use_frame(new_op);
2740 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
2747 * Transforms an ia32_l_XXX into a "real" XXX node
2749 * @param env The transformation environment
2750 * @return the created ia32 XXX node
/* Generator macros for the gen_ia32_l_* transformers. Each expands to a
 * static function delegating to the matching generic helper
 * (gen_binop / gen_unop / gen_shift_binop / gen_lowered_Load/Store).
 * NOTE(review): continuation lines of these macros are elided in this
 * excerpt; comments are placed only between macros to keep the
 * backslash-continuations intact. */
/* binary op; FP variants force x87 first (body partially elided) */
2752 #define GEN_LOWERED_OP(op) \
2753 static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \
2754 if (mode_is_float(env->mode)) \
2756 return gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \
/* binary op that always runs on the x87 unit */
2759 #define GEN_LOWERED_x87_OP(op) \
2760 static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \
2762 FORCE_x87(env->cg); \
2763 new_op = gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \
/* unary op */
2767 #define GEN_LOWERED_UNOP(op) \
2768 static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \
2769 return gen_unop(env, get_unop_op(env->irn), new_rd_ia32_##op); \
/* shift op */
2772 #define GEN_LOWERED_SHIFT_OP(op) \
2773 static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \
2774 return gen_shift_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \
/* load, parameterized by fp unit */
2777 #define GEN_LOWERED_LOAD(op, fp_unit) \
2778 static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \
2779 return gen_lowered_Load(env, new_rd_ia32_##op, fp_unit); \
/* store, parameterized by fp unit */
2782 #define GEN_LOWERED_STORE(op, fp_unit) \
2783 static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \
2784 return gen_lowered_Store(env, new_rd_ia32_##op, fp_unit); \
/* Instantiate the transformers for the lowered intrinsic ops.
 * NOTE(review): some instantiations between these lines are elided in this
 * excerpt. */
2787 GEN_LOWERED_OP(AddC)
2789 GEN_LOWERED_OP(SubC)
2793 GEN_LOWERED_x87_OP(vfprem)
2794 GEN_LOWERED_x87_OP(vfmul)
2795 GEN_LOWERED_x87_OP(vfsub)
2797 GEN_LOWERED_UNOP(Minus)
2799 GEN_LOWERED_LOAD(vfild, fp_x87)
2800 GEN_LOWERED_LOAD(Load, fp_none)
2801 GEN_LOWERED_STORE(vfist, fp_x87)
2802 GEN_LOWERED_STORE(Store, fp_none)
/**
 * Transforms a lowered vfdiv into an ia32 vfdiv (non-commutative), folds an
 * immediate operand where possible and renumbers the Div Projs to the new
 * node's outputs.
 * NOTE(review): excerpt is elided — the vfdiv declaration and the return
 * statement are not visible here.
 */
2804 static ir_node *gen_ia32_l_vfdiv(ia32_transform_env_t *env) {
2805 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2806 ir_node *node = env->irn;
2807 ir_node *left = get_binop_left(node);
2808 ir_node *right = get_binop_right(node);
2809 ir_mode *mode = env->mode;
2811 ir_node *projs[pn_DivMod_max];
2813 vfdiv = new_rd_ia32_vfdiv(env->dbg, env->irg, env->block, noreg, noreg, left, right, new_NoMem());
/* division is not commutative */
2814 clear_ia32_commutative(vfdiv);
2815 set_ia32_am_support(vfdiv, ia32_am_Source);
2816 set_ia32_res_mode(vfdiv, mode);
/* try to fold an immediate into operand positions 2/3 */
2817 fold_immediate(env, vfdiv, 2, 3);
2819 ia32_collect_Projs(node, projs, pn_DivMod_max);
2820 ia32_renumber_Proj(projs, pn_Div_M, pn_ia32_vfdiv_M);
2821 ia32_renumber_Proj(projs, pn_Div_res, pn_ia32_vfdiv_res);
2829 * Transforms a l_MulS into a "real" MulS node.
2831 * @param env The transformation environment
2832 * @return the created ia32 MulS node
/**
 * Transforms a l_MulS into a "real" MulS node. l_MulS is already mode_T, so
 * the MulS is built directly and EAX/EDX result Projs are created and kept
 * alive via a Keep node.
 * NOTE(review): excerpt is elided — the in[] declaration and the return
 * statement are not visible here.
 */
2834 static ir_node *gen_ia32_l_MulS(ia32_transform_env_t *env) {
2835 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2836 ir_node *left = get_binop_left(env->irn);
2837 ir_node *right = get_binop_right(env->irn);
2838 ir_mode *mode = env->mode;
2841 /* l_MulS is already a mode_T node, so we create the MulS in the normal way */
2842 /* and then skip the result Proj, because all needed Projs are already there. */
2844 ir_node *muls = new_rd_ia32_MulS(env->dbg, env->irg, env->block, noreg, noreg, left, right, new_NoMem());
2845 clear_ia32_commutative(muls);
2846 set_ia32_am_support(muls, ia32_am_Source);
2847 set_ia32_res_mode(muls, mode);
2848 fold_immediate(env, muls, 2, 3);
2850 /* check if EAX and EDX proj exist, add missing one */
2851 in[0] = new_rd_Proj(env->dbg, env->irg, env->block, muls, mode, pn_EAX);
2852 in[1] = new_rd_Proj(env->dbg, env->irg, env->block, muls, mode, pn_EDX);
/* keep both result registers alive */
2853 be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], env->irg, env->block, 2, in);
/* Instantiate the lowered 32-bit shift transformers. */
2858 GEN_LOWERED_SHIFT_OP(Shl)
2859 GEN_LOWERED_SHIFT_OP(Shr)
2860 GEN_LOWERED_SHIFT_OP(Shrs)
2863 * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs:
2864 * op1 - target to be shifted
2865 * op2 - contains bits to be shifted into target
2867 * Only op3 can be an immediate.
/**
 * Transforms a l_ShlD/l_ShrD (double-width shift) into ShlD/ShrD.
 * Inputs: op1 = target, op2 = bits shifted into the target, count = shift
 * amount; only the count may become an immediate (reduced mod 32 to fit
 * imm8 semantics). Otherwise the count goes through cl (set_ia32_emit_cl).
 * NOTE(review): excerpt is elided — declarations (new_op path variables,
 * imm_op, tv), some braces and the return are not visible here.
 */
2869 static ir_node *gen_lowered_64bit_shifts(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, ir_node *count) {
2870 ir_node *new_op = NULL;
2871 ir_mode *mode = env->mode;
2872 dbg_info *dbg = env->dbg;
2873 ir_graph *irg = env->irg;
2874 ir_node *block = env->block;
2875 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
2876 ir_node *nomem = new_NoMem();
2879 DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
2881 assert(! mode_is_float(mode) && "Shift/Rotate with float not supported");
2883 /* Check if immediate optimization is on and */
2884 /* if it's an operation with immediate. */
2885 imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(NULL, count) : NULL;
2887 /* Limit imm_op within range imm8 */
2889 tv = get_ia32_Immop_tarval(imm_op);
/* shift counts are taken mod 32 on ia32 */
2892 tv = tarval_mod(tv, new_tarval_from_long(32, mode_Iu));
2893 set_ia32_Immop_tarval(imm_op, tv);
2900 /* integer operations */
2902 /* This is ShiftD with const */
2903 DB((mod, LEVEL_1, "ShiftD with immediate ..."));
2905 if (is_ia32_l_ShlD(env->irn))
2906 new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg, op1, op2, noreg, nomem, mode);
2908 new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg, op1, op2, noreg, nomem, mode);
2909 set_ia32_Immop_attr(new_op, imm_op);
2912 /* This is a normal ShiftD */
2913 DB((mod, LEVEL_1, "ShiftD binop ..."));
2914 if (is_ia32_l_ShlD(env->irn))
2915 new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg, op1, op2, count, nomem, mode);
2917 new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg, op1, op2, count, nomem, mode);
2920 /* set AM support */
2921 set_ia32_am_support(new_op, ia32_am_Dest);
2923 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn));
2925 set_ia32_res_mode(new_op, mode);
/* variable shift count must live in cl */
2926 set_ia32_emit_cl(new_op);
/** Transforms a l_ShlD by delegating to gen_lowered_64bit_shifts. */
2931 static ir_node *gen_ia32_l_ShlD(ia32_transform_env_t *env) {
2932 return gen_lowered_64bit_shifts(env, get_irn_n(env->irn, 0), get_irn_n(env->irn, 1), get_irn_n(env->irn, 2));
/** Transforms a l_ShrD by delegating to gen_lowered_64bit_shifts. */
2935 static ir_node *gen_ia32_l_ShrD(ia32_transform_env_t *env) {
2936 return gen_lowered_64bit_shifts(env, get_irn_n(env->irn, 0), get_irn_n(env->irn, 1), get_irn_n(env->irn, 2));
2940 * In case SSE Unit is used, the node is transformed into a vfst + xLoad.
/**
 * Transforms a l_X87toSSE: with an SSE unit the value is stored from the x87
 * stack to memory (vfst) and re-loaded into an SSE register (xLoad); without
 * SSE the node is skipped by rerouting its users to the value and setting
 * its inputs to Bad.
 * NOTE(review): excerpt is elided — the USE_SSE2 branch delimiters, the
 * loop index declaration and the return statement are not visible here.
 */
2942 static ir_node *gen_ia32_l_X87toSSE(ia32_transform_env_t *env) {
2943 ia32_code_gen_t *cg = env->cg;
2944 ir_node *res = NULL;
2945 ir_node *ptr = get_irn_n(env->irn, 0);
2946 ir_node *val = get_irn_n(env->irn, 1);
2947 ir_node *mem = get_irn_n(env->irn, 2);
2950 ir_node *noreg = ia32_new_NoReg_gp(cg);
2952 /* Store x87 -> MEM */
2953 res = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, mem);
2954 set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn));
2955 set_ia32_use_frame(res);
2956 set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn));
2957 set_ia32_am_support(res, ia32_am_Dest);
2958 set_ia32_am_flavour(res, ia32_B);
2959 set_ia32_op_type(res, ia32_AddrModeD);
2961 /* Load MEM -> SSE, serialized through the store's memory result */
2962 res = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, res);
2963 set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn));
2964 set_ia32_use_frame(res);
2965 set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn));
2966 set_ia32_am_support(res, ia32_am_Source);
2967 set_ia32_am_flavour(res, ia32_B);
2968 set_ia32_op_type(res, ia32_AddrModeS);
2969 res = new_rd_Proj(env->dbg, env->irg, env->block, res, get_ia32_ls_mode(env->irn), pn_ia32_xLoad_res);
2972 /* SSE unit is not used -> skip this node. */
2975 edges_reroute(env->irn, val, env->irg);
/* detach the dead node's inputs */
2976 for (i = get_irn_arity(env->irn) - 1; i >= 0; i--)
2977 set_irn_n(env->irn, i, get_irg_bad(env->irg));
2984 * In case SSE Unit is used, the node is transformed into a xStore + vfld.
/**
 * Transforms a l_SSEtoX87: with an SSE unit the value is stored from the SSE
 * register to memory (xStore) and re-loaded onto the x87 stack (vfld). If
 * the SSE value itself came from an xLoad, the vfld reads that load's
 * location directly and the store is skipped. Without SSE the node is
 * skipped by rerouting users to the value.
 * NOTE(review): excerpt is elided — declarations (offs, i), the USE_SSE2
 * branch delimiters and the return statement are not visible here.
 */
2986 static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env) {
2987 ia32_code_gen_t *cg = env->cg;
2988 ir_node *res = NULL;
2989 ir_node *ptr = get_irn_n(env->irn, 0);
2990 ir_node *val = get_irn_n(env->irn, 1);
2991 ir_node *mem = get_irn_n(env->irn, 2);
2992 ir_entity *fent = get_ia32_frame_ent(env->irn);
2993 ir_mode *lsmode = get_ia32_ls_mode(env->irn);
2997 ir_node *noreg = ia32_new_NoReg_gp(cg);
2999 /* Store SSE -> MEM */
3000 if (is_ia32_xLoad(skip_Proj(val))) {
3001 ir_node *ld = skip_Proj(val);
3003 /* we can vfld the value directly into the fpu */
3004 fent = get_ia32_frame_ent(ld);
3005 ptr = get_irn_n(ld, 0);
3006 offs = get_ia32_am_offs_int(ld);
/* otherwise store the SSE value to the frame first (else branch; delimiters elided) */
3009 res = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem);
3010 set_ia32_frame_ent(res, fent);
3011 set_ia32_use_frame(res);
3012 set_ia32_ls_mode(res, lsmode);
3013 set_ia32_am_support(res, ia32_am_Dest);
3014 set_ia32_am_flavour(res, ia32_B);
3015 set_ia32_op_type(res, ia32_AddrModeD);
3019 /* Load MEM -> x87 */
3020 res = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem);
3021 set_ia32_frame_ent(res, fent);
3022 set_ia32_use_frame(res);
3023 set_ia32_ls_mode(res, lsmode);
3024 add_ia32_am_offs_int(res, offs);
3025 set_ia32_am_support(res, ia32_am_Source);
3026 set_ia32_am_flavour(res, ia32_B);
3027 set_ia32_op_type(res, ia32_AddrModeS);
3028 res = new_rd_Proj(env->dbg, env->irg, env->block, res, lsmode, pn_ia32_vfld_res);
3031 /* SSE unit is not used -> skip this node. */
3034 edges_reroute(env->irn, val, env->irg);
/* detach the dead node's inputs */
3035 for (i = get_irn_arity(env->irn) - 1; i >= 0; i--)
3036 set_irn_n(env->irn, i, get_irg_bad(env->irg));
3042 /*********************************************************
3045 * _ __ ___ __ _ _ _ __ __| |_ __ ___ _____ _ __
3046 * | '_ ` _ \ / _` | | '_ \ / _` | '__| \ \ / / _ \ '__|
3047 * | | | | | | (_| | | | | | | (_| | | | |\ V / __/ |
3048 * |_| |_| |_|\__,_|_|_| |_| \__,_|_| |_| \_/ \___|_|
3050 *********************************************************/
3053 * the BAD transformer.
/**
 * The BAD transformer: registered for ops that must never reach this phase;
 * reports the offending node on stderr.
 * NOTE(review): the return statement and closing brace are elided in this
 * excerpt.
 */
3055 static ir_node *bad_transform(ia32_transform_env_t *env) {
3056 ir_fprintf(stderr, "Not implemented: %+F\n", env->irn);
3062 * Enters all transform functions into the generic pointer
/**
 * Enters all transform functions into the ops' generic function pointers
 * (consumed later by ia32_transform_node).
 * NOTE(review): the bulk of the GEN/BAD registration table (original lines
 * 3072-3190) is elided in this excerpt; only a few representative lines are
 * visible.
 */
3064 void ia32_register_transformers(void) {
3065 ir_op *op_Max, *op_Min, *op_Mulh;
3067 /* first clear the generic function pointer for all ops */
3068 clear_irp_opcodes_generic_func();
/* helper macros: install a transformer / the BAD transformer for an op */
3070 #define GEN(a) op_##a->ops.generic = (op_func)gen_##a
3071 #define BAD(a) op_##a->ops.generic = (op_func)bad_transform
3106 /* transform ops from intrinsic lowering */
3128 GEN(ia32_l_X87toSSE);
3129 GEN(ia32_l_SSEtoX87);
3144 /* constant transformation happens earlier */
3149 /* we should never see these nodes */
3164 /* handle generic backend nodes */
3169 //GEN(be_FrameStore);
3174 /* set the register for all Unknown nodes */
/* Max/Min/Mulh are optional ops; fetch them before registering */
3177 op_Max = get_op_Max();
3180 op_Min = get_op_Min();
3183 op_Mulh = get_op_Mulh();
3192 typedef ir_node *(transform_func)(ia32_transform_env_t *env);
3195 * Transforms the given firm node (and maybe some other related nodes)
3196 * into one or more assembler nodes.
3198 * @param node the firm node
3199 * @param env the debug module
/**
 * Transforms the given firm node (and maybe some other related nodes) into
 * one or more assembler nodes: Unknown inputs are redirected to the
 * per-mode UNKNOWN Proj, then the op's registered transformer (if any) is
 * called and the result exchanged for the original node.
 *
 * @param node the firm node
 * @param env  the ia32 code generator environment (passed as void* walker arg)
 * NOTE(review): excerpt is elided — the declaration of i and some tenv
 * field assignments (cg, irn) are not visible here.
 */
3201 void ia32_transform_node(ir_node *node, void *env) {
3202 ia32_code_gen_t *cg = (ia32_code_gen_t *)env;
3203 ir_op *op = get_irn_op(node);
3204 ir_node *asm_node = NULL;
3210 /* link arguments pointing to Unknown to the UNKNOWN Proj */
3211 for (i = get_irn_arity(node) - 1; i >= 0; i--) {
3212 if (is_Unknown(get_irn_n(node, i)))
3213 set_irn_n(node, i, be_get_unknown_for_mode(cg, get_irn_mode(get_irn_n(node, i))));
3216 DBG((cg->mod, LEVEL_1, "check %+F ... ", node));
3217 if (op->ops.generic) {
3218 ia32_transform_env_t tenv;
3219 transform_func *transform = (transform_func *)op->ops.generic;
/* build the per-node transformation environment */
3221 tenv.block = get_nodes_block(node);
3222 tenv.dbg = get_irn_dbg_info(node);
3223 tenv.irg = current_ir_graph;
3225 tenv.mode = get_irn_mode(node);
3227 DEBUG_ONLY(tenv.mod = cg->mod;)
3229 asm_node = (*transform)(&tenv);
3232 /* exchange nodes if a new one was generated */
3234 exchange(node, asm_node);
3235 DB((cg->mod, LEVEL_1, "created node %+F[%p]\n", asm_node, asm_node));
3238 DB((cg->mod, LEVEL_1, "ignored\n"));
3243 * Transforms a psi condition.
/**
 * Transforms a Psi condition tree: sets the target mode on the logic node
 * and replaces each Cmp-Proj input by a Set (integer) or xCmp/CmpSet
 * (float) node so the compare result materializes in a register. Recurses
 * into non-compare inputs.
 *
 * @param cond the condition node (And/Or tree over Cmp Projs)
 * @param mode the target mode of the Psi
 * @param cg   the ia32 code generator environment
 * NOTE(review): excerpt is elided — declarations (i), several branch
 * delimiters and tenv field assignments are not visible here.
 */
3245 static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg) {
3248 /* if the mode is target mode, we have already seen this part of the tree */
3249 if (get_irn_mode(cond) == mode)
3252 assert(get_irn_mode(cond) == mode_b && "logical operator for condition must be mode_b");
3254 set_irn_mode(cond, mode);
3256 for (i = get_irn_arity(cond) - 1; i >= 0; i--) {
3257 ir_node *in = get_irn_n(cond, i);
3259 /* if in is a compare: transform into Set/xCmp */
3261 ir_node *new_op = NULL;
3262 ir_node *cmp = get_Proj_pred(in);
3263 ir_node *cmp_a = get_Cmp_left(cmp);
3264 ir_node *cmp_b = get_Cmp_right(cmp);
3265 dbg_info *dbg = get_irn_dbg_info(cmp);
3266 ir_graph *irg = get_irn_irg(cmp);
3267 ir_node *block = get_nodes_block(cmp);
3268 ir_node *noreg = ia32_new_NoReg_gp(cg);
3269 ir_node *nomem = new_rd_NoMem(irg);
3270 int pnc = get_Proj_proj(in);
3272 /* this is a compare */
3273 if (mode_is_float(mode)) {
3274 /* Psi is float, we need a floating point compare */
3277 ir_mode *m = get_irn_mode(cmp_a);
/* integer operands must be converted to float first */
3279 if (! mode_is_float(m)) {
3280 cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, cmp_a, mode);
3281 cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, cmp_b, mode);
3283 else if (m == mode_F) {
3284 /* we convert cmp values always to double, to get correct bitmask with cmpsd */
3285 cmp_a = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_a, cmp_a);
3286 cmp_b = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_b, cmp_b);
3289 new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem, mode);
3290 set_ia32_pncode(new_op, pnc);
3291 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, cmp));
/* integer Psi: build a Set node via gen_binop (branch delimiters elided) */
3300 ia32_transform_env_t tenv;
3301 construct_binop_func *set_func = NULL;
3303 if (mode_is_float(get_irn_mode(cmp_a))) {
3304 /* 1st case: compare operands are floats */
3309 set_func = new_rd_ia32_xCmpSet;
3313 set_func = new_rd_ia32_vfCmpSet;
3316 pnc &= 7; /* fp compare -> int compare */
3319 /* 2nd case: compare operand are integer too */
3320 set_func = new_rd_ia32_CmpSet;
3329 DEBUG_ONLY(tenv.mod = cg->mod;)
3331 new_op = gen_binop(&tenv, cmp_a, cmp_b, set_func);
3332 set_ia32_pncode(new_op, pnc);
3333 set_ia32_am_support(new_op, ia32_am_Source);
3336 /* set the new compare as in */
3337 set_irn_n(cond, i, new_op);
3340 /* another complex condition */
3341 transform_psi_cond(in, mode, cg);
3347 * The Psi selector can be a tree of compares combined with "And"s and "Or"s.
3348 * We create a Set node, respectively a xCmp in case the Psi is a float, for each
3349 * compare, which causes the compare result to be stores in a register. The
3350 * "And"s and "Or"s are transformed later, we just have to set their mode right.
/**
 * Transforms a Psi's condition tree (compares combined with And/Or): each
 * compare becomes a Set (or xCmp for float Psi) via transform_psi_cond,
 * then the evaluated tree is compared against 0 to regain a mode_b
 * selector for the Psi.
 * NOTE(review): excerpt is elided — declarations (irg, mode), early
 * returns and the closing brace are not visible here.
 */
3352 void ia32_transform_psi_cond_tree(ir_node *node, void *env) {
3353 ia32_code_gen_t *cg = env;
3354 ir_node *psi_sel, *new_cmp, *block;
3359 if (get_irn_opcode(node) != iro_Psi)
3362 psi_sel = get_Psi_cond(node, 0);
3364 /* if psi_cond is a cmp: do nothing, this case is covered by gen_Psi */
3365 if (is_Proj(psi_sel))
3368 //mode = get_irn_mode(node);
3371 transform_psi_cond(psi_sel, mode, cg);
3373 irg = get_irn_irg(node);
3374 block = get_nodes_block(node);
3376 /* we need to compare the evaluated condition tree with 0 */
3377 mode = get_irn_mode(node);
3378 if (mode_is_float(mode)) {
/* float Psi: convert the condition to float and test for != 0 */
3379 psi_sel = gen_sse_conv_int2float(cg, NULL, irg, block, psi_sel, NULL, mode);
3380 /* BEWARE: new_r_Const_long works for floating point as well */
3381 new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode, 0));
3382 new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Ne);
/* integer Psi: test for != 0 via Lt|Gt (else branch; delimiters elided) */
3385 new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode_Iu, 0));
3386 new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Gt | pn_Cmp_Lt);
3389 set_Psi_cond(node, 0, new_cmp);