2 * This file implements the IR transformation from firm into ia32-Firm.
3 * @author Christian Wuerdig
14 #include "irgraph_t.h"
19 #include "iredges_t.h"
30 #include "archop.h" /* we need this for Min and Max nodes */
37 #include "../benode_t.h"
38 #include "../besched.h"
40 #include "../beutil.h"
42 #include "bearch_ia32_t.h"
43 #include "ia32_nodes_attr.h"
44 #include "ia32_transform.h"
45 #include "ia32_new_nodes.h"
46 #include "ia32_map_regs.h"
47 #include "ia32_dbg_stat.h"
48 #include "ia32_optimize.h"
49 #include "ia32_util.h"
51 #include "gen_ia32_regalloc_if.h"
/* Hex bit patterns for manipulating the sign bit of IEEE floats, used by
 * ia32_gen_fp_known_const: *_SIGN selects the sign bit (for Neg via xor),
 * *_ABS clears it (for Abs via and). S = single (32 bit), D = double (64 bit). */
#define SFP_SIGN "0x80000000"
#define DFP_SIGN "0x8000000000000000"
#define SFP_ABS "0x7FFFFFFF"
#define DFP_ABS "0x7FFFFFFFFFFFFFFF"

/* names of the primitive types wrapping the constants above */
#define TP_SFP_SIGN "ia32_sfp_sign"
#define TP_DFP_SIGN "ia32_dfp_sign"
#define TP_SFP_ABS "ia32_sfp_abs"
#define TP_DFP_ABS "ia32_dfp_abs"

/* entity (linker symbol) names for the constants above */
#define ENT_SFP_SIGN "IA32_SFP_SIGN"
#define ENT_DFP_SIGN "IA32_DFP_SIGN"
#define ENT_SFP_ABS "IA32_SFP_ABS"
#define ENT_DFP_ABS "IA32_DFP_ABS"
/** Environment threaded through the firm -> ia32-firm transformation. */
typedef struct ia32_transform_env_t {
	ir_graph        *irg;          /**< The irg, the node should be created in */
	ia32_code_gen_t *cg;           /**< The code generator */
	int              visited;      /**< visited count that indicates whether a
	                                    node is already transformed */
	pdeq            *worklist;     /**< worklist of nodes that still need to be
	                                    transformed */
	ir_node        **old_anchors;  /**< the list of anchors nodes in the old irg */
	DEBUG_ONLY(firm_dbg_module_t *mod;) /**< The firm debugger */
} ia32_transform_env_t;
/* Mulh is provided by the arch-op module (see archop.h included above). */
extern ir_op *get_op_Mulh(void);

/** Constructor signature of ia32 binary operations:
 *  (dbg, irg, block, base, index, op1, op2, mem). */
typedef ir_node *construct_binop_func(dbg_info *db, ir_graph *irg,
	ir_node *block, ir_node *base, ir_node *index, ir_node *op1,
	ir_node *op2, ir_node *mem);

/** Constructor signature of ia32 unary operations; like construct_binop_func
 *  but with a single operand input (see the call in gen_unop). */
typedef ir_node *construct_unop_func(dbg_info *db, ir_graph *irg,
	ir_node *block, ir_node *base, ir_node *index, ir_node *op,

/** A transform callback: translates one firm node into its ia32 equivalent. */
typedef ir_node *(transform_func)(ia32_transform_env_t *env, ir_node *node);
91 /****************************************************************************************************
93 * | | | | / _| | | (_)
94 * _ __ ___ __| | ___ | |_ _ __ __ _ _ __ ___| |_ ___ _ __ _ __ ___ __ _| |_ _ ___ _ __
95 * | '_ \ / _ \ / _` |/ _ \ | __| '__/ _` | '_ \/ __| _/ _ \| '__| '_ ` _ \ / _` | __| |/ _ \| '_ \
96 * | | | | (_) | (_| | __/ | |_| | | (_| | | | \__ \ || (_) | | | | | | | | (_| | |_| | (_) | | | |
97 * |_| |_|\___/ \__,_|\___| \__|_| \__,_|_| |_|___/_| \___/|_| |_| |_| |_|\__,_|\__|_|\___/|_| |_|
99 ****************************************************************************************************/
101 static ir_node *duplicate_node(ia32_transform_env_t *env, ir_node *node);
102 static ir_node *transform_node(ia32_transform_env_t *env, ir_node *node);
103 static void duplicate_deps(ia32_transform_env_t *env, ir_node *old_node,
/** Remember new_node as the transformed counterpart of old_node
 *  (stored in the node's link field; see get_new_node). */
static INLINE void set_new_node(ir_node *old_node, ir_node *new_node)
	set_irn_link(old_node, new_node);
/** Fetch the transformed counterpart stored by set_new_node().
 *  old_node must already have been visited (i.e. transformed). */
static INLINE ir_node *get_new_node(ir_node *old_node)
	assert(irn_visited(old_node));
	return (ir_node*) get_irn_link(old_node);
/**
 * Returns 1 if irn is a Const representing 0, 0 otherwise.
 */
static INLINE int is_ia32_Const_0(ir_node *irn) {
	/* only ia32 nodes of op type ia32_Const carry an Immop tarval to classify */
	return (is_ia32_irn(irn) && get_ia32_op_type(irn) == ia32_Const) ?
		classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_NULL : 0;
/**
 * Returns 1 if irn is a Const representing 1, 0 otherwise.
 */
static INLINE int is_ia32_Const_1(ir_node *irn) {
	/* only ia32 nodes of op type ia32_Const carry an Immop tarval to classify */
	return (is_ia32_irn(irn) && get_ia32_op_type(irn) == ia32_Const) ?
		classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_ONE : 0;
/**
 * Collects all Projs of a node into the node array. Index is the projnum.
 * BEWARE: The caller has to assure the appropriate array size!
 */
static void ia32_collect_Projs(ir_node *irn, ir_node **projs, int size) {
	const ir_edge_t *edge;

	assert(get_irn_mode(irn) == mode_T && "need mode_T");

	/* slots without a Proj stay NULL */
	memset(projs, 0, size * sizeof(projs[0]));

	foreach_out_edge(irn, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		int proj_proj = get_Proj_proj(proj);
		assert(proj_proj < size);
		projs[proj_proj] = proj;
/**
 * Renumbers the proj having pn_old in the array to pn_new
 * and removes the proj from the array.
 */
static INLINE void ia32_renumber_Proj(ir_node **projs, long pn_old, long pn_new) {
	/* NOTE(review): warning suggests this helper is not expected to be used */
	fprintf(stderr, "Warning: renumber_Proj used!\n");
	set_Proj_proj(projs[pn_old], pn_new);
	projs[pn_old] = NULL;
/**
 * creates a unique ident by adding a number to a tag
 *
 * @param tag the tag string, must contain a %d if a number
 *            should be inserted (used directly as printf format —
 *            callers must pass trusted literals only)
 */
static ident *unique_id(const char *tag)
	static unsigned id = 0;
	snprintf(str, sizeof(str), tag, ++id);
	return new_id_from_str(str);
/**
 * Get a primitive type for a mode, memoized in the given pmap.
 */
static ir_type *get_prim_type(pmap *types, ir_mode *mode)
	pmap_entry *e = pmap_find(types, mode);
	/* cache miss: create a primitive type named after the mode and memoize it */
	snprintf(buf, sizeof(buf), "prim_type_%s", get_mode_name(mode));
	res = new_type_primitive(new_id_from_str(buf), mode);
	pmap_insert(types, mode, res);
/**
 * Get an entity that is initialized with a tarval, memoized per tarval
 * in cg->isa->tv_ent.
 */
static ir_entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst)
	tarval *tv = get_Const_tarval(cnst);
	pmap_entry *e = pmap_find(cg->isa->tv_ent, tv);
	ir_mode *mode = get_irn_mode(cnst);
	ir_type *tp = get_Const_type(cnst);
	if (tp == firm_unknown_type)
		tp = get_prim_type(cg->isa->types, mode);

	/* local, constant, statically allocated entity named .LC<n> */
	res = new_entity(get_glob_type(), unique_id(".LC%u"), tp);

	set_entity_ld_ident(res, get_entity_ident(res));
	set_entity_visibility(res, visibility_local);
	set_entity_variability(res, variability_constant);
	set_entity_allocation(res, allocation_static);

	/* we create a new entity here: It's initialization must resist on the
	   const code irg */
	rem = current_ir_graph;
	current_ir_graph = get_const_code_irg();
	set_atomic_ent_value(res, new_Const_type(tv, tp));
	current_ir_graph = rem;

	/* memoize so equal tarvals share one entity */
	pmap_insert(cg->isa->tv_ent, tv, res);
/**
 * Transforms a Const.
 *
 * @param env  the transformation environment
 * @param node the ir Const node
 * @return the created ia32 Const node
 */
static ir_node *gen_Const(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));

	if (mode_is_float(mode)) {
		ir_node *noreg = ia32_new_NoReg_gp(env->cg);
		ir_node *nomem = new_NoMem();

		if (! USE_SSE2(env->cg)) {
			/* x87 path: 0.0 and 1.0 have dedicated load instructions */
			cnst_classify_t clss = classify_Const(node);

			if (clss == CNST_NULL) {
				load = new_rd_ia32_vfldz(dbg, irg, block);
			} else if (clss == CNST_ONE) {
				load = new_rd_ia32_vfld1(dbg, irg, block);
				/* other values are loaded from a static entity */
				floatent = get_entity_for_tv(env->cg, node);

				load = new_rd_ia32_vfld(dbg, irg, block, noreg, noreg, nomem);
				set_ia32_am_support(load, ia32_am_Source);
				set_ia32_op_type(load, ia32_AddrModeS);
				set_ia32_am_flavour(load, ia32_am_N);
				set_ia32_am_sc(load, ia32_get_ent_ident(floatent));
				/* NOTE(review): Proj mode is mode_D even for single-precision
				   consts — the real mode is carried in ls_mode below; verify */
				res = new_r_Proj(irg, block, load, mode_D, pn_ia32_vfld_res);
			/* SSE2 path: always load the constant from memory */
			floatent = get_entity_for_tv(env->cg, node);

			load = new_rd_ia32_xLoad(dbg, irg, block, noreg, noreg, nomem);
			set_ia32_am_support(load, ia32_am_Source);
			set_ia32_op_type(load, ia32_AddrModeS);
			set_ia32_am_flavour(load, ia32_am_N);
			set_ia32_am_sc(load, ia32_get_ent_ident(floatent));
			res = new_r_Proj(irg, block, load, mode_D, pn_ia32_xLoad_res);
		set_ia32_ls_mode(load, mode);
		SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env->cg, node));

		/* Const Nodes before the initial IncSP are a bad idea, because
		 * they could be spilled and we have no SP ready at that point yet
		 */
		if (get_irg_start_block(irg) == block) {
			add_irn_dep(load, get_irg_frame(irg));

		SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env->cg, node));
		/* integer constants become an ia32 Const node */
		ir_node *cnst = new_rd_ia32_Const(dbg, irg, block);

		if (get_irg_start_block(irg) == block) {
			add_irn_dep(cnst, get_irg_frame(irg));

		set_ia32_Const_attr(cnst, node);
		SET_IA32_ORIG_NODE(cnst, ia32_get_old_node_name(env->cg, node));
	/* fallthrough result — presumably unreachable; verify in full source */
	return new_r_Bad(irg);
/**
 * Transforms a SymConst.
 *
 * @param env  the transformation environment
 * @param node the ir SymConst node
 * @return the created ia32 Const node
 */
static ir_node *gen_SymConst(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));

	if (mode_is_float(mode)) {
		/* SSE2 and x87 use different constant node kinds */
		if (USE_SSE2(env->cg))
			cnst = new_rd_ia32_xConst(dbg, irg, block);
			cnst = new_rd_ia32_vfConst(dbg, irg, block);
		set_ia32_ls_mode(cnst, mode);
		cnst = new_rd_ia32_Const(dbg, irg, block);

	/* Const Nodes before the initial IncSP are a bad idea, because
	 * they could be spilled and we have no SP ready at that point yet
	 */
	if (get_irg_start_block(irg) == block) {
		add_irn_dep(cnst, get_irg_frame(irg));

	set_ia32_Const_attr(cnst, node);
	SET_IA32_ORIG_NODE(cnst, ia32_get_old_node_name(env->cg, node));
/**
 * SSE convert of an integer node into a floating point node.
 *
 * @param cg       the code generator
 * @param dbg      debug info to attach
 * @param irg      graph to create the node in
 * @param block    block for the new node
 * @param in       the (integer) value to convert
 * @param old_node original node, used for debug naming; assumed to be a Cmp
 *                 here (its left operand is read below)
 * @param tgt_mode target floating point mode
 */
static ir_node *gen_sse_conv_int2float(ia32_code_gen_t *cg, dbg_info *dbg,
                                       ir_graph *irg, ir_node *block,
                                       ir_node *in, ir_node *old_node, ir_mode *tgt_mode)
	ir_node *noreg = ia32_new_NoReg_gp(cg);
	ir_node *nomem = new_rd_NoMem(irg);
	ir_node *old_pred = get_Cmp_left(old_node);
	ir_mode *in_mode = get_irn_mode(old_pred);
	/* NOTE(review): in_bits is unused in the visible code — verify in context */
	int in_bits = get_mode_size_bits(in_mode);

	ir_node *conv = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, in, nomem);
	set_ia32_ls_mode(conv, tgt_mode);
	set_ia32_am_support(conv, ia32_am_Source);
	SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node));
/**
 * SSE convert of a float node into a double node (FP-to-FP conversion
 * with mode_D as load/store mode).
 */
static ir_node *gen_sse_conv_f2d(ia32_code_gen_t *cg, dbg_info *dbg,
                                 ir_graph *irg, ir_node *block,
                                 ir_node *in, ir_node *old_node)
	ir_node *noreg = ia32_new_NoReg_gp(cg);
	ir_node *nomem = new_rd_NoMem(irg);

	ir_node *conv = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, in, nomem);
	set_ia32_am_support(conv, ia32_am_Source);
	set_ia32_ls_mode(conv, mode_D);
	SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node));
/* Generates an entity for a known FP const (used for FP Neg + Abs) */
ident *ia32_gen_fp_known_const(ia32_known_const_t kct) {
	/* type name, entity name and bit-pattern string per known constant */
	static const struct {
		const char *ent_name;
		const char *cnst_str;
	} names [ia32_known_const_max] = {
		{ TP_SFP_SIGN, ENT_SFP_SIGN, SFP_SIGN }, /* ia32_SSIGN */
		{ TP_DFP_SIGN, ENT_DFP_SIGN, DFP_SIGN }, /* ia32_DSIGN */
		{ TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS }, /* ia32_SABS */
		{ TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS } /* ia32_DABS */
	/* entities are created lazily and cached across calls */
	static ir_entity *ent_cache[ia32_known_const_max];

	const char *tp_name, *ent_name, *cnst_str;

	ent_name = names[kct].ent_name;
	if (! ent_cache[kct]) {
		tp_name = names[kct].tp_name;
		cnst_str = names[kct].cnst_str;

		/* single precision constants are 32 bit, double precision 64 bit */
		mode = kct == ia32_SSIGN || kct == ia32_SABS ? mode_Iu : mode_Lu;
		tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode);
		tp = new_type_primitive(new_id_from_str(tp_name), mode);
		ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp);

		set_entity_ld_ident(ent, get_entity_ident(ent));
		set_entity_visibility(ent, visibility_local);
		set_entity_variability(ent, variability_constant);
		set_entity_allocation(ent, allocation_static);

		/* we create a new entity here: It's initialization must resist on the
		   const code irg */
		rem = current_ir_graph;
		current_ir_graph = get_const_code_irg();
		cnst = new_Const(mode, tv);
		current_ir_graph = rem;

		set_atomic_ent_value(ent, cnst);

		/* cache the entry */
		ent_cache[kct] = ent;

	return get_entity_ident(ent_cache[kct]);
/**
 * Prints the old node name on cg obst and returns a pointer to it.
 * The string lives on the isa's name obstack and is never freed here.
 */
const char *ia32_get_old_node_name(ia32_code_gen_t *cg, ir_node *irn) {
	ia32_isa_t *isa = (ia32_isa_t *)cg->arch_env->isa;

	/* format "%+F" of the node onto the obstack, then NUL-terminate */
	lc_eoprintf(firm_get_arg_env(), isa->name_obst, "%+F", irn);
	obstack_1grow(isa->name_obst, 0);
	return obstack_finish(isa->name_obst);
/* determine if one operator is an Imm: returns the constant operand
 * (op1 preferred) or NULL if there is none */
static ir_node *get_immediate_op(ir_node *op1, ir_node *op2) {
	/* NOTE(review): guarded by an (elided) check — callers may pass op1 == NULL */
		return is_ia32_Cnst(op1) ? op1 : (is_ia32_Cnst(op2) ? op2 : NULL);
	else return is_ia32_Cnst(op2) ? op2 : NULL;
470 /* determine if one operator is not an Imm */
471 static ir_node *get_expr_op(ir_node *op1, ir_node *op2) {
472 return !is_ia32_Cnst(op1) ? op1 : (!is_ia32_Cnst(op2) ? op2 : NULL);
/**
 * Try to fold a constant operand of node into its ia32 immediate attribute,
 * replacing the corresponding operand input by the admissible NoReg.
 *
 * @param env  the transformation environment
 * @param node the ia32 node to modify in place
 * @param in1  input index of the left operand
 * @param in2  input index of the right operand
 */
static void fold_immediate(ia32_transform_env_t *env, ir_node *node, int in1, int in2) {
	/* immediate folding must be enabled */
	if(! (env->cg->opt & IA32_OPT_IMMOPS))

	left = get_irn_n(node, in1);
	right = get_irn_n(node, in2);
	if(!is_ia32_Cnst(right) && is_ia32_Cnst(left)) {
		/* we can only set right operand to immediate */
		if(!is_ia32_commutative(node))
		/* exchange left/right */
		set_irn_n(node, in1, right);
		set_irn_n(node, in2, ia32_get_admissible_noreg(env->cg, node, in2));
		set_ia32_Immop_attr(node, left);
	} else if(is_ia32_Cnst(right)) {
		set_irn_n(node, in2, ia32_get_admissible_noreg(env->cg, node, in2));
		set_ia32_Immop_attr(node, right);
	/* a folded immediate can no longer come from memory: drop source AM */
	set_ia32_am_support(node, get_ia32_am_support(node) & ~ia32_am_Source);
/**
 * Construct a standard binary operation, set AM and immediate if required.
 *
 * @param env  The transformation environment
 * @param node The original firm node (for debug info, block, commutativity)
 * @param op1  The first operand
 * @param op2  The second operand
 * @param func The node constructor function
 * @return The constructed ia32 node.
 */
static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *node,
                          ir_node *op1, ir_node *op2,
                          construct_binop_func *func) {
	ir_node *new_node = NULL;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);

	new_node = func(dbg, irg, block, noreg_gp, noreg_gp, new_op1, new_op2, nomem);
	/* Mul is restricted to source AM; all other binops get full AM support */
	if(func == new_rd_ia32_Mul) {
		set_ia32_am_support(new_node, ia32_am_Source);
		set_ia32_am_support(new_node, ia32_am_Full);

	SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env->cg, node));
	if (is_op_commutative(get_irn_op(node))) {
		set_ia32_commutative(new_node);
	/* inputs 2/3 are the operands (0/1 are presumably base/index — verify) */
	fold_immediate(env, new_node, 2, 3);
/**
 * Construct a standard floating point binary operation, set AM and
 * immediate if required.
 *
 * @param env  The transformation environment
 * @param node The original firm node
 * @param op1  The first operand
 * @param op2  The second operand
 * @param func The node constructor function
 * @return The constructed ia32 node.
 */
static ir_node *gen_binop_float(ia32_transform_env_t *env, ir_node *node,
                                ir_node *op1, ir_node *op2,
                                construct_binop_func *func)
	ir_node *new_node = NULL;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_graph *irg = env->irg;
	ir_mode *mode = get_irn_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);

	new_node = func(dbg, irg, block, noreg_gp, noreg_gp, new_op1, new_op2, nomem);
	set_ia32_am_support(new_node, ia32_am_Source);
	if (is_op_commutative(get_irn_op(node))) {
		set_ia32_commutative(new_node);
	/* SSE2 instructions need the operand size as load/store mode */
	if (USE_SSE2(env->cg)) {
		set_ia32_ls_mode(new_node, mode);

	SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env->cg, node));
/**
 * Construct a shift/rotate binary operation, sets AM and immediate if required.
 *
 * @param env  The transformation environment
 * @param node The original firm node
 * @param op1  The first operand (value to shift)
 * @param op2  The second operand (shift count)
 * @param func The node constructor function
 * @return The constructed ia32 node.
 */
static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *node,
                                ir_node *op1, ir_node *op2,
                                construct_binop_func *func) {
	ir_node *new_op = NULL;
	ir_mode *mode = get_irn_mode(node);
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_graph *irg = env->irg;
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);

	assert(! mode_is_float(mode) && "Shift/Rotate with float not supported");

	/* Check if immediate optimization is on and */
	/* if it's an operation with immediate. */
	/* only the shift count (op2) may become an immediate here */
	imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(NULL, new_op2) : NULL;
	expr_op = get_expr_op(new_op1, new_op2);

	assert((expr_op || imm_op) && "invalid operands");

	/* We have two consts here: not yet supported */

	/* Limit imm_op within range imm8 */
	tv = get_ia32_Immop_tarval(imm_op);
	/* the shift count is reduced mod 32 */
	tv = tarval_mod(tv, new_tarval_from_long(32, get_tarval_mode(tv)));
	set_ia32_Immop_tarval(imm_op, tv);

	/* integer operations */
	/* This is shift/rot with const */
	DB((mod, LEVEL_1, "Shift/Rot with immediate ..."));

	new_op = func(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem);
	set_ia32_Immop_attr(new_op, imm_op);
	/* This is a normal shift/rot */
	DB((mod, LEVEL_1, "Shift/Rot binop ..."));
	new_op = func(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem);

	set_ia32_am_support(new_op, ia32_am_Dest);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));

	/* variable shift counts are emitted via the cl register */
	set_ia32_emit_cl(new_op);
/**
 * Construct a standard unary operation, set AM and immediate if required.
 *
 * @param env  The transformation environment
 * @param node The original firm node
 * @param op   The operand
 * @param func The node constructor function
 * @return The constructed ia32 node.
 */
static ir_node *gen_unop(ia32_transform_env_t *env, ir_node *node, ir_node *op,
                         construct_unop_func *func) {
	ir_node *new_node = NULL;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	ir_node *new_op = transform_node(env, op);
	DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)

	new_node = func(dbg, irg, block, noreg, noreg, new_op, nomem);
	DB((mod, LEVEL_1, "INT unop ..."));
	set_ia32_am_support(new_node, ia32_am_Dest);

	SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env->cg, node));
/**
 * Creates an ia32 Add with immediate.
 *
 * @param env      The transformation environment
 * @param node     The original Add node
 * @param expr_op  The expression operator
 * @param const_op The constant
 * @return the created ia32 Add node
 */
static ir_node *gen_imm_Add(ia32_transform_env_t *env, ir_node *node,
                            ir_node *expr_op, ir_node *const_op) {
	ir_node *new_op = NULL;
	tarval *tv = get_ia32_Immop_tarval(const_op);
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	tarval_classification_t class_tv, class_negtv;
	DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)

	/* try to optimize to inc/dec */
	if ((env->cg->opt & IA32_OPT_INCDEC) && tv && (get_ia32_op_type(const_op) == ia32_Const)) {
		/* optimize tarvals */
		class_tv = classify_tarval(tv);
		class_negtv = classify_tarval(tarval_neg(tv));

		if (class_tv == TV_CLASSIFY_ONE) { /* + 1 == INC */
			DB((env->mod, LEVEL_2, "Add(1) to Inc ... "));
			new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem);
		else if (class_tv == TV_CLASSIFY_ALL_ONE || class_negtv == TV_CLASSIFY_ONE) { /* + (-1) == DEC */
			DB((mod, LEVEL_2, "Add(-1) to Dec ... "));
			new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem);

	/* general case: Add with immediate attribute taken from const_op */
	new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem);
	set_ia32_Immop_attr(new_op, const_op);
	set_ia32_commutative(new_op);
/**
 * Creates an ia32 Add.
 *
 * @param env  The transformation environment
 * @param node The Add node to transform
 * @return the created ia32 Add node
 */
static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) {
	ir_node *new_op = NULL;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	ir_node *expr_op, *imm_op;
	ir_node *op1 = get_Add_left(node);
	ir_node *op2 = get_Add_right(node);
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);

	/* Check if immediate optimization is on and */
	/* if it's an operation with immediate. */
	imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(new_op1, new_op2) : NULL;
	expr_op = get_expr_op(new_op1, new_op2);

	assert((expr_op || imm_op) && "invalid operands");

	if (mode_is_float(mode)) {
		/* note: the float helpers transform op1/op2 themselves */
		if (USE_SSE2(env->cg))
			return gen_binop_float(env, node, op1, op2, new_rd_ia32_xAdd);
			return gen_binop_float(env, node, op1, op2, new_rd_ia32_vfadd);

	/* No expr_op means, that we have two const - one symconst and */
	/* one tarval or another symconst - because this case is not */
	/* covered by constant folding */
	/* We need to check for: */
	/* 1) symconst + const -> becomes a LEA */
	/* 2) symconst + symconst -> becomes a const + LEA as the elf */
	/* linker doesn't support two symconsts */

	if (get_ia32_op_type(new_op1) == ia32_SymConst
			&& get_ia32_op_type(new_op2) == ia32_SymConst) {
		/* this is the 2nd case */
		new_op = new_rd_ia32_Lea(dbg, irg, block, new_op1, noreg);
		set_ia32_am_sc(new_op, get_ia32_id_cnst(new_op2));
		set_ia32_am_flavour(new_op, ia32_am_OB);
		set_ia32_am_support(new_op, ia32_am_Source);
		set_ia32_op_type(new_op, ia32_AddrModeS);

		DBG_OPT_LEA3(new_op1, new_op2, node, new_op);
		/* this is the 1st case */
		if (get_ia32_op_type(new_op1) == ia32_SymConst) {
			/* symconst + const: symconst as am_sc, const as offset */
			tarval *tv = get_ia32_cnst_tv(new_op2);
			long offs = get_tarval_long(tv);

			new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg);
			DBG_OPT_LEA3(new_op1, new_op2, node, new_op);

			set_ia32_am_sc(new_op, get_ia32_id_cnst(new_op1));
			add_ia32_am_offs_int(new_op, offs);
			set_ia32_am_flavour(new_op, ia32_am_O);
			set_ia32_am_support(new_op, ia32_am_Source);
			set_ia32_op_type(new_op, ia32_AddrModeS);
		} else if (get_ia32_op_type(new_op2) == ia32_SymConst) {
			/* const + symconst: mirrored version of the branch above */
			tarval *tv = get_ia32_cnst_tv(new_op1);
			long offs = get_tarval_long(tv);

			new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg);
			DBG_OPT_LEA3(new_op1, new_op2, node, new_op);

			add_ia32_am_offs_int(new_op, offs);
			set_ia32_am_sc(new_op, get_ia32_id_cnst(new_op2));
			set_ia32_am_flavour(new_op, ia32_am_O);
			set_ia32_am_support(new_op, ia32_am_Source);
			set_ia32_op_type(new_op, ia32_AddrModeS);
			/* two tarval constants: fold them here into a single Const */
			tarval *tv1 = get_ia32_cnst_tv(new_op1);
			tarval *tv2 = get_ia32_cnst_tv(new_op2);
			tarval *restv = tarval_add(tv1, tv2);

			DEBUG_ONLY(ir_fprintf(stderr, "Warning: add with 2 consts not folded: %+F\n", node));

			new_op = new_rd_ia32_Const(dbg, irg, block);
			set_ia32_Const_tarval(new_op, restv);
			DBG_OPT_LEA3(new_op1, new_op2, node, new_op);

		/* This is expr + const */
		new_op = gen_imm_Add(env, node, expr_op, imm_op);

		set_ia32_am_support(new_op, ia32_am_Dest);
		/* This is a normal add */
		new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem);

		set_ia32_am_support(new_op, ia32_am_Full);
		set_ia32_commutative(new_op);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
/**
 * Creates an ia32 Mul.
 *
 * @param env  The transformation environment
 * @param node The Mul node to transform
 * @return the created ia32 Mul node
 */
static ir_node *gen_Mul(ia32_transform_env_t *env, ir_node *node) {
	ir_node *op1 = get_Mul_left(node);
	ir_node *op2 = get_Mul_right(node);
	ir_mode *mode = get_irn_mode(node);

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = gen_binop_float(env, node, op1, op2, new_rd_ia32_xMul);
			new_op = gen_binop_float(env, node, op1, op2, new_rd_ia32_vfmul);
		/* integer multiplication */
		new_op = gen_binop(env, node, op1, op2, new_rd_ia32_Mul);
/**
 * Creates an ia32 Mulh.
 * Note: Mul produces a 64Bit result and Mulh returns the upper 32 bit of
 * this result while Mul returns the lower 32 bit.
 *
 * @param env  The transformation environment
 * @param node The Mulh node to transform
 * @return the created ia32 Mulh node
 */
static ir_node *gen_Mulh(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *op1 = get_irn_n(node, 0);
	ir_node *op2 = get_irn_n(node, 1);
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *proj_EAX, *proj_EDX, *mulh;
	ir_mode *mode = get_irn_mode(node);

	assert(!mode_is_float(mode) && "Mulh with float not supported");
	mulh = new_rd_ia32_Mulh(dbg, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem());
	set_ia32_commutative(mulh);
	set_ia32_am_support(mulh, ia32_am_Source);

	/* immediates are not supported, so no fold_immediate */
	/* x86 mul places the low part in EAX and the high part in EDX */
	proj_EAX = new_rd_Proj(dbg, irg, block, mulh, mode_Iu, pn_EAX);
	proj_EDX = new_rd_Proj(dbg, irg, block, mulh, mode_Iu, pn_EDX);

	/* keep the otherwise unused result alive
	   (NOTE(review): the `in` array is set up in elided code) */
	be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
919 * Creates an ia32 And.
921 * @param env The transformation environment
922 * @return The created ia32 And node
924 static ir_node *gen_And(ia32_transform_env_t *env, ir_node *node) {
925 ir_node *op1 = get_And_left(node);
926 ir_node *op2 = get_And_right(node);
927 ir_mode *mode = get_irn_mode(node);
929 assert (! mode_is_float(mode));
930 return gen_binop(env, node, op1, op2, new_rd_ia32_And);
936 * Creates an ia32 Or.
938 * @param env The transformation environment
939 * @return The created ia32 Or node
941 static ir_node *gen_Or(ia32_transform_env_t *env, ir_node *node) {
942 ir_node *op1 = get_Or_left(node);
943 ir_node *op2 = get_Or_right(node);
944 ir_mode *mode = get_irn_mode(node);
946 assert (! mode_is_float(mode));
947 return gen_binop(env, node, op1, op2, new_rd_ia32_Or);
953 * Creates an ia32 Eor.
955 * @param env The transformation environment
956 * @return The created ia32 Eor node
958 static ir_node *gen_Eor(ia32_transform_env_t *env, ir_node *node) {
959 ir_node *op1 = get_Eor_left(node);
960 ir_node *op2 = get_Eor_right(node);
961 ir_mode *mode = get_irn_mode(node);
963 assert(! mode_is_float(mode));
964 return gen_binop(env, node, op1, op2, new_rd_ia32_Eor);
/**
 * Creates an ia32 Max.
 *
 * @param env  The transformation environment
 * @param node The Max node to transform
 * @return the created ia32 Max node
 */
static ir_node *gen_Max(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	ir_mode *mode = get_irn_mode(node);
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *op1 = get_irn_n(node, 0);
	ir_node *op2 = get_irn_n(node, 1);
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);
	ir_mode *op_mode = get_irn_mode(op1);

	assert(get_mode_size_bits(mode) == 32);

	if (mode_is_float(mode)) {
		/* NOTE(review): passes the already-transformed new_op1/new_op2 into
		 * gen_binop_float, which transforms its operands again — gen_Min
		 * passes the raw op1/op2 here; check which variant is intended */
		if (USE_SSE2(env->cg))
			new_op = gen_binop_float(env, node, new_op1, new_op2, new_rd_ia32_xMax);

		/* integer max: compare + conditional move with a > b condition */
		long pnc = pn_Cmp_Gt;
		if(!mode_is_signed(op_mode)) {
			pnc |= ia32_pn_Cmp_Unsigned;
		new_op = new_rd_ia32_CmpCMov(dbg, irg, block, new_op1, new_op2, new_op1, new_op2);
		set_ia32_pncode(new_op, pnc);
		set_ia32_am_support(new_op, ia32_am_None);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
/**
 * Creates an ia32 Min.
 *
 * @param env  The transformation environment
 * @param node The Min node to transform
 * @return the created ia32 Min node
 */
static ir_node *gen_Min(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	ir_mode *mode = get_irn_mode(node);
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *op1 = get_irn_n(node, 0);
	ir_node *op2 = get_irn_n(node, 1);
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);
	ir_mode *op_mode = get_irn_mode(op1);

	assert(get_mode_size_bits(mode) == 32);

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			new_op = gen_binop_float(env, node, op1, op2, new_rd_ia32_xMin);

		/* integer min: compare + conditional move with a < b condition */
		long pnc = pn_Cmp_Lt;
		if(!mode_is_signed(op_mode)) {
			pnc |= ia32_pn_Cmp_Unsigned;
		new_op = new_rd_ia32_CmpCMov(dbg, irg, block, new_op1, new_op2, new_op1, new_op2);
		set_ia32_pncode(new_op, pnc);
		set_ia32_am_support(new_op, ia32_am_None);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
/**
 * Creates an ia32 Sub with immediate.
 *
 * @param env      The transformation environment
 * @param node     The original Sub node
 * @param expr_op  The first operator
 * @param const_op The constant operator
 * @return The created ia32 Sub node
 */
static ir_node *gen_imm_Sub(ia32_transform_env_t *env, ir_node *node,
                            ir_node *expr_op, ir_node *const_op) {
	ir_node *new_op = NULL;
	tarval *tv = get_ia32_Immop_tarval(const_op);
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	tarval_classification_t class_tv, class_negtv;
	DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)

	/* try to optimize to inc/dec */
	if ((env->cg->opt & IA32_OPT_INCDEC) && tv && (get_ia32_op_type(const_op) == ia32_Const)) {
		/* optimize tarvals */
		class_tv = classify_tarval(tv);
		class_negtv = classify_tarval(tarval_neg(tv));

		if (class_tv == TV_CLASSIFY_ONE) { /* - 1 == DEC */
			DB((mod, LEVEL_2, "Sub(1) to Dec ... "));
			new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem);
		else if (class_negtv == TV_CLASSIFY_ONE) { /* - (-1) == INC */
			DB((mod, LEVEL_2, "Sub(-1) to Inc ... "));
			new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem);

	/* general case: Sub with immediate attribute taken from const_op */
	new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem);
	set_ia32_Immop_attr(new_op, const_op);
1103 * Creates an ia32 Sub.
1105 * @param env The transformation environment
1106 * @return The created ia32 Sub node
1108 static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) {
1109 ir_node *new_op = NULL;
1110 ir_graph *irg = env->irg;
1111 dbg_info *dbg = get_irn_dbg_info(node);
1112 ir_mode *mode = get_irn_mode(node);
1113 ir_node *block = transform_node(env, get_nodes_block(node));
1114 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1115 ir_node *nomem = new_NoMem();
1116 ir_node *op1 = get_Sub_left(node);
1117 ir_node *op2 = get_Sub_right(node);
1118 ir_node *new_op1 = transform_node(env, op1);
1119 ir_node *new_op2 = transform_node(env, op2);
1120 ir_node *expr_op, *imm_op;
1122 /* Check if immediate optimization is on and */
1123 /* if it's an operation with immediate. */
1124 imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(NULL, new_op2) : NULL;
1125 expr_op = get_expr_op(new_op1, new_op2);
1127 assert((expr_op || imm_op) && "invalid operands");
1129 if (mode_is_float(mode)) {
1131 if (USE_SSE2(env->cg))
1132 return gen_binop_float(env, node, op1, op2, new_rd_ia32_xSub);
1134 return gen_binop_float(env, node, op1, op2, new_rd_ia32_vfsub);
1138 /* No expr_op means, that we have two const - one symconst and */
1139 /* one tarval or another symconst - because this case is not */
1140 /* covered by constant folding */
1141 /* We need to check for: */
1142 /* 1) symconst - const -> becomes a LEA */
1143 /* 2) symconst - symconst -> becomes a const - LEA as the elf */
1144 /* linker doesn't support two symconsts */
1146 if (get_ia32_op_type(new_op1) == ia32_SymConst
1147 && get_ia32_op_type(new_op2) == ia32_SymConst) {
1148 /* this is the 2nd case */
1149 new_op = new_rd_ia32_Lea(dbg, irg, block, new_op1, noreg);
1150 set_ia32_am_sc(new_op, get_ia32_id_cnst(op2));
1151 set_ia32_am_sc_sign(new_op);
1152 set_ia32_am_flavour(new_op, ia32_am_OB);
1154 DBG_OPT_LEA3(op1, op2, node, new_op);
1156 /* this is the 1st case */
1157 new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg);
1159 DBG_OPT_LEA3(op1, op2, node, new_op);
1161 if (get_ia32_op_type(new_op1) == ia32_SymConst) {
1162 tarval *tv = get_ia32_cnst_tv(new_op2);
1163 long offs = get_tarval_long(tv);
1165 set_ia32_am_sc(new_op, get_ia32_id_cnst(new_op1));
1166 add_ia32_am_offs_int(new_op, -offs);
1167 set_ia32_am_flavour(new_op, ia32_am_O);
1168 set_ia32_am_support(new_op, ia32_am_Source);
1169 set_ia32_op_type(new_op, ia32_AddrModeS);
1170 } else if (get_ia32_op_type(new_op2) == ia32_SymConst) {
1171 tarval *tv = get_ia32_cnst_tv(new_op1);
1172 long offs = get_tarval_long(tv);
1174 add_ia32_am_offs_int(new_op, offs);
1175 set_ia32_am_sc(new_op, get_ia32_id_cnst(new_op2));
1176 set_ia32_am_sc_sign(new_op);
1177 set_ia32_am_flavour(new_op, ia32_am_O);
1178 set_ia32_am_support(new_op, ia32_am_Source);
1179 set_ia32_op_type(new_op, ia32_AddrModeS);
1181 tarval *tv1 = get_ia32_cnst_tv(new_op1);
1182 tarval *tv2 = get_ia32_cnst_tv(new_op2);
1183 tarval *restv = tarval_sub(tv1, tv2);
1185 DEBUG_ONLY(ir_fprintf(stderr, "Warning: sub with 2 consts not folded: %+F\n", node));
1187 new_op = new_rd_ia32_Const(dbg, irg, block);
1188 set_ia32_Const_tarval(new_op, restv);
1189 DBG_OPT_LEA3(new_op1, new_op2, node, new_op);
1194 } else if (imm_op) {
1195 /* This is expr - const */
1196 new_op = gen_imm_Sub(env, node, expr_op, imm_op);
1198 /* set AM support */
1199 set_ia32_am_support(new_op, ia32_am_Dest);
1201 /* This is a normal sub */
1202 new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem);
1204 /* set AM support */
1205 set_ia32_am_support(new_op, ia32_am_Full);
1209 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
1217 * Generates an ia32 DivMod with additional infrastructure for the
1218 * register allocator if needed.
1220 * @param env The transformation environment
1221 * @param dividend -no comment- :)
1222 * @param divisor -no comment- :)
1223 * @param dm_flav flavour_Div/Mod/DivMod
1224 * @return The created ia32 DivMod node
static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *node,
                                ir_node *dividend, ir_node *divisor,
                                ia32_op_flavour_t dm_flav) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *res, *proj_div, *proj_mod;
	ir_node *edx_node, *cltd;
	ir_node *in_keep[1];
	ir_node *mem, *new_mem;
	ir_node *projs[pn_DivMod_max];
	ir_node *new_dividend = transform_node(env, dividend);
	ir_node *new_divisor = transform_node(env, divisor);

	ia32_collect_Projs(node, projs, pn_DivMod_max);

	/* flavour_Div: memory comes from the Div node, the result mode
	   from its (mandatory) result Proj */
	mem = get_Div_mem(node);
	mode = get_irn_mode(be_get_Proj_for_pn(node, pn_Div_res));
	/* flavour_Mod: same, but from the Mod node */
	mem = get_Mod_mem(node);
	mode = get_irn_mode(be_get_Proj_for_pn(node, pn_Mod_res));
	case flavour_DivMod:
		mem = get_DivMod_mem(node);
		proj_div = be_get_Proj_for_pn(node, pn_DivMod_res_div);
		proj_mod = be_get_Proj_for_pn(node, pn_DivMod_res_mod);
		/* at least one result proj exists; take the mode from whichever is there */
		mode = proj_div ? get_irn_mode(proj_div) : get_irn_mode(proj_mod);

	new_mem = transform_node(env, mem);

	if (mode_is_signed(mode)) {
		/* in signed mode, we need to sign extend the dividend */
		cltd = new_rd_ia32_Cdq(dbg, irg, block, new_dividend);
		new_dividend = new_rd_Proj(dbg, irg, block, cltd, mode_Iu, pn_ia32_Cdq_EAX);
		edx_node = new_rd_Proj(dbg, irg, block, cltd, mode_Iu, pn_ia32_Cdq_EDX);
		/* unsigned: the high half (EDX) is simply a zero constant */
		edx_node = new_rd_ia32_Const(dbg, irg, block);
		add_irn_dep(edx_node, be_abi_get_start_barrier(env->cg->birg->abi));
		set_ia32_Const_type(edx_node, ia32_Const);
		set_ia32_Immop_tarval(edx_node, get_tarval_null(mode_Iu));

	if(mode_is_signed(mode)) {
		res = new_rd_ia32_IDiv(dbg, irg, block, new_dividend, new_divisor, edx_node, new_mem, dm_flav);
		res = new_rd_ia32_Div(dbg, irg, block, new_dividend, new_divisor, edx_node, new_mem, dm_flav);
	/* both quotient and remainder registers are clobbered */
	set_ia32_n_res(res, 2);

	/* Only one proj is used -> We must add a second proj and */
	/* connect this one to a Keep node to eat up the second   */
	/* destroyed register.                                    */
	/* We also renumber the Firm projs into ia32 projs.       */

	switch (get_irn_opcode(node)) {
		/* Div: add Proj-Keep for mod res */
		in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_ia32_Div_mod_res);
		be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
		/* Mod: add Proj-Keep for div res */
		in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_ia32_Div_div_res);
		be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
		/* DivMod: check, which Proj-Keep, we need to add */
		proj_div = be_get_Proj_for_pn(node, pn_DivMod_res_div);
		proj_mod = be_get_Proj_for_pn(node, pn_DivMod_res_mod);

		if (proj_div && proj_mod) {
			/* nothing to be done */
		else if (! proj_div && ! proj_mod) {
			assert(0 && "Missing DivMod result proj");
		else if (! proj_div) {
			/* We have only mod result: add div res Proj-Keep */
			in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_ia32_Div_div_res);
			be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);
			/* We have only div result: add mod res Proj-Keep */
			in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_ia32_Div_mod_res);
			be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep);

		assert(0 && "Div, Mod, or DivMod expected.");

	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
1333 * Wrapper for generate_DivMod. Sets flavour_Mod.
1335 * @param env The transformation environment
static ir_node *gen_Mod(ia32_transform_env_t *env, ir_node *node) {
	/* only the remainder result of the division is requested */
	return generate_DivMod(env, node, get_Mod_left(node),
	                       get_Mod_right(node), flavour_Mod);
1343 * Wrapper for generate_DivMod. Sets flavour_Div.
1345 * @param env The transformation environment
static ir_node *gen_Div(ia32_transform_env_t *env, ir_node *node) {
	/* only the quotient result of the division is requested */
	return generate_DivMod(env, node, get_Div_left(node),
	                       get_Div_right(node), flavour_Div);
1353 * Wrapper for generate_DivMod. Sets flavour_DivMod.
static ir_node *gen_DivMod(ia32_transform_env_t *env, ir_node *node) {
	/* both quotient and remainder results may be requested */
	return generate_DivMod(env, node, get_DivMod_left(node),
	                       get_DivMod_right(node), flavour_DivMod);
1363 * Creates an ia32 floating Div.
1365 * @param env The transformation environment
1366 * @return The created ia32 xDiv node
static ir_node *gen_Quot(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *op1 = get_Quot_left(node);
	ir_node *op2 = get_Quot_right(node);
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);

	if (USE_SSE2(env->cg)) {
		ir_mode *mode = get_irn_mode(op1);
		if (is_ia32_xConst(new_op2)) {
			/* divisor is a constant: fold it into the xDiv as an
			   immediate attribute instead of a register operand */
			new_op = new_rd_ia32_xDiv(dbg, irg, block, noreg, noreg, new_op1, noreg, nomem);
			set_ia32_am_support(new_op, ia32_am_None);
			set_ia32_Immop_attr(new_op, new_op2);
			new_op = new_rd_ia32_xDiv(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem);
			// Matze: disabled for now, spillslot coalescer fails
			//set_ia32_am_support(new_op, ia32_am_Source);
		set_ia32_ls_mode(new_op, mode);
		/* x87 floating point division */
		new_op = new_rd_ia32_vfdiv(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem);
		// Matze: disabled for now (spillslot coalescer fails)
		//set_ia32_am_support(new_op, ia32_am_Source);
	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
1404 * Creates an ia32 Shl.
1406 * @param env The transformation environment
1407 * @return The created ia32 Shl node
static ir_node *gen_Shl(ia32_transform_env_t *env, ir_node *node) {
	/* shift left, lowered via the common shift/rotate helper */
	return gen_shift_binop(env, node, get_Shl_left(node), get_Shl_right(node),
1417 * Creates an ia32 Shr.
1419 * @param env The transformation environment
1420 * @return The created ia32 Shr node
static ir_node *gen_Shr(ia32_transform_env_t *env, ir_node *node) {
	/* logical (zero-filling) shift right */
	return gen_shift_binop(env, node, get_Shr_left(node),
	                       get_Shr_right(node), new_rd_ia32_Shr);
1430 * Creates an ia32 Shrs.
1432 * @param env The transformation environment
1433 * @return The created ia32 Shrs node
static ir_node *gen_Shrs(ia32_transform_env_t *env, ir_node *node) {
	/* arithmetic (sign-preserving) shift right */
	return gen_shift_binop(env, node, get_Shrs_left(node),
	                       get_Shrs_right(node), new_rd_ia32_Shrs);
1443 * Creates an ia32 RotL.
1445 * @param env The transformation environment
1446 * @param op1 The first operator
1447 * @param op2 The second operator
1448 * @return The created ia32 RotL node
static ir_node *gen_RotL(ia32_transform_env_t *env, ir_node *node,
		ir_node *op1, ir_node *op2) {
	/* op1/op2 are passed explicitly because gen_Rot may rewrite the
	   operands when it turns a RotL pattern into a RotR */
	return gen_shift_binop(env, node, op1, op2, new_rd_ia32_RotL);
1458 * Creates an ia32 RotR.
1459 * NOTE: There is no RotR with immediate because this would always be a RotL
1460 * "imm-mode_size_bits" which can be pre-calculated.
1462 * @param env The transformation environment
1463 * @param op1 The first operator
1464 * @param op2 The second operator
1465 * @return The created ia32 RotR node
static ir_node *gen_RotR(ia32_transform_env_t *env, ir_node *node, ir_node *op1,
	return gen_shift_binop(env, node, op1, op2, new_rd_ia32_RotR); /* rotate right via the common shift helper */
1475 * Creates an ia32 RotR or RotL (depending on the found pattern).
1477 * @param env The transformation environment
1478 * @return The created ia32 RotL or RotR node
static ir_node *gen_Rot(ia32_transform_env_t *env, ir_node *node) {
	ir_node *rotate = NULL;
	ir_node *op1 = get_Rot_left(node);
	ir_node *op2 = get_Rot_right(node);

	/* Firm has only Rot (which is a RotL), so we are looking for a right (op2)
	   operand "-e+mode_size_bits" (it's an already modified "mode_size_bits-e",
	   that means we can create a RotR instead of an Add and a RotL */

	ir_node *pred = get_Proj_pred(op2);

	if (is_ia32_Add(pred)) {
		/* in 0/1 of an ia32 binop are base/index; in 2 is the first
		   data operand -- presumably the non-immediate Add operand here,
		   TODO confirm against the node spec */
		ir_node *pred_pred = get_irn_n(pred, 2);
		tarval *tv = get_ia32_Immop_tarval(pred);
		ir_mode *mode = get_irn_mode(node);
		long bits = get_mode_size_bits(mode);

		if (is_Proj(pred_pred)) {
			pred_pred = get_Proj_pred(pred_pred);

		/* pattern matched: op2 == Minus(e) + bits
		   ==> Rot(op1, op2) == RotR(op1, e) */
		if (is_ia32_Minus(pred_pred) &&
			tarval_is_long(tv) &&
			get_tarval_long(tv) == bits)
			DB((env->mod, LEVEL_1, "RotL into RotR ... "));
			rotate = gen_RotR(env, node, op1, get_irn_n(pred_pred, 2));

	/* no RotR pattern found: emit a plain RotL */
	rotate = gen_RotL(env, node, op1, op2);
1523 * Transforms a Minus node.
1525 * @param env The transformation environment
1526 * @param op The Minus operand
1527 * @return The created ia32 Minus node
ir_node *gen_Minus_ex(ia32_transform_env_t *env, ir_node *node, ir_node *op) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_mode *mode = get_irn_mode(node);

	if (mode_is_float(mode)) {
		ir_node *new_op = transform_node(env, op);
		if (USE_SSE2(env->cg)) {
			/* SSE: negate by toggling the sign bit, i.e. xor with the
			   0x8000... sign-mask constant loaded from memory */
			ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
			ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
			ir_node *nomem = new_rd_NoMem(irg);

			res = new_rd_ia32_xEor(dbg, irg, block, noreg_gp, noreg_gp, new_op, noreg_fp, nomem);

			/* pick the single or double precision sign mask */
			size = get_mode_size_bits(mode);
			name = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN);

			set_ia32_am_sc(res, name);
			set_ia32_op_type(res, ia32_AddrModeS);
			set_ia32_ls_mode(res, mode);
			/* x87: dedicated change-sign instruction */
			res = new_rd_ia32_vfchs(dbg, irg, block, new_op);
		/* integer negate */
		res = gen_unop(env, node, op, new_rd_ia32_Minus);

	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
1567 * Transforms a Minus node.
1569 * @param env The transformation environment
1570 * @return The created ia32 Minus node
static ir_node *gen_Minus(ia32_transform_env_t *env, ir_node *node) {
	/* thin wrapper; gen_Minus_ex is non-static so it can be reused */
	return gen_Minus_ex(env, node, get_Minus_op(node));
1578 * Transforms a Not node.
1580 * @param env The transformation environment
1581 * @return The created ia32 Not node
static ir_node *gen_Not(ia32_transform_env_t *env, ir_node *node) {
	ir_mode *mode = get_irn_mode(node);
	ir_node *op = get_Not_op(node);

	/* Not is integer-only; float sign handling is done elsewhere */
	assert (! mode_is_float(mode));
	return gen_unop(env, node, op, new_rd_ia32_Not);
1594 * Transforms an Abs node.
1596 * @param env The transformation environment
1597 * @return The created ia32 Abs node
static ir_node *gen_Abs(ia32_transform_env_t *env, ir_node *node) {
	ir_node *res, *p_eax, *p_edx;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_mode *mode = get_irn_mode(node);
	ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg);
	ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg);
	ir_node *nomem = new_NoMem();
	ir_node *op = get_Abs_op(node);
	ir_node *new_op = transform_node(env, op);

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg)) {
			/* SSE: clear the sign bit by and-ing with the 0x7FFF... mask
			   loaded from memory */
			res = new_rd_ia32_xAnd(dbg,irg, block, noreg_gp, noreg_gp, new_op, noreg_fp, nomem);

			/* pick the single or double precision abs mask */
			size = get_mode_size_bits(mode);
			name = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS);

			set_ia32_am_sc(res, name);

			SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));

			set_ia32_op_type(res, ia32_AddrModeS);
			set_ia32_ls_mode(res, mode);
			/* x87: dedicated abs instruction */
			res = new_rd_ia32_vfabs(dbg, irg, block, new_op);
			SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
		/* integer abs via the branchless sign trick:
		     cdq:  EDX = sign mask of EAX (all ones if negative, else 0)
		     abs(x) = (x ^ mask) - mask */
		res = new_rd_ia32_Cdq(dbg, irg, block, new_op);
		SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));

		p_eax = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_EAX);
		p_edx = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_EDX);

		res = new_rd_ia32_Eor(dbg, irg, block, noreg_gp, noreg_gp, p_eax, p_edx, nomem);
		SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));

		res = new_rd_ia32_Sub(dbg, irg, block, noreg_gp, noreg_gp, res, p_edx, nomem);
		SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
1653 * Transforms a Load.
1655 * @param env The transformation environment
1656 * @return the created ia32 Load node
static ir_node *gen_Load(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_mode *mode = get_Load_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *ptr = get_Load_ptr(node);
	ir_node *new_ptr = transform_node(env, ptr);
	ir_node *lptr = new_ptr;
	ir_node *mem = get_Load_mem(node);
	ir_node *new_mem = transform_node(env, mem);
	ia32_am_flavour_t am_flav = ia32_am_B;
	ir_node *projs[pn_Load_max];

	ia32_collect_Projs(node, projs, pn_Load_max);

	check for special case: the loaded value might not be used (optimized, volatile, ...)
	we add a Proj + Keep for volatile loads and ignore all other cases
	if (! be_get_Proj_for_pn(node, pn_Load_res) && get_Load_volatility(node) == volatility_is_volatile) {
		/* add a result proj and a Keep to produce a pseudo use */
		ir_node *proj = new_r_Proj(irg, block, node, mode_Iu, pn_ia32_Load_res);
		be_new_Keep(arch_get_irn_reg_class(env->cg->arch_env, proj, -1), irg, block, 1, &proj);

	/* address might be a constant (symconst or absolute address) */
	if (is_ia32_Const(new_ptr)) {

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg)) {
			new_op = new_rd_ia32_xLoad(dbg, irg, block, lptr, noreg, new_mem);
			new_op = new_rd_ia32_vfld(dbg, irg, block, lptr, noreg, new_mem);
		/* integer load */
		new_op = new_rd_ia32_Load(dbg, irg, block, lptr, noreg, new_mem);

	/* base is a constant address */
	if (get_ia32_op_type(new_ptr) == ia32_SymConst) {
		/* symbolic address goes into the address-mode symconst attribute */
		set_ia32_am_sc(new_op, get_ia32_id_cnst(new_ptr));
		am_flav = ia32_am_N;
		/* absolute numeric address becomes the address-mode offset */
		tarval *tv = get_ia32_cnst_tv(new_ptr);
		long offs = get_tarval_long(tv);

		add_ia32_am_offs_int(new_op, offs);
		am_flav = ia32_am_O;

	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, am_flav);
	set_ia32_ls_mode(new_op, mode);

	/* make sure we are scheduled behind the initial IncSP/Barrier
	 * to avoid spills being placed before it
	 */
	if(block == get_irg_start_block(irg)) {
		add_irn_dep(new_op, get_irg_frame(irg));

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
1737 * Transforms a Store.
1739 * @param env The transformation environment
1740 * @return the created ia32 Store node
static ir_node *gen_Store(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *ptr = get_Store_ptr(node);
	ir_node *new_ptr = transform_node(env, ptr);
	ir_node *sptr = new_ptr;
	ir_node *val = get_Store_value(node);
	ir_node *new_val = transform_node(env, val);
	ir_node *mem = get_Store_mem(node);
	ir_node *new_mem = transform_node(env, mem);
	ir_mode *mode = get_irn_mode(val);
	ir_node *sval = new_val;
	ia32_am_flavour_t am_flav = ia32_am_B;
	ia32_immop_type_t immop = ia32_ImmNone;

	if (! mode_is_float(mode)) {
		/* in case of storing a const (but not a symconst) -> make it an attribute */
		if (is_ia32_Cnst(new_val)) {
			switch (get_ia32_op_type(new_val)) {
				/* plain numeric constant */
				immop = ia32_ImmConst;
				/* symbolic constant (address) */
				immop = ia32_ImmSymConst;
				assert(0 && "unsupported Const type");

	/* address might be a constant (symconst or absolute address) */
	if (is_ia32_Const(new_ptr)) {

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg)) {
			new_op = new_rd_ia32_xStore(dbg, irg, block, sptr, noreg, sval, new_mem);
			new_op = new_rd_ia32_vfst(dbg, irg, block, sptr, noreg, sval, new_mem);
	/* 8 bit stores have their own node -- presumably because only a
	   subset of the gp registers is byte-addressable; confirm */
	else if (get_mode_size_bits(mode) == 8) {
		new_op = new_rd_ia32_Store8Bit(dbg, irg, block, sptr, noreg, sval, new_mem);
		new_op = new_rd_ia32_Store(dbg, irg, block, sptr, noreg, sval, new_mem);

	/* stored const is an immediate value */
	if (! mode_is_float(mode) && is_ia32_Cnst(new_val)) {
		set_ia32_Immop_attr(new_op, new_val);

	/* base is a constant address */
	if (get_ia32_op_type(new_ptr) == ia32_SymConst) {
		set_ia32_am_sc(new_op, get_ia32_id_cnst(new_ptr));
		am_flav = ia32_am_N;
		tarval *tv = get_ia32_cnst_tv(new_ptr);
		long offs = get_tarval_long(tv);

		add_ia32_am_offs_int(new_op, offs);
		am_flav = ia32_am_O;

	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, am_flav);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_immop_type(new_op, immop);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
1834 * Transforms a Cond -> Proj[b] -> Cmp into a CondJmp, CondJmp_i or TestJmp
1836 * @param env The transformation environment
1837 * @return The transformed node.
1839 static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) {
1840 ir_graph *irg = env->irg;
1841 dbg_info *dbg = get_irn_dbg_info(node);
1842 ir_node *block = transform_node(env, get_nodes_block(node));
1843 ir_node *sel = get_Cond_selector(node);
1844 ir_mode *sel_mode = get_irn_mode(sel);
1845 ir_node *res = NULL;
1846 ir_node *noreg = ia32_new_NoReg_gp(env->cg);
1847 ir_node *cnst, *expr;
1849 if (is_Proj(sel) && sel_mode == mode_b) {
1850 ir_node *nomem = new_NoMem();
1851 ir_node *pred = get_Proj_pred(sel);
1852 ir_node *cmp_a = get_Cmp_left(pred);
1853 ir_node *new_cmp_a = transform_node(env, cmp_a);
1854 ir_node *cmp_b = get_Cmp_right(pred);
1855 ir_node *new_cmp_b = transform_node(env, cmp_b);
1856 ir_mode *cmp_mode = get_irn_mode(cmp_a);
1858 int pnc = get_Proj_proj(sel);
1859 if(mode_is_float(cmp_mode) || !mode_is_signed(cmp_mode)) {
1860 pnc |= ia32_pn_Cmp_Unsigned;
1863 /* check if we can use a CondJmp with immediate */
1864 cnst = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(new_cmp_a, new_cmp_b) : NULL;
1865 expr = get_expr_op(new_cmp_a, new_cmp_b);
1867 if (cnst != NULL && expr != NULL) {
1868 /* immop has to be the right operand, we might need to flip pnc */
1869 if(cnst != new_cmp_b) {
1870 pnc = get_inversed_pnc(pnc);
1873 if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) {
1874 if (get_ia32_op_type(cnst) == ia32_Const &&
1875 classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL)
1877 /* a Cmp A =/!= 0 */
1878 ir_node *op1 = expr;
1879 ir_node *op2 = expr;
1880 const char *cnst = NULL;
1882 /* check, if expr is an only once used And operation */
1883 if (is_ia32_And(expr) && get_irn_n_edges(expr)) {
1884 op1 = get_irn_n(expr, 2);
1885 op2 = get_irn_n(expr, 3);
1887 cnst = (is_ia32_ImmConst(expr) || is_ia32_ImmSymConst(expr)) ? get_ia32_cnst(expr) : NULL;
1889 res = new_rd_ia32_TestJmp(dbg, irg, block, op1, op2);
1890 set_ia32_pncode(res, pnc);
1893 copy_ia32_Immop_attr(res, expr);
1896 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
1901 if (mode_is_float(cmp_mode)) {
1903 if (USE_SSE2(env->cg)) {
1904 res = new_rd_ia32_xCondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem);
1905 set_ia32_ls_mode(res, cmp_mode);
1911 res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem);
1913 set_ia32_Immop_attr(res, cnst);
1916 ir_mode *cmp_mode = get_irn_mode(cmp_a);
1918 if (mode_is_float(cmp_mode)) {
1920 if (USE_SSE2(env->cg)) {
1921 res = new_rd_ia32_xCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem);
1922 set_ia32_ls_mode(res, cmp_mode);
1925 res = new_rd_ia32_vfCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem);
1926 proj_eax = new_r_Proj(irg, block, res, mode_Iu, pn_ia32_vfCondJmp_temp_reg_eax);
1927 be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, &proj_eax);
1931 res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem);
1932 set_ia32_commutative(res);
1936 set_ia32_pncode(res, pnc);
1937 // Matze: disabled for now, because the default collect_spills_walker
1938 // is not able to detect the mode of the spilled value
1939 // moreover, the lea optimize phase freely exchanges left/right
1940 // without updating the pnc
1941 //set_ia32_am_support(res, ia32_am_Source);
1944 /* determine the smallest switch case value */
1945 int switch_min = INT_MAX;
1946 const ir_edge_t *edge;
1947 ir_node *new_sel = transform_node(env, sel);
1949 foreach_out_edge(node, edge) {
1950 int pn = get_Proj_proj(get_edge_src_irn(edge));
1951 switch_min = pn < switch_min ? pn : switch_min;
1955 /* if smallest switch case is not 0 we need an additional sub */
1956 res = new_rd_ia32_Lea(dbg, irg, block, new_sel, noreg);
1957 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
1958 add_ia32_am_offs_int(res, -switch_min);
1959 set_ia32_am_flavour(res, ia32_am_OB);
1960 set_ia32_am_support(res, ia32_am_Source);
1961 set_ia32_op_type(res, ia32_AddrModeS);
1964 res = new_rd_ia32_SwitchJmp(dbg, irg, block, switch_min ? res : new_sel, mode_T);
1965 set_ia32_pncode(res, get_Cond_defaultProj(node));
1968 SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
1975 * Transforms a CopyB node.
1977 * @param env The transformation environment
1978 * @return The transformed node.
static ir_node *gen_CopyB(ia32_transform_env_t *env, ir_node *node) {
	ir_node *res = NULL;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *src = get_CopyB_src(node);
	ir_node *new_src = transform_node(env, src);
	ir_node *dst = get_CopyB_dst(node);
	ir_node *new_dst = transform_node(env, dst);
	ir_node *mem = get_CopyB_mem(node);
	ir_node *new_mem = transform_node(env, mem);
	int size = get_type_size_bytes(get_CopyB_type(node));
	ir_mode *dst_mode = get_irn_mode(dst);
	ir_mode *src_mode = get_irn_mode(src);

	/* If we have to copy more than 32 dwords (128 bytes), we use REP MOVSx */
	/* and then we need the size explicitly in ECX.                         */
	if (size >= 32 * 4) {
		rem = size & 0x3; /* size % 4: trailing bytes not covered by dword moves */

		/* build the explicit copy-count constant */
		res = new_rd_ia32_Const(dbg, irg, block);
		add_irn_dep(res, be_abi_get_start_barrier(env->cg->birg->abi));
		set_ia32_op_type(res, ia32_Const);
		set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is));

		res = new_rd_ia32_CopyB(dbg, irg, block, new_dst, new_src, res, new_mem);
		set_ia32_Immop_tarval(res, new_tarval_from_long(rem, mode_Is));

		/* ok: now attach Proj's because rep movsd will destroy esi, edi and ecx */
		in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_DST);
		in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_SRC);
		in[2] = new_r_Proj(irg, block, res, mode_Iu, pn_ia32_CopyB_CNT);
		be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 3, in);
		/* small copy: emit CopyB_i with the size as an immediate */
		res = new_rd_ia32_CopyB_i(dbg, irg, block, new_dst, new_src, new_mem);
		set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is));
		set_ia32_immop_type(res, ia32_ImmConst);

		/* ok: now attach Proj's because movsd will destroy esi and edi */
		in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_i_DST);
		in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_i_SRC);
		be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 2, in);

	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
2036 * Transforms a Mux node into CMov.
2038 * @param env The transformation environment
2039 * @return The transformed node.
2041 static ir_node *gen_Mux(ia32_transform_env_t *env, ir_node *node) {
2042 ir_node *new_op = new_rd_ia32_CMov(env->dbg, env->irg, env->block, \
2043 get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env->mode);
2045 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
2051 typedef ir_node *cmov_func_t(dbg_info *db, ir_graph *irg, ir_node *block,
2052 ir_node *cmp_a, ir_node *cmp_b, ir_node *psi_true,
2053 ir_node *psi_default);
2056 * Transforms a Psi node into CMov.
2058 * @param env The transformation environment
2059 * @return The transformed node.
static ir_node *gen_Psi(ia32_transform_env_t *env, ir_node *node) {
	ia32_code_gen_t *cg = env->cg;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	/* NOTE(review): uses the Mux accessor on a Psi node -- presumably valid
	   for a one-condition Psi because of shared node layout; confirm */
	ir_node *cmp_proj = get_Mux_sel(node);
	ir_node *psi_true = get_Psi_val(node, 0);
	ir_node *psi_default = get_Psi_default(node);
	ir_node *new_psi_true = transform_node(env, psi_true);
	ir_node *new_psi_default = transform_node(env, psi_default);
	ir_node *noreg = ia32_new_NoReg_gp(cg);
	ir_node *nomem = new_rd_NoMem(irg);
	ir_node *cmp, *cmp_a, *cmp_b, *and1, *and2, *new_op = NULL;
	ir_node *new_cmp_a, *new_cmp_b;

	assert(get_irn_mode(cmp_proj) == mode_b && "Condition for Psi must have mode_b");

	cmp = get_Proj_pred(cmp_proj);
	cmp_a = get_Cmp_left(cmp);
	cmp_b = get_Cmp_right(cmp);
	cmp_mode = get_irn_mode(cmp_a);
	new_cmp_a = transform_node(env, cmp_a);
	new_cmp_b = transform_node(env, cmp_b);

	pnc = get_Proj_proj(cmp_proj);
	if (mode_is_float(cmp_mode) || !mode_is_signed(cmp_mode)) {
		pnc |= ia32_pn_Cmp_Unsigned;

	if (mode_is_float(mode)) {
		/* floating point psi */

		/* 1st case: compare operands are float too */

		/* psi(cmp(a, b), t, f) can be done as: */
		/* tmp  = cmp a, b                      */
		/* tmp2 = t and tmp                     */
		/* tmp3 = f and not tmp                 */
		/* res  = tmp2 or tmp3                  */

		/* in case the compare operands are int, we move them into xmm register */
		if (! mode_is_float(get_irn_mode(cmp_a))) {
			new_cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, new_cmp_a, node, mode_D);
			new_cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, new_cmp_b, node, mode_D);

			pnc |= 8;  /* transform integer compare to fp compare */

		/* xCmp produces an all-ones/all-zeros mask per the predicate */
		new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, new_cmp_a, new_cmp_b, nomem);
		set_ia32_pncode(new_op, pnc);
		set_ia32_am_support(new_op, ia32_am_Source);
		SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));

		/* tmp2 = t and mask */
		and1 = new_rd_ia32_xAnd(dbg, irg, block, noreg, noreg, new_psi_true, new_op, nomem);
		set_ia32_am_support(and1, ia32_am_None);
		set_ia32_commutative(and1);
		SET_IA32_ORIG_NODE(and1, ia32_get_old_node_name(cg, node));

		/* tmp3 = f and not mask */
		and2 = new_rd_ia32_xAndNot(dbg, irg, block, noreg, noreg, new_op, new_psi_default, nomem);
		set_ia32_am_support(and2, ia32_am_None);
		set_ia32_commutative(and2);
		SET_IA32_ORIG_NODE(and2, ia32_get_old_node_name(cg, node));

		/* res = tmp2 or tmp3 */
		new_op = new_rd_ia32_xOr(dbg, irg, block, noreg, noreg, and1, and2, nomem);
		set_ia32_am_support(new_op, ia32_am_None);
		set_ia32_commutative(new_op);
		SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));

		/* x87: conditional move */
		new_op = new_rd_ia32_vfCMov(dbg, irg, block, new_cmp_a, new_cmp_b, new_psi_true, new_psi_default);
		set_ia32_pncode(new_op, pnc);
		SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
	/* integer psi: choose the Set/CMov constructor pair first */
	construct_binop_func *set_func = NULL;
	cmov_func_t *cmov_func = NULL;

	if (mode_is_float(get_irn_mode(cmp_a))) {
		/* 1st case: compare operands are floats */
		/* SSE: compare sets int flags */
		set_func = new_rd_ia32_xCmpSet;
		cmov_func = new_rd_ia32_xCmpCMov;
		/* x87 compare */
		set_func = new_rd_ia32_vfCmpSet;
		cmov_func = new_rd_ia32_vfCmpCMov;

		pnc &= ~0x8; /* fp compare -> int compare */
		/* 2nd case: compare operand are integer too */
		set_func = new_rd_ia32_CmpSet;
		cmov_func = new_rd_ia32_CmpCMov;

	/* check for special case first: And/Or -- Cmp with 0 -- Psi */
	if (is_ia32_Const_0(new_cmp_b) && is_Proj(new_cmp_a) && (is_ia32_And(get_Proj_pred(new_cmp_a)) || is_ia32_Or(get_Proj_pred(new_cmp_a)))) {
		if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) {
			/* first case for SETcc: default is 0, set to 1 iff condition is true */
			new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, new_cmp_a);
			set_ia32_pncode(new_op, pnc);
		else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) {
			/* second case for SETcc: default is 1, set to 0 iff condition is true: */
			/* we invert condition and set default to 0                             */
			new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, new_cmp_a);
			set_ia32_pncode(new_op, get_inversed_pnc(pnc));
			/* otherwise: use CMOVcc */
			new_op = new_rd_ia32_PsiCondCMov(dbg, irg, block, new_cmp_a, new_psi_true, new_psi_default);
			set_ia32_pncode(new_op, pnc);

		SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));

		if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) {
			/* first case for SETcc: default is 0, set to 1 iff condition is true */
			new_op = gen_binop(env, node, cmp_a, cmp_b, set_func);
			set_ia32_pncode(new_op, pnc);
			set_ia32_am_support(new_op, ia32_am_Source);
		else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) {
			/* second case for SETcc: default is 1, set to 0 iff condition is true: */
			/* we invert condition and set default to 0                             */
			new_op = gen_binop(env, node, cmp_a, cmp_b, set_func);
			set_ia32_pncode(new_op, get_inversed_pnc(pnc));
			set_ia32_am_support(new_op, ia32_am_Source);
			/* otherwise: use CMOVcc */
			new_op = cmov_func(dbg, irg, block, new_cmp_a, new_cmp_b, new_psi_true, new_psi_default);
			set_ia32_pncode(new_op, pnc);
			SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
2217 * Following conversion rules apply:
2221 * 1) n bit -> m bit n > m (downscale)
2223 * 2) n bit -> m bit n == m (sign change)
2225 * 3) n bit -> m bit n < m (upscale)
2226 * a) source is signed: movsx
 * b) source is unsigned: zero-extend (and with lower bits set)
2231 * SSE(1/2) convert to float or double (cvtsi2ss/sd)
2235 * SSE(1/2) convert from float or double to 32bit int (cvtss/sd2si)
2239 * SSE(1/2) convert from float or double to double or float (cvtss/sd2sd/ss)
2240 * x87 is mode_E internally, conversions happen only at load and store
2241 * in non-strict semantic
2245 * Create a conversion from x87 state register to general purpose.
/**
 * Create a conversion from the x87 state register to a general purpose
 * register: the value is written to the stack frame with a vfist
 * (fp-to-int store) and read back with an integer Load.
 *
 * @param env   The transformation environment
 * @param node  The Conv node to transform
 * @return a Proj(mode_Iu) of the integer Load that yields the result
 */
static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_node *node) {
	ia32_code_gen_t *cg = env->cg;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *op = get_Conv_op(node);
	ir_node *new_op = transform_node(env, op);
	ir_node *fist, *load;

	/* store the fp value as an integer into the frame */
	fist = new_rd_ia32_vfist(dbg, irg, block, get_irg_frame(irg), noreg, new_op, new_NoMem());
	set_ia32_use_frame(fist);
	set_ia32_am_support(fist, ia32_am_Dest);
	set_ia32_op_type(fist, ia32_AddrModeD);
	set_ia32_am_flavour(fist, ia32_am_B);
	set_ia32_ls_mode(fist, mode_Iu);
	SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(cg, node));

	/* reload the stored integer into a gp register; the fist node doubles
	 * as the memory dependency */
	load = new_rd_ia32_Load(dbg, irg, block, get_irg_frame(irg), noreg, fist);
	set_ia32_use_frame(load);
	set_ia32_am_support(load, ia32_am_Source);
	set_ia32_op_type(load, ia32_AddrModeS);
	set_ia32_am_flavour(load, ia32_am_B);
	set_ia32_ls_mode(load, mode_Iu);
	SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(cg, node));

	return new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res);
2281 * Create a conversion from general purpose to x87 register
/**
 * Create a conversion from a general purpose register to the x87 unit:
 * the value is widened to 32 bit if necessary, stored to the frame,
 * and loaded into the fpu with a vfild (int-to-fp load).
 *
 * @param env       The transformation environment
 * @param node      The Conv node to transform
 * @param src_mode  Mode of the integer source value
 * @return a Proj of the vfild holding the fp result
 */
static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_node *node, ir_mode *src_mode) {
	ia32_code_gen_t *cg = env->cg;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	ir_node *op = get_Conv_op(node);
	ir_node *new_op = transform_node(env, op);
	ir_node *fild, *store;
	/* NOTE(review): the declaration of src_bits appears to have been
	 * dropped by extraction; it is used below. */

	/* first convert to 32 bit if necessary */
	src_bits = get_mode_size_bits(src_mode);
	if (src_bits == 8) {
		/* 8-bit sources need the dedicated 8-bit conv node */
		new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, new_op, nomem);
		set_ia32_am_support(new_op, ia32_am_Source);
		set_ia32_ls_mode(new_op, src_mode);
		SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));
	} else if (src_bits < 32) {
		new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, new_op, nomem);
		set_ia32_am_support(new_op, ia32_am_Source);
		set_ia32_ls_mode(new_op, src_mode);
		SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node));

	/* spill the (now 32 bit) integer to the frame ... */
	store = new_rd_ia32_Store(dbg, irg, block, get_irg_frame(irg), noreg, new_op, nomem);
	set_ia32_use_frame(store);
	set_ia32_am_support(store, ia32_am_Dest);
	set_ia32_op_type(store, ia32_AddrModeD);
	set_ia32_am_flavour(store, ia32_am_OB);
	set_ia32_ls_mode(store, mode_Iu);

	/* ... and load it into the x87 unit, using the store as memory dep */
	fild = new_rd_ia32_vfild(dbg, irg, block, get_irg_frame(irg), noreg, store);
	set_ia32_use_frame(fild);
	set_ia32_am_support(fild, ia32_am_Source);
	set_ia32_op_type(fild, ia32_AddrModeS);
	set_ia32_am_flavour(fild, ia32_am_OB);
	set_ia32_ls_mode(fild, mode);

	return new_r_Proj(irg, block, fild, mode_F, pn_ia32_vfild_res);
2332 * Transforms a Conv node.
2334 * @param env The transformation environment
2335 * @return The created ia32 Conv node
/**
 * Transforms a Conv node into the matching ia32 conversion:
 * float<->float (SSE Conv_FP2FP or x87 no-op), float->int
 * (Conv_FP2I or x87 fist/load), int->float (Conv_I2FP or x87
 * store/fild) and int->int (Conv_I2I / Conv_I2I8Bit on the smaller
 * of the two modes).
 *
 * NOTE(review): several else-branches/closing braces of this function
 * appear to have been dropped by extraction; the comments below cover
 * only the visible code.
 *
 * @param env   The transformation environment
 * @param node  The Conv node to transform
 * @return The created ia32 Conv node (or a delegated x87 conversion)
 */
static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *op = get_Conv_op(node);
	ir_mode *src_mode = get_irn_mode(op);
	ir_mode *tgt_mode = get_irn_mode(node);
	int src_bits = get_mode_size_bits(src_mode);
	int tgt_bits = get_mode_size_bits(tgt_mode);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(irg);
	ir_node *new_op = transform_node(env, op);
	DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)

	if (src_mode == tgt_mode) {
		/* this should be optimized already, but who knows... */
		DEBUG_ONLY(ir_fprintf(stderr, "Debug warning: conv %+F is pointless\n", node));
		DB((mod, LEVEL_1, "killed Conv(mode, mode) ..."));

	if (mode_is_float(src_mode)) {
		/* we convert from float ... */
		if (mode_is_float(tgt_mode)) {
			/* ... to float */
			if (USE_SSE2(env->cg)) {
				DB((mod, LEVEL_1, "create Conv(float, float) ..."));
				res = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, new_op, nomem);
				set_ia32_ls_mode(res, tgt_mode);
				/* x87 works on mode_E internally, so float<->float is a no-op */
				// Matze: TODO what about strict convs?
				DB((mod, LEVEL_1, "killed Conv(float, float) ..."));

			/* ... to int */
			DB((mod, LEVEL_1, "create Conv(float, int) ..."));
			if (USE_SSE2(env->cg)) {
				res = new_rd_ia32_Conv_FP2I(dbg, irg, block, noreg, noreg, new_op, nomem);
				set_ia32_ls_mode(res, src_mode);
				/* x87 path: go through the frame (fist + load) */
				return gen_x87_fp_to_gp(env, node);

		/* we convert from int ... */
		if (mode_is_float(tgt_mode)) {
			/* ... to float */
			DB((mod, LEVEL_1, "create Conv(int, float) ..."));
			if (USE_SSE2(env->cg)) {
				res = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, new_op, nomem);
				set_ia32_ls_mode(res, tgt_mode);
				if(src_bits == 32) {
					/* only full-width sources can fold a source operand */
					set_ia32_am_support(res, ia32_am_Source);
				/* x87 path: go through the frame (store + fild) */
				return gen_x87_gp_to_fp(env, node, src_mode);

			/* ... to int: work on the smaller of the two modes */
			ir_mode *smaller_mode;

			if (src_bits == tgt_bits) {
				DB((mod, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", src_mode, tgt_mode));

			if(src_bits < tgt_bits) {
				smaller_mode = src_mode;
				smaller_bits = src_bits;
				smaller_mode = tgt_mode;
				smaller_bits = tgt_bits;

			// The following is not correct, we can't change the mode,
			// maybe others are using the load too
			// better move this to a separate phase!
			if(is_Proj(new_op)) {
				/* load operations do already sign/zero extend, so we have
				 * nothing left to do */
				ir_node *pred = get_Proj_pred(new_op);
				if(is_ia32_Load(pred)) {
					set_ia32_ls_mode(pred, smaller_mode);

			DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode));
			if (smaller_bits == 8) {
				res = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, new_op, nomem);
				set_ia32_ls_mode(res, smaller_mode);
				res = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, new_op, nomem);
				set_ia32_ls_mode(res, smaller_mode);
			set_ia32_am_support(res, ia32_am_Source);

	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
2450 /********************************************
2453 * | |__ ___ _ __ ___ __| | ___ ___
2454 * | '_ \ / _ \ '_ \ / _ \ / _` |/ _ \/ __|
2455 * | |_) | __/ | | | (_) | (_| | __/\__ \
2456 * |_.__/ \___|_| |_|\___/ \__,_|\___||___/
2458 ********************************************/
/**
 * Transforms a be_StackParam into a frame-based ia32 load:
 * xLoad (SSE float), vfld (x87 float) or Load (integer).
 * The load is marked rematerializable.
 *
 * @param env   The transformation environment
 * @param node  The be_StackParam node
 * @return a Proj of the created load yielding the parameter value
 */
static ir_node *gen_be_StackParam(ia32_transform_env_t *env, ir_node *node) {
	ir_node *new_op = NULL;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_rd_NoMem(env->irg);
	ir_node *ptr = get_irn_n(node, 0);
	ir_node *new_ptr = transform_node(env, ptr);
	ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node);
	ir_mode *load_mode = get_irn_mode(node);
	/* NOTE(review): declarations of proj_mode/pn_res appear to have been
	 * dropped by extraction; they are assigned below. */

	if (mode_is_float(load_mode)) {
		if (USE_SSE2(env->cg)) {
			new_op = new_rd_ia32_xLoad(dbg, irg, block, new_ptr, noreg, nomem);
			pn_res = pn_ia32_xLoad_res;
			new_op = new_rd_ia32_vfld(dbg, irg, block, new_ptr, noreg, nomem);
			pn_res = pn_ia32_vfld_res;
		/* integer parameter */
		new_op = new_rd_ia32_Load(dbg, irg, block, new_ptr, noreg, nomem);
		proj_mode = mode_Iu;
		pn_res = pn_ia32_Load_res;

	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, ia32_am_B);
	set_ia32_ls_mode(new_op, load_mode);
	/* a stack parameter load can always be redone, so allow remat */
	set_ia32_flags(new_op, get_ia32_flags(new_op) | arch_irn_flags_rematerializable);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));

	return new_rd_Proj(dbg, irg, block, new_op, proj_mode, pn_res);
2506 * Transforms a FrameAddr into an ia32 Add.
/**
 * Transforms a be_FrameAddr into an ia32 Lea on the frame pointer,
 * with the frame entity attached for later offset resolution.
 *
 * @param env   The transformation environment
 * @param node  The be_FrameAddr node
 * @return the created ia32 Lea node
 */
static ir_node *gen_be_FrameAddr(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *op = get_irn_n(node, 0);
	ir_node *new_op = transform_node(env, op);
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	/* NOTE(review): the declaration of res appears to have been dropped
	 * by extraction; it is assigned below. */

	res = new_rd_ia32_Lea(dbg, irg, block, new_op, noreg);
	set_ia32_frame_ent(res, arch_get_frame_entity(env->cg->arch_env, node));
	set_ia32_am_support(res, ia32_am_Full);
	set_ia32_use_frame(res);
	set_ia32_am_flavour(res, ia32_am_OB);

	//set_ia32_immop_type(res, ia32_ImmConst);
	//set_ia32_commutative(res);

	SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node));
2532 * Transforms a FrameLoad into an ia32 Load.
/**
 * Transforms a be_FrameLoad into a frame-based ia32 load:
 * xLoad (SSE float), vfld (x87 float) or Load (integer), with the
 * load mode taken from the frame entity's type.
 *
 * @param env   The transformation environment
 * @param node  The be_FrameLoad node (mem at in 0, ptr at in 1)
 * @return the created ia32 load node
 */
static ir_node *gen_be_FrameLoad(ia32_transform_env_t *env, ir_node *node) {
	ir_node *new_op = NULL;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *mem = get_irn_n(node, 0);
	ir_node *ptr = get_irn_n(node, 1);
	ir_node *new_mem = transform_node(env, mem);
	ir_node *new_ptr = transform_node(env, ptr);
	ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node);
	ir_mode *mode = get_type_mode(get_entity_type(ent));
	ir_node *projs[pn_Load_max];

	ia32_collect_Projs(node, projs, pn_Load_max);

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg)) {
			new_op = new_rd_ia32_xLoad(dbg, irg, block, new_ptr, noreg, new_mem);
			new_op = new_rd_ia32_vfld(dbg, irg, block, new_ptr, noreg, new_mem);
		new_op = new_rd_ia32_Load(dbg, irg, block, new_ptr, noreg, new_mem);

	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, ia32_am_B);
	set_ia32_ls_mode(new_op, mode);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
2578 * Transforms a FrameStore into an ia32 Store.
/**
 * Transforms a be_FrameStore into a frame-based ia32 store:
 * xStore (SSE float), vfst (x87 float), Store8Bit (8-bit integer)
 * or Store (other integers).
 *
 * @param env   The transformation environment
 * @param node  The be_FrameStore node (mem at in 0, ptr at in 1, val at in 2)
 * @return the created ia32 store node
 */
static ir_node *gen_be_FrameStore(ia32_transform_env_t *env, ir_node *node) {
	ir_node *new_op = NULL;
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *mem = get_irn_n(node, 0);
	ir_node *ptr = get_irn_n(node, 1);
	ir_node *val = get_irn_n(node, 2);
	ir_node *new_mem = transform_node(env, mem);
	ir_node *new_ptr = transform_node(env, ptr);
	ir_node *new_val = transform_node(env, val);
	ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node);
	ir_mode *mode = get_irn_mode(val);

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg)) {
			new_op = new_rd_ia32_xStore(dbg, irg, block, new_ptr, noreg, new_val, new_mem);
			new_op = new_rd_ia32_vfst(dbg, irg, block, new_ptr, noreg, new_val, new_mem);
	}
	else if (get_mode_size_bits(mode) == 8) {
		new_op = new_rd_ia32_Store8Bit(dbg, irg, block, new_ptr, noreg, new_val, new_mem);
		new_op = new_rd_ia32_Store(dbg, irg, block, new_ptr, noreg, new_val, new_mem);

	set_ia32_frame_ent(new_op, ent);
	set_ia32_use_frame(new_op);

	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, ia32_am_B);
	set_ia32_ls_mode(new_op, mode);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
2625 * In case SSE is used we need to copy the result from FPU TOS.
/**
 * Transforms a be_Call. In the SSE2 case with a float result, the result
 * arrives on the x87 stack (st(0)) per the calling convention, so it is
 * copied to an SSE register via the frame: GetST0 (fp store) followed by
 * an xLoad. All users of the call's result and memory Projs are rerouted
 * to the new load, and the caller-save Keep is rebuilt to include the
 * original result Proj.
 *
 * NOTE(review): several closing braces and local declarations (mode,
 * mproj, in_keep, keep_arity, i) appear to have been dropped by
 * extraction; comments below cover only the visible code.
 *
 * @param env   The transformation environment
 * @param node  The be_Call node
 * @return the duplicated call node
 */
static ir_node *gen_be_Call(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *call_res = be_get_Proj_for_pn(node, pn_be_Call_first_res);
	ir_node *call_mem = be_get_Proj_for_pn(node, pn_be_Call_M_regular);
	ir_node *nomem = new_NoMem();
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);

	/* nothing to fix up without a result or without SSE2 */
	if (! call_res || ! USE_SSE2(env->cg)) {
		return duplicate_node(env, node);

	mode = get_irn_mode(call_res);

	/* in case there is no memory output: create one to serialize the copy FPU -> SSE */
	if (call_mem == NULL)
		call_mem = new_rd_Proj(dbg, irg, block, node, mode_M, pn_be_Call_M_regular);

	if (mode_is_float(mode)) {
		/* store st(0) onto stack */
		ir_node *frame = get_irg_frame(irg);
		ir_node *fstp = new_rd_ia32_GetST0(dbg, irg, block, frame, noreg, nomem);
		ir_entity *ent = frame_alloc_area(get_irg_frame_type(irg), get_mode_size_bytes(mode), 16, 0);
		ir_node *sse_load, *p, *bad, *keep;

		// Matze: TODO, fix this for new transform code...

		set_ia32_ls_mode(fstp, mode);
		set_ia32_op_type(fstp, ia32_AddrModeD);
		set_ia32_use_frame(fstp);
		set_ia32_frame_ent(fstp, ent);
		set_ia32_am_flavour(fstp, ia32_am_B);
		set_ia32_am_support(fstp, ia32_am_Dest);

		/* load into SSE register */
		sse_load = new_rd_ia32_xLoad(dbg, irg, block, frame, ia32_new_NoReg_gp(env->cg), fstp);
		set_ia32_ls_mode(sse_load, mode);
		set_ia32_op_type(sse_load, ia32_AddrModeS);
		set_ia32_use_frame(sse_load);
		set_ia32_frame_ent(sse_load, ent);
		set_ia32_am_flavour(sse_load, ia32_am_B);
		set_ia32_am_support(sse_load, ia32_am_Source);
		mproj = new_rd_Proj(dbg, irg, block, sse_load, mode_M, pn_ia32_xLoad_M);
		sse_load = new_rd_Proj(dbg, irg, block, sse_load, mode, pn_ia32_xLoad_res);

		/* reroute all users of the result proj to the sse load */
		edges_reroute(call_res, sse_load, irg);
		edges_reroute_kind(call_res, sse_load, EDGE_KIND_DEP, irg);

		/* reroute all users of the old call memory to the sse load memory */
		edges_reroute(call_mem, mproj, irg);
		edges_reroute_kind(call_mem, mproj, EDGE_KIND_DEP, irg);

		/* now, we can set the old call mem as input of GetST0 */
		set_irn_n(fstp, 1, call_mem);

		/* now: create new Keep with all former ins and one additional in - the result Proj */

		/* get a Proj representing a caller save register */
		p = be_get_Proj_for_pn(node, pn_be_Call_first_res + 1);
		assert(is_Proj(p) && "Proj expected.");

		/* user of the proj is the Keep */
		p = get_edge_src_irn(get_irn_out_edge_first(p));
		assert(be_is_Keep(p) && "Keep expected.");

		/* copy in array of the old keep and set the result proj as additional in */
		keep_arity = get_irn_arity(p) + 1;
		NEW_ARR_A(ir_node *, in_keep, keep_arity);
		in_keep[keep_arity - 1] = call_res;
		for (i = 0; i < keep_arity - 1; ++i)
			in_keep[i] = get_irn_n(p, i);

		/* create new keep and set the in class requirements properly */
		keep = be_new_Keep(NULL, irg, block, keep_arity, in_keep);
		for(i = 0; i < keep_arity; ++i) {
			const arch_register_class_t *cls = arch_get_irn_reg_class(env->cg->arch_env, in_keep[i], -1);
			be_node_set_reg_class(keep, i, cls);

		/* kill the old keep */
		bad = get_irg_bad(irg);
		for (i = 0; i < keep_arity - 1; i++)
			set_irn_n(p, i, bad);
		remove_End_keepalive(get_irg_end(irg), p);

	return duplicate_node(env, node);
2724 * In case SSE is used we need to copy the result from XMM0 to FPU TOS before return.
/**
 * Transforms a be_Return. In the SSE2 case with a float result, the
 * calling convention requires the result in st(0), so the value is
 * copied from xmm0 to the x87 stack via the frame: xStoreSimple
 * followed by SetST0 (fp load). The Barrier feeding the Return is
 * rebuilt with the new value and memory inputs.
 *
 * NOTE(review): several closing braces and local declarations
 * (res_type, mode, dbg, block, in, new_in) appear to have been dropped
 * by extraction; comments below cover only the visible code.
 *
 * @param env   The transformation environment
 * @param node  The be_Return node
 * @return the duplicated return node
 */
static ir_node *gen_be_Return(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	ir_node *ret_val = get_irn_n(node, be_pos_Return_val);
	ir_node *ret_mem = get_irn_n(node, be_pos_Return_mem);
	ir_entity *ent = get_irg_entity(irg);
	ir_type *tp = get_entity_type(ent);
	ir_node *frame, *sse_store, *fld, *mproj, *barrier;
	ir_node *new_barrier, *new_frame, *new_ret_val, *new_ret_mem;
	int pn_ret_val, pn_ret_mem, arity, i;

	assert(ret_val != NULL);
	/* no fix-up needed without a return value or without SSE2 */
	if (be_Return_get_n_rets(node) < 1 || ! USE_SSE2(env->cg)) {
		return duplicate_node(env, node);

	res_type = get_method_res_type(tp, 0);

	if (!is_Primitive_type(res_type)) {
		return duplicate_node(env, node);

	mode = get_type_mode(res_type);
	if (!mode_is_float(mode)) {
		return duplicate_node(env, node);

	assert(get_method_n_ress(tp) == 1);

	pn_ret_val = get_Proj_proj(ret_val);
	pn_ret_mem = get_Proj_proj(ret_mem);

	/* get the Barrier */
	barrier = get_Proj_pred(ret_val);

	/* get result input of the Barrier */
	ret_val = get_irn_n(barrier, pn_ret_val);
	new_ret_val = transform_node(env, ret_val);

	/* get memory input of the Barrier */
	ret_mem = get_irn_n(barrier, pn_ret_mem);
	new_ret_mem = transform_node(env, ret_mem);

	frame = get_irg_frame(irg);
	new_frame = transform_node(env, frame);

	dbg = get_irn_dbg_info(barrier);
	block = transform_node(env, get_nodes_block(barrier));

	/* store xmm0 onto stack */
	sse_store = new_rd_ia32_xStoreSimple(dbg, irg, block, new_frame, new_ret_val, new_ret_mem);
	set_ia32_ls_mode(sse_store, mode);
	set_ia32_op_type(sse_store, ia32_AddrModeD);
	set_ia32_use_frame(sse_store);
	set_ia32_am_flavour(sse_store, ia32_am_B);
	set_ia32_am_support(sse_store, ia32_am_Dest);

	/* reload the value onto the x87 stack */
	fld = new_rd_ia32_SetST0(dbg, irg, block, new_frame, sse_store);
	set_ia32_ls_mode(fld, mode);
	set_ia32_op_type(fld, ia32_AddrModeS);
	set_ia32_use_frame(fld);
	set_ia32_am_flavour(fld, ia32_am_B);
	set_ia32_am_support(fld, ia32_am_Source);

	mproj = new_r_Proj(irg, block, fld, mode_M, pn_ia32_SetST0_M);
	fld = new_r_Proj(irg, block, fld, mode_D, pn_ia32_SetST0_res);
	/* pin the result to vf0 so it ends up in st(0) */
	arch_set_irn_register(env->cg->arch_env, fld, &ia32_vfp_regs[REG_VF0]);

	/* create a new barrier */
	arity = get_irn_arity(barrier);
	in = alloca(arity * sizeof(in[0]));
	for(i = 0; i < arity; ++i) {
		if(i == pn_ret_val) {
		} else if(i == pn_ret_mem) {
			ir_node *in = get_irn_n(barrier, i);
			new_in = transform_node(env, in);

	new_barrier = new_ir_node(dbg, irg, block,
	                          get_irn_op(barrier), get_irn_mode(barrier),
	copy_node_attr(barrier, new_barrier);
	duplicate_deps(env, barrier, new_barrier);
	set_new_node(barrier, new_barrier);
	mark_irn_visited(barrier);

	/* transform normally */
	return duplicate_node(env, node);
2829 * Transform a be_AddSP into an ia32_AddSP. Eat up const sizes.
/**
 * Transform a be_AddSP into an ia32_AddSP. Eat up const sizes
 * (via fold_immediate).
 *
 * @param env   The transformation environment
 * @param node  The be_AddSP node
 * @return the created ia32_AddSP node
 */
static ir_node *gen_be_AddSP(ia32_transform_env_t *env, ir_node *node) {
	/* NOTE(review): the declaration of new_op appears to have been
	 * dropped by extraction; it is assigned below. */
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *sz = get_irn_n(node, be_pos_AddSP_size);
	ir_node *new_sz = transform_node(env, sz);
	ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp);
	ir_node *new_sp = transform_node(env, sp);

	new_op = new_rd_ia32_AddSP(dbg, irg, block, new_sp, new_sz);
	/* fold a constant size into the node as immediate */
	fold_immediate(env, new_op, 0, 1);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
2850 * Transform a be_SubSP into an ia32_SubSP. Eat up const sizes.
/**
 * Transform a be_SubSP into an ia32_SubSP. Eat up const sizes
 * (via fold_immediate). Mirrors gen_be_AddSP.
 *
 * @param env   The transformation environment
 * @param node  The be_SubSP node
 * @return the created ia32_SubSP node
 */
static ir_node *gen_be_SubSP(ia32_transform_env_t *env, ir_node *node) {
	/* NOTE(review): the declaration of new_op appears to have been
	 * dropped by extraction; it is assigned below. */
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *sz = get_irn_n(node, be_pos_SubSP_size);
	ir_node *new_sz = transform_node(env, sz);
	ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp);
	ir_node *new_sp = transform_node(env, sp);

	new_op = new_rd_ia32_SubSP(dbg, irg, block, new_sp, new_sz);
	/* fold a constant size into the node as immediate */
	fold_immediate(env, new_op, 0, 1);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
2871 * This function just sets the register for the Unknown node
2872 * as this is not done during register allocation because Unknown
2873 * is an "ignore" node.
/**
 * Transforms an Unknown node into the matching ia32 Unknown:
 * xmm (SSE2 float), vfp (x87 float) or gp (integer/reference).
 * The register is set here because Unknown is an "ignore" node
 * that register allocation skips.
 *
 * @param env   The transformation environment
 * @param node  The Unknown node
 * @return the ia32 Unknown node for the node's mode
 */
static ir_node *gen_Unknown(ia32_transform_env_t *env, ir_node *node) {
	ir_mode *mode = get_irn_mode(node);

	if (mode_is_float(mode)) {
		if (USE_SSE2(env->cg))
			return ia32_new_Unknown_xmm(env->cg);
		return ia32_new_Unknown_vfp(env->cg);
	} else if (mode_is_int(mode) || mode_is_reference(mode)) {
		return ia32_new_Unknown_gp(env->cg);
	/* any other mode is a bug in the lowering before this phase */
	assert(0 && "unsupported Unknown-Mode");
2893 * Change some phi modes
/**
 * Transforms a Phi node: asserts the mode is one the backend supports
 * (<=32bit integer/reference or mode_F/mode_D float), duplicates the
 * Phi with its OLD predecessors (loops!) and queues the predecessors
 * on the worklist so they get transformed and fixed up later.
 *
 * @param env   The transformation environment
 * @param node  The Phi node
 * @return the duplicated Phi node
 */
static ir_node *gen_Phi(ia32_transform_env_t *env, ir_node *node) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_mode *mode = get_irn_mode(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	/* NOTE(review): declarations of phi, arity, i appear to have been
	 * dropped by extraction; they are used below. */

	if(mode_is_int(mode) || mode_is_reference(mode)) {
		// we shouldn't have any 64bit stuff around anymore
		assert(get_mode_size_bits(mode) <= 32);
		// all integer operations are on 32bit registers now
	} else if(mode_is_float(mode)) {
		assert(mode == mode_D || mode == mode_F);
		// all float operations are on mode_D registers

	/* phi nodes allow loops, so we use the old arguments for now
	 * and fix this later */
	phi = new_ir_node(dbg, irg, block, op_Phi, mode, get_irn_arity(node),
	                  get_irn_in(node) + 1);
	copy_node_attr(node, phi);
	duplicate_deps(env, node, phi);

	set_new_node(node, phi);

	/* put the preds in the worklist */
	arity = get_irn_arity(node);
	for(i = 0; i < arity; ++i) {
		ir_node *pred = get_irn_n(node, i);
		pdeq_putr(env->worklist, pred);
2933 /**********************************************************************
2936 * | | _____ _____ _ __ ___ __| | _ __ ___ __| | ___ ___
2937 * | |/ _ \ \ /\ / / _ \ '__/ _ \/ _` | | '_ \ / _ \ / _` |/ _ \/ __|
2938 * | | (_) \ V V / __/ | | __/ (_| | | | | | (_) | (_| | __/\__ \
2939 * |_|\___/ \_/\_/ \___|_| \___|\__,_| |_| |_|\___/ \__,_|\___||___/
2941 **********************************************************************/
/* These nodes are created in intrinsic lowering (64bit -> 32bit) */

/** Constructor signature for load-like ia32 nodes (base/index addressing).
 * NOTE(review): the continuation of this typedef appears to have been
 * dropped by extraction. */
typedef ir_node *construct_load_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \

/** Constructor signature for store-like ia32 nodes (value + memory). */
typedef ir_node *construct_store_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \
	ir_node *val, ir_node *mem);
2952 * Transforms a lowered Load into a "real" one.
/**
 * Transforms a lowered Load into a "real" one, copying all address-mode
 * attributes (offset, scale, symconst, frame entity) from the lowered node.
 *
 * @param env      The transformation environment
 * @param node     The lowered load node
 * @param func     Constructor for the concrete ia32 load node
 * @param fp_unit  fp_x87 forces x87 simulation for float loads
 * @return the created ia32 load node
 */
static ir_node *gen_lowered_Load(ia32_transform_env_t *env, ir_node *node, construct_load_func func, char fp_unit) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_mode *mode = get_ia32_ls_mode(node);
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *ptr = get_irn_n(node, 0);
	ir_node *mem = get_irn_n(node, 1);
	ir_node *new_ptr = transform_node(env, ptr);
	ir_node *new_mem = transform_node(env, mem);

	/*
	   Could be that we have SSE2 unit, but due to 64Bit Div/Conv
	   lowering we have x87 nodes, so we need to enforce simulation.
	 */
	if (mode_is_float(mode)) {
		if (fp_unit == fp_x87)

	new_op = func(dbg, irg, block, new_ptr, noreg, new_mem);

	/* copy the address-mode attributes of the lowered node */
	set_ia32_am_support(new_op, ia32_am_Source);
	set_ia32_op_type(new_op, ia32_AddrModeS);
	set_ia32_am_flavour(new_op, ia32_am_OB);
	set_ia32_am_offs_int(new_op, 0);
	set_ia32_am_scale(new_op, 1);
	set_ia32_am_sc(new_op, get_ia32_am_sc(node));
	if(is_ia32_am_sc_sign(node))
		set_ia32_am_sc_sign(new_op);
	set_ia32_ls_mode(new_op, get_ia32_ls_mode(node));
	if(is_ia32_use_frame(node)) {
		set_ia32_frame_ent(new_op, get_ia32_frame_ent(node));
		set_ia32_use_frame(new_op);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
2998 * Transforms a lowered Store into a "real" one.
/**
 * Transforms a lowered Store into a "real" one, copying the immediate
 * offset and frame entity from the lowered node.
 *
 * @param env      The transformation environment
 * @param node     The lowered store node
 * @param func     Constructor for the concrete ia32 store node
 * @param fp_unit  fp_x87 forces x87 simulation for float stores
 * @return the created ia32 store node
 */
static ir_node *gen_lowered_Store(ia32_transform_env_t *env, ir_node *node, construct_store_func func, char fp_unit) {
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_mode *mode = get_ia32_ls_mode(node);
	ia32_am_flavour_t am_flav = ia32_B;
	ir_node *ptr = get_irn_n(node, 0);
	ir_node *val = get_irn_n(node, 1);
	ir_node *mem = get_irn_n(node, 2);
	ir_node *new_ptr = transform_node(env, ptr);
	ir_node *new_val = transform_node(env, val);
	ir_node *new_mem = transform_node(env, mem);

	/*
	   Could be that we have SSE2 unit, but due to 64Bit Div/Conv
	   lowering we have x87 nodes, so we need to enforce simulation.
	 */
	if (mode_is_float(mode)) {
		if (fp_unit == fp_x87)

	new_op = func(dbg, irg, block, new_ptr, noreg, new_val, new_mem);

	/* a non-zero offset upgrades the AM flavour */
	if ((am_offs = get_ia32_am_offs_int(node)) != 0) {
		add_ia32_am_offs_int(new_op, am_offs);

	set_ia32_am_support(new_op, ia32_am_Dest);
	set_ia32_op_type(new_op, ia32_AddrModeD);
	set_ia32_am_flavour(new_op, am_flav);
	set_ia32_ls_mode(new_op, mode);
	set_ia32_frame_ent(new_op, get_ia32_frame_ent(node));
	set_ia32_use_frame(new_op);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));
3047 * Transforms an ia32_l_XXX into a "real" XXX node
3049 * @param env The transformation environment
3050 * @return the created ia32 XXX node
/*
 * Generator macros: each expands to a gen_ia32_l_<op> transform function
 * that forwards to the matching generic helper (gen_binop, gen_unop,
 * gen_shift_binop, gen_lowered_Load/Store).
 * NOTE(review): closing braces of the macro bodies appear to have been
 * dropped by extraction; the macro lines themselves are kept verbatim.
 */
#define GEN_LOWERED_OP(op) \
static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env, ir_node *node) {\
	ir_mode *mode = get_irn_mode(node); \
	if (mode_is_float(mode)) \
	return gen_binop(env, node, get_binop_left(node), \
	get_binop_right(node), new_rd_ia32_##op); \
#define GEN_LOWERED_x87_OP(op) \
static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env, ir_node *node) {\
	FORCE_x87(env->cg); \
	new_op = gen_binop_float(env, node, get_binop_left(node), \
	get_binop_right(node), new_rd_ia32_##op); \
#define GEN_LOWERED_UNOP(op) \
static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env, ir_node *node) {\
	return gen_unop(env, node, get_unop_op(node), new_rd_ia32_##op); \
#define GEN_LOWERED_SHIFT_OP(op) \
static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env, ir_node *node) {\
	return gen_shift_binop(env, node, get_binop_left(node), \
	get_binop_right(node), new_rd_ia32_##op); \
#define GEN_LOWERED_LOAD(op, fp_unit) \
static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env, ir_node *node) {\
	return gen_lowered_Load(env, node, new_rd_ia32_##op, fp_unit); \
#define GEN_LOWERED_STORE(op, fp_unit) \
static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env, ir_node *node) {\
	return gen_lowered_Store(env, node, new_rd_ia32_##op, fp_unit); \
/* Instantiate the transform functions for the lowered 64bit/x87 ops. */
GEN_LOWERED_OP(AddC)
GEN_LOWERED_OP(SubC)
GEN_LOWERED_x87_OP(vfprem)
GEN_LOWERED_x87_OP(vfmul)
GEN_LOWERED_x87_OP(vfsub)
GEN_LOWERED_UNOP(Minus)
GEN_LOWERED_LOAD(vfild, fp_x87)
GEN_LOWERED_LOAD(Load, fp_none)
GEN_LOWERED_STORE(vfist, fp_x87)
GEN_LOWERED_STORE(Store, fp_none)
/**
 * Transforms an ia32_l_vfdiv into a real vfdiv node. Division is not
 * commutative, so the commutative flag is cleared before immediate
 * folding.
 *
 * @param env   The transformation environment
 * @param node  The ia32_l_vfdiv node
 * @return the created vfdiv node
 */
static ir_node *gen_ia32_l_vfdiv(ia32_transform_env_t *env, ir_node *node) {
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *left = get_binop_left(node);
	ir_node *right = get_binop_right(node);
	ir_node *new_left = transform_node(env, left);
	ir_node *new_right = transform_node(env, right);
	/* NOTE(review): the declaration of vfdiv appears to have been
	 * dropped by extraction; it is assigned below. */

	vfdiv = new_rd_ia32_vfdiv(dbg, irg, block, noreg, noreg, new_left, new_right, new_NoMem());
	clear_ia32_commutative(vfdiv);
	set_ia32_am_support(vfdiv, ia32_am_Source);
	fold_immediate(env, vfdiv, 2, 3);

	SET_IA32_ORIG_NODE(vfdiv, ia32_get_old_node_name(env->cg, node));
3132 * Transforms a l_MulS into a "real" MulS node.
3134 * @param env The transformation environment
3135 * @return the created ia32 MulS node
/**
 * Transforms a l_MulS into a "real" MulS node and keeps its EAX/EDX
 * result registers alive with a be_Keep.
 *
 * @param env   The transformation environment
 * @param node  The ia32_l_MulS node
 * @return the created ia32 MulS node
 */
static ir_node *gen_ia32_l_MulS(ia32_transform_env_t *env, ir_node *node) {
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_graph *irg = env->irg;
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *left = get_binop_left(node);
	ir_node *right = get_binop_right(node);

	/* l_MulS is already a mode_T node, so we create the MulS in the normal way */
	/* and then skip the result Proj, because all needed Projs are already there. */
	/* NOTE(review): the untransformed left/right are passed into the new
	 * MulS instead of their transformed counterparts — looks like a bug;
	 * confirm against the transform_node protocol. */
	ir_node *muls = new_rd_ia32_MulS(dbg, irg, block, noreg, noreg, left, right, new_NoMem());
	clear_ia32_commutative(muls);
	set_ia32_am_support(muls, ia32_am_Source);
	fold_immediate(env, muls, 2, 3);

	/* check if EAX and EDX proj exist, add missing one */
	in[0] = new_rd_Proj(dbg, irg, block, muls, mode_Iu, pn_EAX);
	in[1] = new_rd_Proj(dbg, irg, block, muls, mode_Iu, pn_EDX);
	be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 2, in);

	SET_IA32_ORIG_NODE(muls, ia32_get_old_node_name(env->cg, node));
/* Instantiate the lowered shift transform functions. */
GEN_LOWERED_SHIFT_OP(Shl)
GEN_LOWERED_SHIFT_OP(Shr)
GEN_LOWERED_SHIFT_OP(Shrs)
3169 * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs:
3170 * op1 - target to be shifted
3171 * op2 - contains bits to be shifted into target
3173 * Only op3 can be an immediate.
/**
 * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data
 * inputs: op1 (target to be shifted), op2 (bits shifted into the
 * target) and a shift count; only the count may become an immediate.
 * Immediate counts are reduced mod 32 (imm8 range on ia32).
 *
 * NOTE(review): the third parameter declaration (count) and several
 * braces appear to have been dropped by extraction; comments below
 * cover only the visible code.
 */
static ir_node *gen_lowered_64bit_shifts(ia32_transform_env_t *env, ir_node *node,
                                         ir_node *op1, ir_node *op2,
	ir_node *new_op = NULL;
	ir_graph *irg = env->irg;
	ir_mode *mode = get_irn_mode(node);
	dbg_info *dbg = get_irn_dbg_info(node);
	ir_node *block = transform_node(env, get_nodes_block(node));
	ir_node *noreg = ia32_new_NoReg_gp(env->cg);
	ir_node *nomem = new_NoMem();
	ir_node *new_op1 = transform_node(env, op1);
	ir_node *new_op2 = transform_node(env, op2);
	ir_node *new_count = transform_node(env, count);
	DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;)

	assert(! mode_is_float(mode) && "Shift/Rotate with float not supported");

	/* Check if immediate optimization is on and */
	/* if it's an operation with immediate. */
	imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(NULL, new_count) : NULL;

	/* Limit imm_op within range imm8 */
	tv = get_ia32_Immop_tarval(imm_op);
	/* reduce the shift count mod 32 to stay within imm8 */
	tv = tarval_mod(tv, new_tarval_from_long(32, get_tarval_mode(tv)));
	set_ia32_Immop_tarval(imm_op, tv);

	/* integer operations */
	/* This is ShiftD with const */
	DB((mod, LEVEL_1, "ShiftD with immediate ..."));

	if (is_ia32_l_ShlD(node))
		new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg,
		                          new_op1, new_op2, noreg, nomem);
		new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg,
		                          new_op1, new_op2, noreg, nomem);
	set_ia32_Immop_attr(new_op, imm_op);

	/* This is a normal ShiftD */
	DB((mod, LEVEL_1, "ShiftD binop ..."));
	if (is_ia32_l_ShlD(node))
		new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg,
		                          new_op1, new_op2, new_count, nomem);
		new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg,
		                          new_op1, new_op2, new_count, nomem);

	/* set AM support */
	// Matze: node has unsupported format (6inputs)
	//set_ia32_am_support(new_op, ia32_am_Dest);

	SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node));

	/* a variable count lives in cl */
	set_ia32_emit_cl(new_op);
/* Transforms an l_ShlD node; delegates to gen_lowered_64bit_shifts with the
 * node's three data operands (target, fill bits, count). */
3246 static ir_node *gen_ia32_l_ShlD(ia32_transform_env_t *env, ir_node *node) {
3247 return gen_lowered_64bit_shifts(env, node, get_irn_n(node, 0),
3248 get_irn_n(node, 1), get_irn_n(node, 2));
/* Transforms an l_ShrD node; delegates to gen_lowered_64bit_shifts with the
 * node's three data operands (target, fill bits, count). */
3251 static ir_node *gen_ia32_l_ShrD(ia32_transform_env_t *env, ir_node *node) {
3252 return gen_lowered_64bit_shifts(env, node, get_irn_n(node, 0),
3253 get_irn_n(node, 1), get_irn_n(node, 2));
3257 * In case SSE Unit is used, the node is transformed into a vfst + xLoad.
3259 static ir_node *gen_ia32_l_X87toSSE(ia32_transform_env_t *env, ir_node *node) {
3260 ia32_code_gen_t *cg = env->cg;
3261 ir_node *res = NULL;
3262 ir_graph *irg = env->irg;
3263 dbg_info *dbg = get_irn_dbg_info(node);
3264 ir_node *block = transform_node(env, get_nodes_block(node));
3265 ir_node *ptr = get_irn_n(node, 0);
3266 ir_node *val = get_irn_n(node, 1);
3267 ir_node *new_val = transform_node(env, val);
3268 ir_node *mem = get_irn_n(node, 2);
3269 ir_node *noreg, *new_ptr, *new_mem;
3275 noreg = ia32_new_NoReg_gp(cg);
3276 new_mem = transform_node(env, mem);
3277 new_ptr = transform_node(env, ptr);
3279 /* Store x87 -> MEM */
/* the value travels x87 -> memory -> SSE through a frame-entity slot */
3280 res = new_rd_ia32_vfst(dbg, irg, block, new_ptr, noreg, new_val, new_mem);
3281 set_ia32_frame_ent(res, get_ia32_frame_ent(node));
3282 set_ia32_use_frame(res);
3283 set_ia32_ls_mode(res, get_ia32_ls_mode(node));
3284 set_ia32_am_support(res, ia32_am_Dest);
3285 set_ia32_am_flavour(res, ia32_B);
3286 set_ia32_op_type(res, ia32_AddrModeD);
3288 /* Load MEM -> SSE */
/* the xLoad takes the vfst directly as its memory input, serializing the two */
3289 res = new_rd_ia32_xLoad(dbg, irg, block, new_ptr, noreg, res);
3290 set_ia32_frame_ent(res, get_ia32_frame_ent(node));
3291 set_ia32_use_frame(res);
3292 set_ia32_ls_mode(res, get_ia32_ls_mode(node));
3293 set_ia32_am_support(res, ia32_am_Source);
3294 set_ia32_am_flavour(res, ia32_B);
3295 set_ia32_op_type(res, ia32_AddrModeS);
3296 res = new_rd_Proj(dbg, irg, block, res, mode_D, pn_ia32_xLoad_res);
3302 * In case SSE Unit is used, the node is transformed into a xStore + vfld.
3304 static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env, ir_node *node) {
3305 ia32_code_gen_t *cg = env->cg;
3306 ir_graph *irg = env->irg;
3307 dbg_info *dbg = get_irn_dbg_info(node);
3308 ir_node *block = transform_node(env, get_nodes_block(node));
3309 ir_node *res = NULL;
3310 ir_node *ptr = get_irn_n(node, 0);
3311 ir_node *val = get_irn_n(node, 1);
3312 ir_node *mem = get_irn_n(node, 2);
3313 ir_entity *fent = get_ia32_frame_ent(node);
3314 ir_mode *lsmode = get_ia32_ls_mode(node);
3315 ir_node *new_val = transform_node(env, val);
3316 ir_node *noreg, *new_ptr, *new_mem;
3319 if (!USE_SSE2(cg)) {
3320 /* SSE unit is not used -> skip this node. */
3324 noreg = ia32_new_NoReg_gp(cg);
/* NOTE(review): val was already transformed at its declaration above; this
 * second transform_node call is redundant (transform_node caches results). */
3325 new_val = transform_node(env, val);
3326 new_ptr = transform_node(env, ptr);
3327 new_mem = transform_node(env, mem);
3329 /* Store SSE -> MEM */
3330 if (is_ia32_xLoad(skip_Proj(new_val))) {
3331 ir_node *ld = skip_Proj(new_val);
3333 /* we can vfld the value directly into the fpu */
3334 fent = get_ia32_frame_ent(ld);
/* NOTE(review): new_ptr was computed before this reassignment of ptr, so the
 * vfld below still uses the original address — looks ineffective; confirm
 * against the elided lines of this function. */
3335 ptr = get_irn_n(ld, 0);
3336 offs = get_ia32_am_offs_int(ld);
3338 res = new_rd_ia32_xStore(dbg, irg, block, new_ptr, noreg, new_val, new_mem);
3339 set_ia32_frame_ent(res, fent);
3340 set_ia32_use_frame(res);
3341 set_ia32_ls_mode(res, lsmode);
3342 set_ia32_am_support(res, ia32_am_Dest);
3343 set_ia32_am_flavour(res, ia32_B);
3344 set_ia32_op_type(res, ia32_AddrModeD);
3348 /* Load MEM -> x87 */
3349 res = new_rd_ia32_vfld(dbg, irg, block, new_ptr, noreg, new_mem);
3350 set_ia32_frame_ent(res, fent);
3351 set_ia32_use_frame(res);
3352 set_ia32_ls_mode(res, lsmode);
3353 add_ia32_am_offs_int(res, offs);
3354 set_ia32_am_support(res, ia32_am_Source);
3355 set_ia32_am_flavour(res, ia32_B);
3356 set_ia32_op_type(res, ia32_AddrModeS);
3357 res = new_rd_Proj(dbg, irg, block, res, lsmode, pn_ia32_vfld_res);
3362 /*********************************************************
3365 * _ __ ___ __ _ _ _ __ __| |_ __ ___ _____ _ __
3366 * | '_ ` _ \ / _` | | '_ \ / _` | '__| \ \ / / _ \ '__|
3367 * | | | | | | (_| | | | | | | (_| | | | |\ V / __/ |
3368 * |_| |_| |_|\__,_|_|_| |_| \__,_|_| |_| \_/ \___|_|
3370 *********************************************************/
3373 * the BAD transformer.
/* Registered for opcodes that must never reach this phase; aborts compilation
 * with a diagnostic naming the offending node. */
3375 static ir_node *bad_transform(ia32_transform_env_t *env, ir_node *node) {
3376 panic("No transform function for %+F available.\n", node);
/* Transforms the End node into the new irg. */
3380 static ir_node *gen_End(ia32_transform_env_t *env, ir_node *node) {
3381 /* end has to be duplicated manually because we need a dynamic in array */
3382 ir_graph *irg = env->irg;
3383 dbg_info *dbg = get_irn_dbg_info(node);
3384 ir_node *block = transform_node(env, get_nodes_block(node));
/* arity -1 creates a node with a dynamic in array, filled below */
3388 new_end = new_ir_node(dbg, irg, block, op_End, mode_X, -1, NULL);
3389 copy_node_attr(node, new_end);
3390 duplicate_deps(env, node, new_end);
3392 set_irg_end(irg, new_end);
/* NOTE(review): registers new_end as its own transformed image, presumably so
 * transform_node(new_end) is the identity — confirm against elided lines. */
3393 set_new_node(new_end, new_end);
3395 /* transform preds */
3396 arity = get_irn_arity(node);
3397 for(i = 0; i < arity; ++i) {
3398 ir_node *in = get_irn_n(node, i);
3399 ir_node *new_in = transform_node(env, in);
3401 add_End_keepalive(new_end, new_in);
/* Transforms a Block. The copy initially keeps its OLD predecessors (jumps may
 * form cycles); fix_loops patches them to the new nodes afterwards. */
3407 static ir_node *gen_Block(ia32_transform_env_t *env, ir_node *node) {
3408 ir_graph *irg = env->irg;
3409 dbg_info *dbg = get_irn_dbg_info(node);
3410 ir_node *start_block = env->old_anchors[anchor_start_block];
3415 * We replace the ProjX from the start node with a jump,
3416 * so the startblock has no preds anymore now
3418 if(node == start_block) {
3419 return new_rd_Block(dbg, irg, 0, NULL);
3422 /* we use the old blocks for now, because jumps allow cycles in the graph
3423 * we have to fix this later */
3424 block = new_ir_node(dbg, irg, NULL, get_irn_op(node), get_irn_mode(node),
3425 get_irn_arity(node), get_irn_in(node) + 1);
3426 copy_node_attr(node, block);
/* keep the debug node number stable across the copy */
3428 #ifdef DEBUG_libfirm
3429 block->node_nr = node->node_nr;
3431 set_new_node(node, block);
3433 /* put the preds in the worklist */
3434 arity = get_irn_arity(node);
3435 for(i = 0; i < arity; ++i) {
3436 ir_node *in = get_irn_n(node, i);
3437 pdeq_putr(env->worklist, in);
/* Transforms the Projs of a be_AddSP onto the corresponding ia32_AddSP
 * outputs; the stack result is pinned to ESP. */
3443 static ir_node *gen_Proj_be_AddSP(ia32_transform_env_t *env, ir_node *node) {
3444 ir_graph *irg = env->irg;
3445 ir_node *block = transform_node(env, get_nodes_block(node));
3446 dbg_info *dbg = get_irn_dbg_info(node);
3447 ir_node *pred = get_Proj_pred(node);
3448 ir_node *new_pred = transform_node(env, pred);
3449 int proj = get_Proj_proj(node);
3451 if(proj == pn_be_AddSP_res) {
3452 ir_node *res = new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_AddSP_stack);
3453 arch_set_irn_register(env->cg->arch_env, res, &ia32_gp_regs[REG_ESP]);
3455 } else if(proj == pn_be_AddSP_M) {
3456 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_AddSP_M);
/* unexpected proj number: return Unknown rather than crash */
3460 return new_rd_Unknown(irg, get_irn_mode(node));
/* Transforms the Projs of a be_SubSP onto the corresponding ia32_SubSP
 * outputs; the stack result is pinned to ESP. */
3463 static ir_node *gen_Proj_be_SubSP(ia32_transform_env_t *env, ir_node *node) {
3464 ir_graph *irg = env->irg;
3465 ir_node *block = transform_node(env, get_nodes_block(node));
3466 dbg_info *dbg = get_irn_dbg_info(node);
3467 ir_node *pred = get_Proj_pred(node);
3468 ir_node *new_pred = transform_node(env, pred);
3469 int proj = get_Proj_proj(node);
3471 if(proj == pn_be_SubSP_res) {
/* fixed: was pn_ia32_AddSP_stack, a copy-paste slip from gen_Proj_be_AddSP;
 * the SubSP memory proj below already uses the SubSP numbering */
3472 ir_node *res = new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_SubSP_stack);
3473 arch_set_irn_register(env->cg->arch_env, res, &ia32_gp_regs[REG_ESP]);
3475 } else if(proj == pn_be_SubSP_M) {
3476 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_SubSP_M);
/* unexpected proj number: return Unknown rather than crash */
3480 return new_rd_Unknown(irg, get_irn_mode(node));
/* Renumbers the Projs of a transformed Load to the proj numbers of whichever
 * ia32 load (Load / xLoad / vfld) the predecessor became. */
3483 static ir_node *gen_Proj_Load(ia32_transform_env_t *env, ir_node *node) {
3484 ir_graph *irg = env->irg;
3485 ir_node *block = transform_node(env, get_nodes_block(node));
3486 dbg_info *dbg = get_irn_dbg_info(node);
3487 ir_node *pred = get_Proj_pred(node);
3488 ir_node *new_pred = transform_node(env, pred);
3489 int proj = get_Proj_proj(node);
3491 /* renumber the proj */
3492 if(is_ia32_Load(new_pred)) {
3493 if(proj == pn_Load_res) {
3494 return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Load_res);
3495 } else if(proj == pn_Load_M) {
3496 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_Load_M);
3498 } else if(is_ia32_xLoad(new_pred)) {
3499 if(proj == pn_Load_res) {
3500 return new_rd_Proj(dbg, irg, block, new_pred, mode_D, pn_ia32_xLoad_res);
3501 } else if(proj == pn_Load_M) {
3502 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_xLoad_M);
3504 } else if(is_ia32_vfld(new_pred)) {
3505 if(proj == pn_Load_res) {
3506 return new_rd_Proj(dbg, irg, block, new_pred, mode_D, pn_ia32_vfld_res);
3507 } else if(proj == pn_Load_M) {
3508 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_vfld_M);
/* unexpected predecessor/proj combination */
3513 return new_rd_Unknown(irg, get_irn_mode(node));
/* Renumbers the Projs of a Div/Mod/DivMod to the ia32 Div proj numbers;
 * dispatches on the OLD predecessor's opcode. (Several case labels of this
 * switch are elided in this excerpt.) */
3516 static ir_node *gen_Proj_DivMod(ia32_transform_env_t *env, ir_node *node) {
3517 ir_graph *irg = env->irg;
3518 dbg_info *dbg = get_irn_dbg_info(node);
3519 ir_node *block = transform_node(env, get_nodes_block(node));
3520 ir_mode *mode = get_irn_mode(node);
3522 ir_node *pred = get_Proj_pred(node);
3523 ir_node *new_pred = transform_node(env, pred);
3524 int proj = get_Proj_proj(node);
3526 assert(is_ia32_Div(new_pred) || is_ia32_IDiv(new_pred));
3528 switch(get_irn_opcode(pred)) {
3532 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_Div_M);
3534 return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
3542 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_Div_M);
3544 return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
3552 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_Div_M);
3553 case pn_DivMod_res_div:
3554 return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res);
3555 case pn_DivMod_res_mod:
3556 return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res);
3566 return new_rd_Unknown(irg, mode);
/* Renumbers the memory Proj of a CopyB to the proj number of the ia32 CopyB
 * variant (immediate-size CopyB_i vs. general CopyB) it became. */
3569 static ir_node *gen_Proj_CopyB(ia32_transform_env_t *env, ir_node *node)
3571 ir_graph *irg = env->irg;
3572 dbg_info *dbg = get_irn_dbg_info(node);
3573 ir_node *block = transform_node(env, get_nodes_block(node));
3574 ir_mode *mode = get_irn_mode(node);
3576 ir_node *pred = get_Proj_pred(node);
3577 ir_node *new_pred = transform_node(env, pred);
3578 int proj = get_Proj_proj(node);
3581 case pn_CopyB_M_regular:
3582 if(is_ia32_CopyB_i(new_pred)) {
3583 return new_rd_Proj(dbg, irg, block, new_pred, mode_M,
3585 } else if(is_ia32_CopyB(new_pred)) {
3586 return new_rd_Proj(dbg, irg, block, new_pred, mode_M,
3595 return new_rd_Unknown(irg, mode);
/* Renumbers the Projs of a lowered x87 division (l_vfdiv) onto the real
 * ia32 vfdiv outputs. */
3598 static ir_node *gen_Proj_l_vfdiv(ia32_transform_env_t *env, ir_node *node)
3600 ir_graph *irg = env->irg;
3601 dbg_info *dbg = get_irn_dbg_info(node);
3602 ir_node *block = transform_node(env, get_nodes_block(node));
3603 ir_mode *mode = get_irn_mode(node);
3605 ir_node *pred = get_Proj_pred(node);
3606 ir_node *new_pred = transform_node(env, pred);
3607 int proj = get_Proj_proj(node);
3610 case pn_ia32_l_vfdiv_M:
3611 return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_vfdiv_M);
3612 case pn_ia32_l_vfdiv_res:
3613 return new_rd_Proj(dbg, irg, block, new_pred, mode_D, pn_ia32_vfdiv_res);
3618 return new_rd_Unknown(irg, mode);
/* Renumbers the Projs of a Quot to the outputs of whichever division the
 * predecessor became: SSE xDiv or x87 vfdiv. */
3621 static ir_node *gen_Proj_Quot(ia32_transform_env_t *env, ir_node *node)
3623 ir_graph *irg = env->irg;
3624 dbg_info *dbg = get_irn_dbg_info(node);
3625 ir_node *block = transform_node(env, get_nodes_block(node));
3626 ir_mode *mode = get_irn_mode(node);
3628 ir_node *pred = get_Proj_pred(node);
3629 ir_node *new_pred = transform_node(env, pred);
3630 int proj = get_Proj_proj(node);
3634 if(is_ia32_xDiv(new_pred)) {
3635 return new_rd_Proj(dbg, irg, block, new_pred, mode_M,
3637 } else if(is_ia32_vfdiv(new_pred)) {
3638 return new_rd_Proj(dbg, irg, block, new_pred, mode_M,
3643 if(is_ia32_xDiv(new_pred)) {
3644 return new_rd_Proj(dbg, irg, block, new_pred, mode,
3646 } else if(is_ia32_vfdiv(new_pred)) {
3647 return new_rd_Proj(dbg, irg, block, new_pred, mode,
3656 return new_rd_Unknown(irg, mode);
/* Central Proj transformer: dispatches to the specialized gen_Proj_* helpers
 * depending on the (old) predecessor, and handles two special cases inline:
 * Store memory Projs become the transformed Store itself, and the Start
 * node's initial-exec ProjX becomes a plain Jmp. Everything else is copied
 * verbatim via duplicate_node. */
3659 static ir_node *gen_Proj(ia32_transform_env_t *env, ir_node *node) {
3660 ir_graph *irg = env->irg;
3661 dbg_info *dbg = get_irn_dbg_info(node);
3662 ir_node *pred = get_Proj_pred(node);
3663 int proj = get_Proj_proj(node);
3665 if(is_Store(pred) || be_is_FrameStore(pred)) {
3666 if(proj == pn_Store_M) {
3667 return transform_node(env, pred);
3670 return new_r_Bad(irg);
3672 } else if(is_Load(pred) || be_is_FrameLoad(pred)) {
3673 return gen_Proj_Load(env, node);
3674 } else if(is_Div(pred) || is_Mod(pred) || is_DivMod(pred)) {
3675 return gen_Proj_DivMod(env, node);
3676 } else if(is_CopyB(pred)) {
3677 return gen_Proj_CopyB(env, node);
3678 } else if(is_Quot(pred)) {
3679 return gen_Proj_Quot(env, node);
3680 } else if(is_ia32_l_vfdiv(pred)) {
3681 return gen_Proj_l_vfdiv(env, node);
3682 } else if(be_is_SubSP(pred)) {
3683 return gen_Proj_be_SubSP(env, node);
3684 } else if(be_is_AddSP(pred)) {
3685 return gen_Proj_be_AddSP(env, node);
3686 } else if(get_irn_op(pred) == op_Start && proj == pn_Start_X_initial_exec) {
3687 ir_node *block = get_nodes_block(pred);
3690 block = transform_node(env, block);
3691 // we exchange the ProjX with a jump
3692 jump = new_rd_Jmp(dbg, irg, block);
/* fixed: removed leftover debug ir_fprintf(stderr, "created jump: ...") that
 * printed on every transformed function entry */
3697 return duplicate_node(env, node);
3701 * Enters all transform functions into the generic pointer
/* Called once per transformation run; most GEN()/BAD() registrations are
 * elided in this excerpt. */
3703 static void register_transformers(void) {
3704 ir_op *op_Max, *op_Min, *op_Mulh;
3706 /* first clear the generic function pointer for all ops */
3707 clear_irp_opcodes_generic_func();
/* GEN(a): install gen_##a as transformer; BAD(a): op must not occur here */
3709 #define GEN(a) { transform_func *func = gen_##a; op_##a->ops.generic = (op_func) func; }
3710 #define BAD(a) op_##a->ops.generic = (op_func)bad_transform
3749 /* transform ops from intrinsic lowering */
3771 GEN(ia32_l_X87toSSE);
3772 GEN(ia32_l_SSEtoX87);
3777 /* we should never see these nodes */
3792 /* handle generic backend nodes */
3802 /* set the register for all Unknown nodes */
/* Max/Min/Mulh ops are created lazily by archop lowering, hence the getters */
3805 op_Max = get_op_Max();
3808 op_Min = get_op_Min();
3811 op_Mulh = get_op_Mulh();
/* Transforms the dependency edges of old_node and attaches the transformed
 * dependencies to new_node. */
3819 static void duplicate_deps(ia32_transform_env_t *env, ir_node *old_node,
3823 int deps = get_irn_deps(old_node);
3825 for(i = 0; i < deps; ++i) {
3826 ir_node *dep = get_irn_dep(old_node, i);
3827 ir_node *new_dep = transform_node(env, dep);
3829 add_irn_dep(new_node, new_dep);
/* Default transformer: copies a node 1:1 into the new irg, transforming its
 * block, inputs and dependencies recursively. */
3833 static ir_node *duplicate_node(ia32_transform_env_t *env, ir_node *node)
3835 ir_graph *irg = env->irg;
3836 dbg_info *dbg = get_irn_dbg_info(node);
3837 ir_mode *mode = get_irn_mode(node);
3838 ir_op *op = get_irn_op(node);
3844 block = transform_node(env, get_nodes_block(node));
3846 arity = get_irn_arity(node);
/* stack allocation for the (small, bounded by node arity) input array */
3847 ins = alloca(arity * sizeof(ins[0]));
3848 for(i = 0; i < arity; ++i) {
3849 ir_node *in = get_irn_n(node, i);
3850 ins[i] = transform_node(env, in);
3853 new_node = new_ir_node(dbg, irg, block,
3854 op, mode, arity, ins);
3855 copy_node_attr(node, new_node);
3856 duplicate_deps(env, node, new_node);
/* Memoized transformation driver: returns the cached image for visited nodes,
 * otherwise dispatches to the op's registered transformer (or duplicate_node
 * when none is registered) and records the result. */
3861 static ir_node *transform_node(ia32_transform_env_t *env, ir_node *node)
3864 ir_op *op = get_irn_op(node);
3866 if(irn_visited(node)) {
3867 assert(get_new_node(node) != NULL);
3868 return get_new_node(node);
/* mark BEFORE transforming so cycles terminate; clear the link in debug
 * builds to catch reads of a not-yet-set image */
3871 mark_irn_visited(node);
3872 DEBUG_ONLY(set_new_node(node, NULL));
3874 if (op->ops.generic) {
3875 transform_func *transform = (transform_func *)op->ops.generic;
3877 new_node = (*transform)(env, node);
3878 assert(new_node != NULL);
3880 new_node = duplicate_node(env, node);
3882 //ir_fprintf(stderr, "%+F -> %+F\n", node, new_node);
3884 set_new_node(node, new_node);
/* mark the new node too, so fix_loops' visited pass can use the same flag */
3885 mark_irn_visited(new_node);
3886 hook_dead_node_elim_subst(current_ir_graph, node, new_node);
/* Post-pass: walks the new graph and replaces any remaining references to old
 * nodes (left in place to allow cycles, see gen_Block) with their transformed
 * images stored in the link field. */
3890 static void fix_loops(ia32_transform_env_t *env, ir_node *node)
3894 if(irn_visited(node))
3896 mark_irn_visited(node);
3898 assert(node_is_in_irgs_storage(env->irg, node));
3900 if(!is_Block(node)) {
3901 ir_node *block = get_nodes_block(node);
3902 ir_node *new_block = (ir_node*) get_irn_link(block);
3904 if(new_block != NULL) {
3905 set_nodes_block(node, new_block);
3909 fix_loops(env, block);
/* patch ordinary inputs */
3912 arity = get_irn_arity(node);
3913 for(i = 0; i < arity; ++i) {
3914 ir_node *in = get_irn_n(node, i);
3915 ir_node *new = (ir_node*) get_irn_link(in);
3917 if(new != NULL && new != in) {
3918 set_irn_n(node, i, new);
/* patch dependency edges the same way */
3925 arity = get_irn_deps(node);
3926 for(i = 0; i < arity; ++i) {
3927 ir_node *in = get_irn_dep(node, i);
3928 ir_node *new = (ir_node*) get_irn_link(in);
3930 if(new != NULL && new != in) {
3931 set_irn_dep(node, i, new);
/* Transforms the node pointed to by *place in-place (used for the code
 * generator's cached Unknown/NoReg nodes). NOTE(review): a NULL guard likely
 * sits on the elided lines between the signature and the assignment. */
3939 static void pre_transform_node(ir_node **place, ia32_transform_env_t *env)
3944 *place = transform_node(env, *place);
/* Drives the whole graph transformation: stashes the old anchors, pre-
 * transforms the anchors other transformers rely on, drains the worklist,
 * then runs fix_loops and installs the new anchors. */
3947 static void transform_nodes(ia32_code_gen_t *cg)
3950 ir_graph *irg = cg->irg;
3952 ia32_transform_env_t env;
3954 hook_dead_node_elim(irg, 1);
3956 inc_irg_visited(irg);
3960 env.visited = get_irg_visited(irg);
3961 env.worklist = new_pdeq();
3962 env.old_anchors = alloca(anchor_max * sizeof(env.old_anchors[0]));
3963 DEBUG_ONLY(env.mod = cg->mod);
3965 old_end = get_irg_end(irg);
3967 /* put all anchor nodes in the worklist */
3968 for(i = 0; i < anchor_max; ++i) {
3969 ir_node *anchor = irg->anchors[i];
3972 pdeq_putr(env.worklist, anchor);
3975 env.old_anchors[i] = anchor;
3976 // and set it to NULL to make sure we don't accidently use it
3977 irg->anchors[i] = NULL;
3980 // pre transform some anchors (so they are available in the other transform
3982 set_irg_bad(irg, transform_node(&env, env.old_anchors[anchor_bad]));
3983 set_irg_no_mem(irg, transform_node(&env, env.old_anchors[anchor_no_mem]));
3984 set_irg_start_block(irg, transform_node(&env, env.old_anchors[anchor_start_block]));
3985 set_irg_start(irg, transform_node(&env, env.old_anchors[anchor_start]));
3986 set_irg_frame(irg, transform_node(&env, env.old_anchors[anchor_frame]));
/* the code generator's cached special nodes must also live in the new irg */
3988 pre_transform_node(&cg->unknown_gp, &env);
3989 pre_transform_node(&cg->unknown_vfp, &env);
3990 pre_transform_node(&cg->unknown_xmm, &env);
3991 pre_transform_node(&cg->noreg_gp, &env);
3992 pre_transform_node(&cg->noreg_vfp, &env);
3993 pre_transform_node(&cg->noreg_xmm, &env);
3995 /* process worklist (this should transform all nodes in the graph) */
3996 while(!pdeq_empty(env.worklist)) {
3997 ir_node *node = pdeq_getl(env.worklist);
3998 transform_node(&env, node);
4001 /* fix loops and set new anchors*/
4002 inc_irg_visited(irg);
4003 for(i = 0; i < anchor_max; ++i) {
4004 ir_node *anchor = env.old_anchors[i];
/* follow the link to the anchor's transformed image before fixing loops */
4008 anchor = get_irn_link(anchor);
4009 fix_loops(&env, anchor);
4010 assert(irg->anchors[i] == NULL || irg->anchors[i] == anchor);
4011 irg->anchors[i] = anchor;
4014 del_pdeq(env.worklist);
4016 hook_dead_node_elim(irg, 0);
/* Public entry point: transforms cg->irg into ia32-Firm. Invalidates stale
 * analysis info, swaps in a fresh obstack + value table, runs the node
 * transformation, then frees the old obstack and rebuilds the edges. */
4019 void ia32_transform_graph(ia32_code_gen_t *cg)
4021 ir_graph *irg = cg->irg;
4022 be_irg_t *birg = cg->birg;
4023 ir_graph *old_current_ir_graph = current_ir_graph;
4024 int old_interprocedural_view = get_interprocedural_view();
4025 struct obstack *old_obst = NULL;
4026 struct obstack *new_obst = NULL;
4028 current_ir_graph = irg;
4029 set_interprocedural_view(0);
4030 register_transformers();
4032 /* most analysis info is wrong after transformation */
4033 free_callee_info(irg);
4035 irg->outs_state = outs_none;
4037 free_loop_information(irg);
4038 set_irg_doms_inconsistent(irg);
4039 be_invalidate_liveness(birg);
4040 be_invalidate_dom_front(birg);
4042 /* create a new obstack */
/* all new nodes are allocated on new_obst; old_obst (holding the pre-
 * transformation nodes) is freed wholesale below */
4043 old_obst = irg->obst;
4044 new_obst = xmalloc(sizeof(*new_obst));
4045 obstack_init(new_obst);
4046 irg->obst = new_obst;
4047 irg->last_node_idx = 0;
4049 /* create new value table for CSE */
4050 del_identities(irg->value_table);
4051 irg->value_table = new_identities();
4053 /* do the main transformation */
4054 transform_nodes(cg);
4056 /* we don't want the globals anchor anymore */
4057 set_irg_globals(irg, new_r_Bad(irg));
4059 /* free the old obstack */
4060 obstack_free(old_obst, 0);
/* restore global state saved at entry */
4064 current_ir_graph = old_current_ir_graph;
4065 set_interprocedural_view(old_interprocedural_view);
4067 /* recalculate edges */
4068 edges_deactivate(irg);
4069 edges_activate(irg);
4073 * Transforms a psi condition.
/* Recursively walks a Psi condition tree, retags the logical ops from mode_b
 * to the Psi's target mode, and turns each Cmp-Proj leaf into an ia32
 * Set/xCmp/vfCmpSet node so the compare result lands in a register. */
4075 static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg) {
4078 /* if the mode is target mode, we have already seen this part of the tree */
4079 if (get_irn_mode(cond) == mode)
4082 assert(get_irn_mode(cond) == mode_b && "logical operator for condition must be mode_b");
4084 set_irn_mode(cond, mode);
4086 for (i = get_irn_arity(cond) - 1; i >= 0; i--) {
4087 ir_node *in = get_irn_n(cond, i);
4089 /* if in is a compare: transform into Set/xCmp */
4091 ir_node *new_op = NULL;
4092 ir_node *cmp = get_Proj_pred(in);
4093 ir_node *cmp_a = get_Cmp_left(cmp);
4094 ir_node *cmp_b = get_Cmp_right(cmp);
4095 dbg_info *dbg = get_irn_dbg_info(cmp);
4096 ir_graph *irg = get_irn_irg(cmp);
4097 ir_node *block = get_nodes_block(cmp);
4098 ir_node *noreg = ia32_new_NoReg_gp(cg);
4099 ir_node *nomem = new_rd_NoMem(irg);
4100 int pnc = get_Proj_proj(in);
4102 /* this is a compare */
4103 if (mode_is_float(mode)) {
4104 /* Psi is float, we need a floating point compare */
4107 ir_mode *m = get_irn_mode(cmp_a);
4109 if (! mode_is_float(m)) {
/* integer operands must be converted before a float compare */
4110 cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, cmp_a, mode);
4111 cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, cmp_b, mode);
4113 else if (m == mode_F) {
4114 /* we convert cmp values always to double, to get correct bitmask with cmpsd */
4115 cmp_a = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_a, cmp_a);
4116 cmp_b = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_b, cmp_b);
4119 new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem);
4120 set_ia32_pncode(new_op, pnc);
4121 SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, cmp));
/* Psi is integer: pick the Set-style constructor matching the operand kind */
4130 construct_binop_func *set_func = NULL;
4132 if (mode_is_float(get_irn_mode(cmp_a))) {
4133 /* 1st case: compare operands are floats */
4138 set_func = new_rd_ia32_xCmpSet;
4142 set_func = new_rd_ia32_vfCmpSet;
4145 pnc &= 7; /* fp compare -> int compare */
4148 /* 2nd case: compare operand are integer too */
4149 set_func = new_rd_ia32_CmpSet;
4152 new_op = set_func(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem);
4153 if(!mode_is_signed(mode))
4154 pnc |= ia32_pn_Cmp_Unsigned;
4156 set_ia32_pncode(new_op, pnc);
4157 set_ia32_am_support(new_op, ia32_am_Source);
4160 /* the the new compare as in */
4161 set_irn_n(cond, i, new_op);
4164 /* another complex condition */
4165 transform_psi_cond(in, mode, cg);
4171 * The Psi selector can be a tree of compares combined with "And"s and "Or"s.
4172 * We create a Set node, respectively a xCmp in case the Psi is a float, for each
4173 * compare, which causes the compare result to be stores in a register. The
4174 * "And"s and "Or"s are transformed later, we just have to set their mode right.
4176 void ia32_transform_psi_cond_tree(ir_node *node, void *env) {
4177 ia32_code_gen_t *cg = env;
4178 ir_node *psi_sel, *new_cmp, *block;
/* irg walker callback: only Psi nodes are of interest */
4183 if (get_irn_opcode(node) != iro_Psi)
4186 psi_sel = get_Psi_cond(node, 0);
4188 /* if psi_cond is a cmp: do nothing, this case is covered by gen_Psi */
4189 if (is_Proj(psi_sel))
4192 //mode = get_irn_mode(node);
4193 // TODO this is probably wrong...
4196 transform_psi_cond(psi_sel, mode, cg);
4198 irg = get_irn_irg(node);
4199 block = get_nodes_block(node);
4201 /* we need to compare the evaluated condition tree with 0 */
4202 mode = get_irn_mode(node);
4203 if (mode_is_float(mode)) {
4204 psi_sel = gen_sse_conv_int2float(cg, NULL, irg, block, psi_sel, NULL, mode);
4205 /* BEWARE: new_r_Const_long works for floating point as well */
4206 new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode, 0));
4207 new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Ne);
/* integer case: Lt|Gt is the unordered-free encoding of "not equal" */
4210 new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode_Iu, 0));
4211 new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Gt | pn_Cmp_Lt);
4214 set_Psi_cond(node, 0, new_cmp);