X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=214711a0463b9d3640214d06af51702aca01ce39;hb=6946cd2478e77048ce672af347daf19d2fc7ec15;hp=c9574323f9953591fa37665be1917e314132d915;hpb=863d31d7a5c8210432fef88b30fc3e8353131538;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index c9574323f..214711a04 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -1,7 +1,27 @@ +/* + * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved. + * + * This file is part of libFirm. + * + * This file may be distributed and/or modified under the terms of the + * GNU General Public License version 2 as published by the Free Software + * Foundation and appearing in the file LICENSE.GPL included in the + * packaging of this file. + * + * Licensees holding valid libFirm Professional Edition licenses may use + * this file in accordance with the libFirm Commercial License. + * Agreement provided with the Software. + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE. + */ + /** - * This file implements the IR transformation from firm into ia32-Firm. - * @author Christian Wuerdig - * $Id$ + * @file + * @brief This file implements the IR transformation from firm into ia32-Firm. + * @author Christian Wuerdig, Matthias Braun + * @version $Id$ */ #ifdef HAVE_CONFIG_H #include "config.h" @@ -38,6 +58,7 @@ #include "../besched.h" #include "../beabi.h" #include "../beutil.h" +#include "../beirg_t.h" #include "bearch_ia32_t.h" #include "ia32_nodes_attr.h" @@ -65,6 +86,11 @@ #define ENT_SFP_ABS "IA32_SFP_ABS" #define ENT_DFP_ABS "IA32_DFP_ABS" +#define mode_vfp (ia32_reg_classes[CLASS_ia32_vfp].mode) +#define mode_xmm (ia32_reg_classes[CLASS_ia32_xmm].mode) + +DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) + typedef struct ia32_transform_env_t { ir_graph *irg; /**< The irg, the node should be created in */ ia32_code_gen_t *cg; /**< The code generator */ @@ -73,7 +99,6 @@ typedef struct ia32_transform_env_t { pdeq *worklist; /**< worklist of nodes that still need to be transformed */ ir_node **old_anchors;/**< the list of anchors nodes in the old irg*/ - DEBUG_ONLY(firm_dbg_module_t *mod;) /**< The firm debugger */ } ia32_transform_env_t; extern ir_op *get_op_Mulh(void); @@ -103,6 +128,14 @@ static ir_node *transform_node(ia32_transform_env_t *env, ir_node *node); static void duplicate_deps(ia32_transform_env_t *env, ir_node *old_node, ir_node *new_node); +static INLINE int mode_needs_gp_reg(ir_mode *mode) +{ + if(mode == mode_fpcw) + return 0; + + return mode_is_int(mode) || mode_is_character(mode) || mode_is_reference(mode); +} + static INLINE void set_new_node(ir_node *old_node, ir_node *new_node) { set_irn_link(old_node, new_node); @@ -225,66 +258,63 @@ static ir_entity *get_entity_for_tv(ia32_code_gen_t *cg, ir_node *cnst) current_ir_graph = rem; pmap_insert(cg->isa->tv_ent, tv, res); - } - else + } else { res = e->value; + } + return res; } /** * Transforms a Const. 
- * - * @param mod the debug module - * @param block the block the new node should belong to - * @param node the ir Const node - * @param mode mode of the Const - * @return the created ia32 Const node */ static ir_node *gen_Const(ia32_transform_env_t *env, ir_node *node) { ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); ir_node *block = transform_node(env, get_nodes_block(node)); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); if (mode_is_float(mode)) { - ir_node *res = NULL; + ir_node *res = NULL; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); + ir_node *load; ir_entity *floatent; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *load; FP_USED(env->cg); if (! USE_SSE2(env->cg)) { cnst_classify_t clss = classify_Const(node); if (clss == CNST_NULL) { - load = new_rd_ia32_vfldz(dbg, irg, block); - res = load; + load = new_rd_ia32_vfldz(dbgi, irg, block); + res = load; } else if (clss == CNST_ONE) { - load = new_rd_ia32_vfld1(dbg, irg, block); - res = load; + load = new_rd_ia32_vfld1(dbgi, irg, block); + res = load; } else { floatent = get_entity_for_tv(env->cg, node); - load = new_rd_ia32_vfld(dbg, irg, block, noreg, noreg, nomem); + load = new_rd_ia32_vfld(dbgi, irg, block, noreg, noreg, nomem); set_ia32_am_support(load, ia32_am_Source); set_ia32_op_type(load, ia32_AddrModeS); set_ia32_am_flavour(load, ia32_am_N); - set_ia32_am_sc(load, ia32_get_ent_ident(floatent)); - res = new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res); + set_ia32_am_sc(load, floatent); + res = new_r_Proj(irg, block, load, mode_vfp, pn_ia32_vfld_res); } + set_ia32_ls_mode(load, mode); } else { floatent = get_entity_for_tv(env->cg, node); - load = new_rd_ia32_xLoad(dbg, irg, block, noreg, noreg, nomem); + load = new_rd_ia32_xLoad(dbgi, irg, block, noreg, noreg, nomem); set_ia32_am_support(load, ia32_am_Source); set_ia32_op_type(load, ia32_AddrModeS); set_ia32_am_flavour(load, ia32_am_N); - set_ia32_am_sc(load, ia32_get_ent_ident(floatent)); - res = new_r_Proj(irg, block, load, mode_E, pn_ia32_xLoad_res); + set_ia32_am_sc(load, floatent); + set_ia32_ls_mode(load, mode); + + res = new_r_Proj(irg, block, load, mode_xmm, pn_ia32_xLoad_res); } - set_ia32_ls_mode(load, mode); SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env->cg, node)); /* Const Nodes before the initial IncSP are a bad idea, because @@ -297,7 +327,7 @@ static ir_node *gen_Const(ia32_transform_env_t *env, ir_node *node) { SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env->cg, node)); return res; } else { - ir_node *cnst = new_rd_ia32_Const(dbg, irg, block); + ir_node *cnst = new_rd_ia32_Const(dbgi, irg, block); /* see above */ if (get_irg_start_block(irg) == block) { @@ -315,29 +345,23 @@ static ir_node *gen_Const(ia32_transform_env_t *env, ir_node *node) { /** * Transforms a SymConst. 
- * - * @param mod the debug module - * @param block the block the new node should belong to - * @param node the ir SymConst node - * @param mode mode of the SymConst - * @return the created ia32 Const node */ static ir_node *gen_SymConst(ia32_transform_env_t *env, ir_node *node) { ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); ir_node *block = transform_node(env, get_nodes_block(node)); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); ir_node *cnst; if (mode_is_float(mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) - cnst = new_rd_ia32_xConst(dbg, irg, block); + cnst = new_rd_ia32_xConst(dbgi, irg, block); else - cnst = new_rd_ia32_vfConst(dbg, irg, block); + cnst = new_rd_ia32_vfConst(dbgi, irg, block); set_ia32_ls_mode(cnst, mode); } else { - cnst = new_rd_ia32_Const(dbg, irg, block); + cnst = new_rd_ia32_Const(dbgi, irg, block); } /* Const Nodes before the initial IncSP are a bad idea, because @@ -356,19 +380,19 @@ static ir_node *gen_SymConst(ia32_transform_env_t *env, ir_node *node) { /** * SSE convert of an integer node into a floating point node. */ -static ir_node *gen_sse_conv_int2float(ia32_code_gen_t *cg, dbg_info *dbg, +static ir_node *gen_sse_conv_int2float(ia32_code_gen_t *cg, dbg_info *dbgi, ir_graph *irg, ir_node *block, ir_node *in, ir_node *old_node, ir_mode *tgt_mode) { - ir_node *noreg = ia32_new_NoReg_gp(cg); - ir_node *nomem = new_rd_NoMem(irg); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *nomem = new_rd_NoMem(irg); ir_node *old_pred = get_Cmp_left(old_node); - ir_mode *in_mode = get_irn_mode(old_pred); - int in_bits = get_mode_size_bits(in_mode); + ir_mode *in_mode = get_irn_mode(old_pred); + int in_bits = get_mode_size_bits(in_mode); + ir_node *conv = new_rd_ia32_Conv_I2FP(dbgi, irg, block, noreg, noreg, in, nomem); - ir_node *conv = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, in, nomem); set_ia32_ls_mode(conv, tgt_mode); - if(in_bits == 32) { + if (in_bits == 32) { set_ia32_am_support(conv, ia32_am_Source); } SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node)); @@ -377,25 +401,25 @@ static ir_node *gen_sse_conv_int2float(ia32_code_gen_t *cg, dbg_info *dbg, } /** -* SSE convert of an float node into a double node. -*/ -static ir_node *gen_sse_conv_f2d(ia32_code_gen_t *cg, dbg_info *dbg, + * SSE convert of an float node into a double node. + */ +static ir_node *gen_sse_conv_f2d(ia32_code_gen_t *cg, dbg_info *dbgi, ir_graph *irg, ir_node *block, ir_node *in, ir_node *old_node) { ir_node *noreg = ia32_new_NoReg_gp(cg); ir_node *nomem = new_rd_NoMem(irg); + ir_node *conv = new_rd_ia32_Conv_FP2FP(dbgi, irg, block, noreg, noreg, in, nomem); - ir_node *conv = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, in, nomem); set_ia32_am_support(conv, ia32_am_Source); - set_ia32_ls_mode(conv, mode_E); + set_ia32_ls_mode(conv, mode_xmm); SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node)); return conv; } /* Generates an entity for a known FP const (used for FP Neg + Abs) */ -ident *ia32_gen_fp_known_const(ia32_known_const_t kct) { +ir_entity *ia32_gen_fp_known_const(ia32_known_const_t kct) { static const struct { const char *tp_name; const char *ent_name; @@ -421,8 +445,8 @@ ident *ia32_gen_fp_known_const(ia32_known_const_t kct) { tp_name = names[kct].tp_name; cnst_str = names[kct].cnst_str; - //mode = kct == ia32_SSIGN || kct == ia32_SABS ? mode_Iu : mode_Lu; - mode = mode_LLu; + mode = kct == ia32_SSIGN || kct == ia32_SABS ? 
mode_Iu : mode_Lu; + //mode = mode_xmm; tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode); tp = new_type_primitive(new_id_from_str(tp_name), mode); ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp); @@ -445,7 +469,7 @@ ident *ia32_gen_fp_known_const(ia32_known_const_t kct) { ent_cache[kct] = ent; } - return get_entity_ident(ent_cache[kct]); + return ent_cache[kct]; } #ifndef NDEBUG @@ -463,9 +487,11 @@ const char *ia32_get_old_node_name(ia32_code_gen_t *cg, ir_node *irn) { /* determine if one operator is an Imm */ static ir_node *get_immediate_op(ir_node *op1, ir_node *op2) { - if (op1) + if (op1) { return is_ia32_Cnst(op1) ? op1 : (is_ia32_Cnst(op2) ? op2 : NULL); - else return is_ia32_Cnst(op2) ? op2 : NULL; + } else { + return is_ia32_Cnst(op2) ? op2 : NULL; + } } /* determine if one operator is not an Imm */ @@ -477,12 +503,12 @@ static void fold_immediate(ia32_transform_env_t *env, ir_node *node, int in1, in ir_node *left; ir_node *right; - if(! (env->cg->opt & IA32_OPT_IMMOPS)) + if (! (env->cg->opt & IA32_OPT_IMMOPS)) return; left = get_irn_n(node, in1); right = get_irn_n(node, in2); - if(!is_ia32_Cnst(right) && is_ia32_Cnst(left)) { + if (! is_ia32_Cnst(right) && is_ia32_Cnst(left)) { /* we can only set right operand to immediate */ if(!is_ia32_commutative(node)) return; @@ -511,18 +537,19 @@ static void fold_immediate(ia32_transform_env_t *env, ir_node *node, int in1, in */ static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *node, ir_node *op1, ir_node *op2, - construct_binop_func *func) { + construct_binop_func *func) +{ + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *new_op1 = transform_node(env, op1); + ir_node *new_op2 = transform_node(env, op2); ir_node *new_node = NULL; ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); + dbg_info *dbgi = get_irn_dbg_info(node); ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); ir_node *nomem = new_NoMem(); - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); - new_node = func(dbg, irg, block, noreg_gp, noreg_gp, new_op1, new_op2, nomem); - if(func == new_rd_ia32_IMul) { + new_node = func(dbgi, irg, block, noreg_gp, noreg_gp, new_op1, new_op2, nomem); + if (func == new_rd_ia32_IMul) { set_ia32_am_support(new_node, ia32_am_Source); } else { set_ia32_am_support(new_node, ia32_am_Full); @@ -550,17 +577,17 @@ static ir_node *gen_binop_float(ia32_transform_env_t *env, ir_node *node, ir_node *op1, ir_node *op2, construct_binop_func *func) { + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *new_op1 = transform_node(env, op1); + ir_node *new_op2 = transform_node(env, op2); ir_node *new_node = NULL; - dbg_info *dbg = get_irn_dbg_info(node); + dbg_info *dbgi = get_irn_dbg_info(node); ir_graph *irg = env->irg; ir_mode *mode = get_irn_mode(node); - ir_node *block = transform_node(env, get_nodes_block(node)); ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); ir_node *nomem = new_NoMem(); - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); - new_node = func(dbg, irg, block, noreg_gp, noreg_gp, new_op1, new_op2, nomem); + new_node = func(dbgi, irg, block, noreg_gp, noreg_gp, new_op1, new_op2, nomem); set_ia32_am_support(new_node, ia32_am_Source); if (is_op_commutative(get_irn_op(node))) { set_ia32_commutative(new_node); @@ -586,19 +613,19 @@ static ir_node *gen_binop_float(ia32_transform_env_t *env, ir_node *node, */ 
static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *node, ir_node *op1, ir_node *op2, - construct_binop_func *func) { - ir_node *new_op = NULL; - dbg_info *dbg = get_irn_dbg_info(node); - ir_graph *irg = env->irg; - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *expr_op; - ir_node *imm_op; - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); - tarval *tv; + construct_binop_func *func) +{ + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *new_op1 = transform_node(env, op1); + ir_node *new_op2 = transform_node(env, op2); + ir_node *new_op = NULL; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_graph *irg = env->irg; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); + ir_node *expr_op; + ir_node *imm_op; + tarval *tv; assert(! mode_is_float(get_irn_mode(node)) && "Shift/Rotate with float not supported"); @@ -631,14 +658,14 @@ static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *node, /* integer operations */ if (imm_op) { /* This is shift/rot with const */ - DB((mod, LEVEL_1, "Shift/Rot with immediate ...")); + DB((dbg, LEVEL_1, "Shift/Rot with immediate ...")); - new_op = func(dbg, irg, block, noreg, noreg, expr_op, noreg, nomem); + new_op = func(dbgi, irg, block, noreg, noreg, expr_op, noreg, nomem); copy_ia32_Immop_attr(new_op, imm_op); } else { /* This is a normal shift/rot */ - DB((mod, LEVEL_1, "Shift/Rot binop ...")); - new_op = func(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem); + DB((dbg, LEVEL_1, "Shift/Rot binop ...")); + new_op = func(dbgi, irg, block, noreg, noreg, new_op1, new_op2, nomem); } /* set AM support */ @@ -661,18 +688,18 @@ static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *node, * @return The constructed ia32 node. 
*/ static ir_node *gen_unop(ia32_transform_env_t *env, ir_node *node, ir_node *op, - construct_unop_func *func) { - ir_node *new_node = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *new_op = transform_node(env, op); - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) - - new_node = func(dbg, irg, block, noreg, noreg, new_op, nomem); - DB((mod, LEVEL_1, "INT unop ...")); + construct_unop_func *func) +{ + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *new_op = transform_node(env, op); + ir_node *new_node = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); + + new_node = func(dbgi, irg, block, noreg, noreg, new_op, nomem); + DB((dbg, LEVEL_1, "INT unop ...")); set_ia32_am_support(new_node, ia32_am_Dest); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env->cg, node)); @@ -688,18 +715,18 @@ static ir_node *gen_unop(ia32_transform_env_t *env, ir_node *node, ir_node *op, * @return the created ia32 Add node */ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { - ir_node *new_op = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *expr_op, *imm_op; - ir_node *op1 = get_Add_left(node); - ir_node *op2 = get_Add_right(node); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op1 = get_Add_left(node); ir_node *new_op1 = transform_node(env, op1); + ir_node *op2 = get_Add_right(node); ir_node *new_op2 = transform_node(env, op2); + ir_node *new_op = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); + ir_node *expr_op, *imm_op; /* Check if immediate optimization is on and */ /* if it's an operation with immediate. */ @@ -717,7 +744,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { } /* integer ADD */ - if (!expr_op) { + if (! 
expr_op) { ia32_immop_type_t tp1 = get_ia32_immop_type(new_op1); ia32_immop_type_t tp2 = get_ia32_immop_type(new_op2); @@ -731,7 +758,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { if (tp1 == ia32_ImmSymConst && tp2 == ia32_ImmSymConst) { /* this is the 2nd case */ - new_op = new_rd_ia32_Lea(dbg, irg, block, new_op1, noreg); + new_op = new_rd_ia32_Lea(dbgi, irg, block, new_op1, noreg); set_ia32_am_sc(new_op, get_ia32_Immop_symconst(new_op2)); set_ia32_am_flavour(new_op, ia32_am_OB); set_ia32_am_support(new_op, ia32_am_Source); @@ -742,7 +769,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { tarval *tv = get_ia32_Immop_tarval(new_op2); long offs = get_tarval_long(tv); - new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg); + new_op = new_rd_ia32_Lea(dbgi, irg, block, noreg, noreg); DBG_OPT_LEA3(new_op1, new_op2, node, new_op); set_ia32_am_sc(new_op, get_ia32_Immop_symconst(new_op1)); @@ -754,7 +781,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { tarval *tv = get_ia32_Immop_tarval(new_op1); long offs = get_tarval_long(tv); - new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg); + new_op = new_rd_ia32_Lea(dbgi, irg, block, noreg, noreg); DBG_OPT_LEA3(new_op1, new_op2, node, new_op); add_ia32_am_offs_int(new_op, offs); @@ -769,7 +796,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { DEBUG_ONLY(ir_fprintf(stderr, "Warning: add with 2 consts not folded: %+F\n", node)); - new_op = new_rd_ia32_Const(dbg, irg, block); + new_op = new_rd_ia32_Const(dbgi, irg, block); set_ia32_Const_tarval(new_op, restv); DBG_OPT_LEA3(new_op1, new_op2, node, new_op); } @@ -777,7 +804,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); return new_op; } else if (imm_op) { - if((env->cg->opt & IA32_OPT_INCDEC) && get_ia32_immop_type(imm_op) == ia32_ImmConst) { + if ((env->cg->opt & IA32_OPT_INCDEC) && get_ia32_immop_type(imm_op) == ia32_ImmConst) { tarval_classification_t class_tv, class_negtv; tarval *tv = get_ia32_Immop_tarval(imm_op); @@ -786,13 +813,13 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { class_negtv = classify_tarval(tarval_neg(tv)); if (class_tv == TV_CLASSIFY_ONE) { /* + 1 == INC */ - DB((env->mod, LEVEL_2, "Add(1) to Inc ... ")); - new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem); + DB((dbg, LEVEL_2, "Add(1) to Inc ... ")); + new_op = new_rd_ia32_Inc(dbgi, irg, block, noreg, noreg, expr_op, nomem); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); return new_op; } else if (class_tv == TV_CLASSIFY_ALL_ONE || class_negtv == TV_CLASSIFY_ONE) { /* + (-1) == DEC */ - DB((env->mod, LEVEL_2, "Add(-1) to Dec ... ")); - new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem); + DB((dbg, LEVEL_2, "Add(-1) to Dec ... 
")); + new_op = new_rd_ia32_Dec(dbgi, irg, block, noreg, noreg, expr_op, nomem); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); return new_op; } @@ -800,7 +827,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { } /* This is a normal add */ - new_op = new_rd_ia32_Add(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem); + new_op = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, new_op1, new_op2, nomem); /* set AM support */ set_ia32_am_support(new_op, ia32_am_Full); @@ -816,7 +843,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env, ir_node *node) { #if 0 static ir_node *create_ia32_Mul(ia32_transform_env_t *env, ir_node *node) { ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); + dbg_info *dbgi = get_irn_dbg_info(node); ir_node *block = transform_node(env, get_nodes_block(node)); ir_node *op1 = get_Mul_left(node); ir_node *op2 = get_Mul_right(node); @@ -826,13 +853,13 @@ static ir_node *create_ia32_Mul(ia32_transform_env_t *env, ir_node *node) { ir_node *proj_EAX, *proj_EDX, *res; ir_node *in[1]; - res = new_rd_ia32_Mul(dbg, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); + res = new_rd_ia32_Mul(dbgi, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); set_ia32_commutative(res); set_ia32_am_support(res, ia32_am_Source); /* imediates are not supported, so no fold_immediate */ - proj_EAX = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_EAX); - proj_EDX = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_EDX); + proj_EAX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EAX); + proj_EDX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EDX); /* keep EAX */ in[0] = proj_EDX; @@ -840,7 +867,7 @@ static ir_node *create_ia32_Mul(ia32_transform_env_t *env, ir_node *node) { return proj_EAX; } -#endif +#endif /* if 0 */ /** @@ -850,8 +877,8 @@ static ir_node *create_ia32_Mul(ia32_transform_env_t *env, ir_node *node) { * @return the created ia32 Mul node */ static ir_node *gen_Mul(ia32_transform_env_t *env, ir_node *node) { - ir_node *op1 = get_Mul_left(node); - ir_node *op2 = get_Mul_right(node); + ir_node *op1 = get_Mul_left(node); + ir_node *op2 = get_Mul_right(node); ir_mode *mode = get_irn_mode(node); if (mode_is_float(mode)) { @@ -862,9 +889,11 @@ static ir_node *gen_Mul(ia32_transform_env_t *env, ir_node *node) { return gen_binop_float(env, node, op1, op2, new_rd_ia32_vfmul); } - // for the lower 32bit of the result it doesn't matter whether we use - // signed or unsigned multiplication so we use IMul as it has fewer - // constraints + /* + for the lower 32bit of the result it doesn't matter whether we use + signed or unsigned multiplication so we use IMul as it has fewer + constraints + */ return gen_binop(env, node, op1, op2, new_rd_ia32_IMul); } @@ -877,23 +906,23 @@ static ir_node *gen_Mul(ia32_transform_env_t *env, ir_node *node) { * @return the created ia32 Mulh node */ static ir_node *gen_Mulh(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *op1 = get_irn_n(node, 0); - ir_node *op2 = get_irn_n(node, 1); - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *proj_EAX, *proj_EDX, *res; - ir_mode *mode = get_irn_mode(node); - ir_node *in[1]; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op1 = get_irn_n(node, 0); + ir_node *new_op1 = transform_node(env, 
op1); + ir_node *op2 = get_irn_n(node, 1); + ir_node *new_op2 = transform_node(env, op2); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_mode *mode = get_irn_mode(node); + ir_node *proj_EAX, *proj_EDX, *res; + ir_node *in[1]; assert(!mode_is_float(mode) && "Mulh with float not supported"); - if(mode_is_signed(mode)) { - res = new_rd_ia32_IMul1OP(dbg, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); + if (mode_is_signed(mode)) { + res = new_rd_ia32_IMul1OP(dbgi, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); } else { - res = new_rd_ia32_Mul(dbg, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); + res = new_rd_ia32_Mul(dbgi, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); } set_ia32_commutative(res); @@ -901,8 +930,8 @@ static ir_node *gen_Mulh(ia32_transform_env_t *env, ir_node *node) { set_ia32_am_support(res, ia32_am_Source); - proj_EAX = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_EAX); - proj_EDX = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_EDX); + proj_EAX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EAX); + proj_EDX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EDX); /* keep EAX */ in[0] = proj_EAX; @@ -968,16 +997,16 @@ static ir_node *gen_Eor(ia32_transform_env_t *env, ir_node *node) { * @return the created ia32 Max node */ static ir_node *gen_Max(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - ir_node *new_op; - ir_mode *mode = get_irn_mode(node); - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *op1 = get_irn_n(node, 0); - ir_node *op2 = get_irn_n(node, 1); - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); - ir_mode *op_mode = get_irn_mode(op1); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op1 = get_irn_n(node, 0); + ir_node *new_op1 = transform_node(env, op1); + ir_node *op2 = get_irn_n(node, 1); + ir_node *new_op2 = transform_node(env, op2); + ir_graph *irg = env->irg; + ir_mode *mode = get_irn_mode(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *op_mode = get_irn_mode(op1); + ir_node *new_op; assert(get_mode_size_bits(mode) == 32); @@ -990,10 +1019,10 @@ static ir_node *gen_Max(ia32_transform_env_t *env, ir_node *node) { } } else { long pnc = pn_Cmp_Gt; - if(!mode_is_signed(op_mode)) { + if (! 
mode_is_signed(op_mode)) { pnc |= ia32_pn_Cmp_Unsigned; } - new_op = new_rd_ia32_CmpCMov(dbg, irg, block, new_op1, new_op2, new_op1, new_op2); + new_op = new_rd_ia32_CmpCMov(dbgi, irg, block, new_op1, new_op2, new_op1, new_op2); set_ia32_pncode(new_op, pnc); set_ia32_am_support(new_op, ia32_am_None); } @@ -1009,16 +1038,16 @@ static ir_node *gen_Max(ia32_transform_env_t *env, ir_node *node) { * @return the created ia32 Min node */ static ir_node *gen_Min(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - ir_node *new_op; - ir_mode *mode = get_irn_mode(node); - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *op1 = get_irn_n(node, 0); - ir_node *op2 = get_irn_n(node, 1); - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); - ir_mode *op_mode = get_irn_mode(op1); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op1 = get_irn_n(node, 0); + ir_node *new_op1 = transform_node(env, op1); + ir_node *op2 = get_irn_n(node, 1); + ir_node *new_op2 = transform_node(env, op2); + ir_graph *irg = env->irg; + ir_mode *mode = get_irn_mode(node); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *op_mode = get_irn_mode(op1); + ir_node *new_op; assert(get_mode_size_bits(mode) == 32); @@ -1031,10 +1060,10 @@ static ir_node *gen_Min(ia32_transform_env_t *env, ir_node *node) { } } else { long pnc = pn_Cmp_Lt; - if(!mode_is_signed(op_mode)) { + if (! mode_is_signed(op_mode)) { pnc |= ia32_pn_Cmp_Unsigned; } - new_op = new_rd_ia32_CmpCMov(dbg, irg, block, new_op1, new_op2, new_op1, new_op2); + new_op = new_rd_ia32_CmpCMov(dbgi, irg, block, new_op1, new_op2, new_op1, new_op2); set_ia32_pncode(new_op, pnc); set_ia32_am_support(new_op, ia32_am_None); } @@ -1051,17 +1080,17 @@ static ir_node *gen_Min(ia32_transform_env_t *env, ir_node *node) { * @return The created ia32 Sub node */ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { - ir_node *new_op = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *op1 = get_Sub_left(node); - ir_node *op2 = get_Sub_right(node); - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op1 = get_Sub_left(node); + ir_node *new_op1 = transform_node(env, op1); + ir_node *op2 = get_Sub_right(node); + ir_node *new_op2 = transform_node(env, op2); + ir_node *new_op = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); ir_node *expr_op, *imm_op; /* Check if immediate optimization is on and */ @@ -1093,7 +1122,7 @@ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { /* linker doesn't support two symconsts */ if (tp1 == ia32_ImmSymConst && tp2 == ia32_ImmSymConst) { /* this is the 2nd case */ - new_op = new_rd_ia32_Lea(dbg, irg, block, new_op1, noreg); + new_op = new_rd_ia32_Lea(dbgi, irg, block, new_op1, noreg); set_ia32_am_sc(new_op, get_ia32_Immop_symconst(op2)); set_ia32_am_sc_sign(new_op); set_ia32_am_flavour(new_op, ia32_am_OB); @@ -1103,7 +1132,7 @@ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { tarval *tv = 
get_ia32_Immop_tarval(new_op2); long offs = get_tarval_long(tv); - new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg); + new_op = new_rd_ia32_Lea(dbgi, irg, block, noreg, noreg); DBG_OPT_LEA3(op1, op2, node, new_op); set_ia32_am_sc(new_op, get_ia32_Immop_symconst(new_op1)); @@ -1115,7 +1144,7 @@ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { tarval *tv = get_ia32_Immop_tarval(new_op1); long offs = get_tarval_long(tv); - new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg); + new_op = new_rd_ia32_Lea(dbgi, irg, block, noreg, noreg); DBG_OPT_LEA3(op1, op2, node, new_op); add_ia32_am_offs_int(new_op, offs); @@ -1131,7 +1160,7 @@ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { DEBUG_ONLY(ir_fprintf(stderr, "Warning: sub with 2 consts not folded: %+F\n", node)); - new_op = new_rd_ia32_Const(dbg, irg, block); + new_op = new_rd_ia32_Const(dbgi, irg, block); set_ia32_Const_tarval(new_op, restv); DBG_OPT_LEA3(new_op1, new_op2, node, new_op); } @@ -1139,7 +1168,7 @@ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); return new_op; } else if (imm_op) { - if((env->cg->opt & IA32_OPT_INCDEC) && get_ia32_immop_type(imm_op) == ia32_ImmConst) { + if ((env->cg->opt & IA32_OPT_INCDEC) && get_ia32_immop_type(imm_op) == ia32_ImmConst) { tarval_classification_t class_tv, class_negtv; tarval *tv = get_ia32_Immop_tarval(imm_op); @@ -1148,13 +1177,13 @@ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { class_negtv = classify_tarval(tarval_neg(tv)); if (class_tv == TV_CLASSIFY_ONE) { - DB((env->mod, LEVEL_2, "Sub(1) to Dec ... ")); - new_op = new_rd_ia32_Dec(dbg, irg, block, noreg, noreg, expr_op, nomem); + DB((dbg, LEVEL_2, "Sub(1) to Dec ... ")); + new_op = new_rd_ia32_Dec(dbgi, irg, block, noreg, noreg, expr_op, nomem); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); return new_op; } else if (class_tv == TV_CLASSIFY_ALL_ONE || class_negtv == TV_CLASSIFY_ONE) { - DB((env->mod, LEVEL_2, "Sub(-1) to Inc ... ")); - new_op = new_rd_ia32_Inc(dbg, irg, block, noreg, noreg, expr_op, nomem); + DB((dbg, LEVEL_2, "Sub(-1) to Inc ... 
")); + new_op = new_rd_ia32_Inc(dbgi, irg, block, noreg, noreg, expr_op, nomem); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); return new_op; } @@ -1162,7 +1191,7 @@ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { } /* This is a normal sub */ - new_op = new_rd_ia32_Sub(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem); + new_op = new_rd_ia32_Sub(dbgi, irg, block, noreg, noreg, new_op1, new_op2, nomem); /* set AM support */ set_ia32_am_support(new_op, ia32_am_Full); @@ -1188,36 +1217,60 @@ static ir_node *gen_Sub(ia32_transform_env_t *env, ir_node *node) { */ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *node, ir_node *dividend, ir_node *divisor, - ia32_op_flavour_t dm_flav) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_node *block = transform_node(env, get_nodes_block(node)); + ia32_op_flavour_t dm_flav) +{ + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *new_dividend = transform_node(env, dividend); + ir_node *new_divisor = transform_node(env, divisor); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_node *res, *proj_div, *proj_mod; ir_node *edx_node, *cltd; - ir_node *in_keep[1]; + ir_node *in_keep[2]; ir_node *mem, *new_mem; ir_node *projs[pn_DivMod_max]; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *new_dividend = transform_node(env, dividend); - ir_node *new_divisor = transform_node(env, divisor); + int i; ia32_collect_Projs(node, projs, pn_DivMod_max); switch (dm_flav) { case flavour_Div: mem = get_Div_mem(node); - mode = get_irn_mode(be_get_Proj_for_pn(node, pn_Div_res)); + proj_div = be_get_Proj_for_pn(node, pn_Div_res); + if (proj_div == NULL) { + /* this can happen when we have divs left that could + throw a division by zero exception... */ + mode = mode_Is; + } else { + mode = get_irn_mode(proj_div); + } break; case flavour_Mod: mem = get_Mod_mem(node); - mode = get_irn_mode(be_get_Proj_for_pn(node, pn_Mod_res)); + proj_mod = be_get_Proj_for_pn(node, pn_Mod_res); + if (proj_mod == NULL) { + /* this can happen when we have divs left that could + throw a division by zero exception... */ + mode = mode_Is; + } else { + mode = get_irn_mode(proj_mod); + } break; case flavour_DivMod: mem = get_DivMod_mem(node); proj_div = be_get_Proj_for_pn(node, pn_DivMod_res_div); proj_mod = be_get_Proj_for_pn(node, pn_DivMod_res_mod); - mode = proj_div ? get_irn_mode(proj_div) : get_irn_mode(proj_mod); + if (proj_div != NULL) { + mode = get_irn_mode(proj_div); + } else if(proj_mod != NULL) { + mode = get_irn_mode(proj_mod); + } else { + /* this can happen when we have divs left that could + throw a division by zero exception... 
*/ + mode = mode_Is; + } break; default: panic("invalid divmod flavour!"); @@ -1226,19 +1279,19 @@ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *node, if (mode_is_signed(mode)) { /* in signed mode, we need to sign extend the dividend */ - cltd = new_rd_ia32_Cltd(dbg, irg, block, new_dividend); - new_dividend = new_rd_Proj(dbg, irg, block, cltd, mode_Iu, pn_ia32_Cltd_EAX); - edx_node = new_rd_Proj(dbg, irg, block, cltd, mode_Iu, pn_ia32_Cltd_EDX); + cltd = new_rd_ia32_Cltd(dbgi, irg, block, new_dividend); + new_dividend = new_rd_Proj(dbgi, irg, block, cltd, mode_Iu, pn_ia32_Cltd_EAX); + edx_node = new_rd_Proj(dbgi, irg, block, cltd, mode_Iu, pn_ia32_Cltd_EDX); } else { - edx_node = new_rd_ia32_Const(dbg, irg, block); + edx_node = new_rd_ia32_Const(dbgi, irg, block); add_irn_dep(edx_node, be_abi_get_start_barrier(env->cg->birg->abi)); set_ia32_Immop_tarval(edx_node, get_tarval_null(mode_Iu)); } - if(mode_is_signed(mode)) { - res = new_rd_ia32_IDiv(dbg, irg, block, noreg, noreg, new_dividend, edx_node, new_divisor, new_mem, dm_flav); + if (mode_is_signed(mode)) { + res = new_rd_ia32_IDiv(dbgi, irg, block, noreg, noreg, new_dividend, edx_node, new_divisor, new_mem, dm_flav); } else { - res = new_rd_ia32_Div(dbg, irg, block, noreg, noreg, new_dividend, edx_node, new_divisor, new_mem, dm_flav); + res = new_rd_ia32_Div(dbgi, irg, block, noreg, noreg, new_dividend, edx_node, new_divisor, new_mem, dm_flav); } /* Matze: code can't handle this at the moment... */ @@ -1249,48 +1302,20 @@ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *node, set_ia32_n_res(res, 2); - /* Only one proj is used -> We must add a second proj and */ - /* connect this one to a Keep node to eat up the second */ - /* destroyed register. */ - /* We also renumber the Firm projs into ia32 projs. */ - - switch (get_irn_opcode(node)) { - case iro_Div: - /* add Proj-Keep for mod res */ - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_ia32_Div_mod_res); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); - break; - case iro_Mod: - /* add Proj-Keep for div res */ - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_ia32_Div_div_res); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); - break; - case iro_DivMod: - /* check, which Proj-Keep, we need to add */ - proj_div = be_get_Proj_for_pn(node, pn_DivMod_res_div); - proj_mod = be_get_Proj_for_pn(node, pn_DivMod_res_mod); - - if (proj_div && proj_mod) { - /* nothing to be done */ - } - else if (! proj_div && ! proj_mod) { - assert(0 && "Missing DivMod result proj"); - } - else if (! 
proj_div) { - /* We have only mod result: add div res Proj-Keep */ - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_ia32_Div_div_res); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); - } - else { - /* We have only div result: add mod res Proj-Keep */ - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_ia32_Div_mod_res); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); - } - break; - default: - assert(0 && "Div, Mod, or DivMod expected."); - break; + /* check, which Proj-Keep, we need to add */ + i = 0; + if (proj_div == NULL) { + /* We have only mod result: add div res Proj-Keep */ + in_keep[i] = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_ia32_Div_div_res); + ++i; + } + if (proj_mod == NULL) { + /* We have only div result: add mod res Proj-Keep */ + in_keep[i] = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_ia32_Div_mod_res); + ++i; } + if(i > 0) + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, i, in_keep); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node)); @@ -1335,32 +1360,32 @@ static ir_node *gen_DivMod(ia32_transform_env_t *env, ir_node *node) { * @return The created ia32 xDiv node */ static ir_node *gen_Quot(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *new_op; - ir_node *nomem = new_rd_NoMem(env->irg); - ir_node *op1 = get_Quot_left(node); - ir_node *op2 = get_Quot_right(node); - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op1 = get_Quot_left(node); + ir_node *new_op1 = transform_node(env, op1); + ir_node *op2 = get_Quot_right(node); + ir_node *new_op2 = transform_node(env, op2); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_rd_NoMem(env->irg); + ir_node *new_op; FP_USED(env->cg); if (USE_SSE2(env->cg)) { ir_mode *mode = get_irn_mode(op1); if (is_ia32_xConst(new_op2)) { - new_op = new_rd_ia32_xDiv(dbg, irg, block, noreg, noreg, new_op1, noreg, nomem); + new_op = new_rd_ia32_xDiv(dbgi, irg, block, noreg, noreg, new_op1, noreg, nomem); set_ia32_am_support(new_op, ia32_am_None); copy_ia32_Immop_attr(new_op, new_op2); } else { - new_op = new_rd_ia32_xDiv(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem); + new_op = new_rd_ia32_xDiv(dbgi, irg, block, noreg, noreg, new_op1, new_op2, nomem); // Matze: disabled for now, spillslot coalescer fails //set_ia32_am_support(new_op, ia32_am_Source); } set_ia32_ls_mode(new_op, mode); } else { - new_op = new_rd_ia32_vfdiv(dbg, irg, block, noreg, noreg, new_op1, new_op2, nomem); + new_op = new_rd_ia32_vfdiv(dbgi, irg, block, noreg, noreg, new_op1, new_op2, nomem); // Matze: disabled for now (spillslot coalescer fails) //set_ia32_am_support(new_op, ia32_am_Source); } @@ -1460,15 +1485,15 @@ static ir_node *gen_Rot(ia32_transform_env_t *env, ir_node *node) { ir_node *left = get_Add_left(add); ir_node *right = get_Add_right(add); if (is_Const(right)) { - tarval *tv = get_Const_tarval(right); - ir_mode *mode = get_irn_mode(node); - long bits = get_mode_size_bits(mode); + tarval *tv = get_Const_tarval(right); + ir_mode *mode = get_irn_mode(node); + long bits = get_mode_size_bits(mode); if (get_irn_op(left) == op_Minus && tarval_is_long(tv) && 
get_tarval_long(tv) == bits) { - DB((env->mod, LEVEL_1, "RotL into RotR ... ")); + DB((dbg, LEVEL_1, "RotL into RotR ... ")); rotate = gen_RotR(env, node, op1, get_Minus_op(left)); } } @@ -1491,13 +1516,13 @@ static ir_node *gen_Rot(ia32_transform_env_t *env, ir_node *node) { * @return The created ia32 Minus node */ ir_node *gen_Minus_ex(ia32_transform_env_t *env, ir_node *node, ir_node *op) { - ident *name; - ir_node *res; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_mode *mode = get_irn_mode(node); - int size; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_entity *ent; + ir_node *res; + int size; if (mode_is_float(mode)) { ir_node *new_op = transform_node(env, op); @@ -1507,16 +1532,16 @@ ir_node *gen_Minus_ex(ia32_transform_env_t *env, ir_node *node, ir_node *op) { ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg); ir_node *nomem = new_rd_NoMem(irg); - res = new_rd_ia32_xXor(dbg, irg, block, noreg_gp, noreg_gp, new_op, noreg_fp, nomem); + res = new_rd_ia32_xXor(dbgi, irg, block, noreg_gp, noreg_gp, new_op, noreg_fp, nomem); - size = get_mode_size_bits(mode); - name = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN); + size = get_mode_size_bits(mode); + ent = ia32_gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN); - set_ia32_am_sc(res, name); + set_ia32_am_sc(res, ent); set_ia32_op_type(res, ia32_AddrModeS); set_ia32_ls_mode(res, mode); } else { - res = new_rd_ia32_vfchs(dbg, irg, block, new_op); + res = new_rd_ia32_vfchs(dbgi, irg, block, new_op); } } else { res = gen_unop(env, node, op, new_rd_ia32_Neg); @@ -1560,28 +1585,28 @@ static ir_node *gen_Not(ia32_transform_env_t *env, ir_node *node) { * @return The created ia32 Abs node */ static ir_node *gen_Abs(ia32_transform_env_t *env, ir_node *node) { - ir_node *res, *p_eax, *p_edx; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_mode *mode = get_irn_mode(node); - ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); - ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *op = get_Abs_op(node); - ir_node *new_op = transform_node(env, op); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op = get_Abs_op(node); + ir_node *new_op = transform_node(env, op); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); + ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg); + ir_node *nomem = new_NoMem(); + ir_node *res, *p_eax, *p_edx; int size; - ident *name; + ir_entity *ent; if (mode_is_float(mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) { - res = new_rd_ia32_xAnd(dbg,irg, block, noreg_gp, noreg_gp, new_op, noreg_fp, nomem); + res = new_rd_ia32_xAnd(dbgi,irg, block, noreg_gp, noreg_gp, new_op, noreg_fp, nomem); - size = get_mode_size_bits(mode); - name = ia32_gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS); + size = get_mode_size_bits(mode); + ent = ia32_gen_fp_known_const(size == 32 ? 
ia32_SABS : ia32_DABS); - set_ia32_am_sc(res, name); + set_ia32_am_sc(res, ent); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node)); @@ -1589,21 +1614,21 @@ static ir_node *gen_Abs(ia32_transform_env_t *env, ir_node *node) { set_ia32_ls_mode(res, mode); } else { - res = new_rd_ia32_vfabs(dbg, irg, block, new_op); + res = new_rd_ia32_vfabs(dbgi, irg, block, new_op); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node)); } } else { - res = new_rd_ia32_Cltd(dbg, irg, block, new_op); + res = new_rd_ia32_Cltd(dbgi, irg, block, new_op); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node)); - p_eax = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_EAX); - p_edx = new_rd_Proj(dbg, irg, block, res, mode_Iu, pn_EDX); + p_eax = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EAX); + p_edx = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EDX); - res = new_rd_ia32_Xor(dbg, irg, block, noreg_gp, noreg_gp, p_eax, p_edx, nomem); + res = new_rd_ia32_Xor(dbgi, irg, block, noreg_gp, noreg_gp, p_eax, p_edx, nomem); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node)); - res = new_rd_ia32_Sub(dbg, irg, block, noreg_gp, noreg_gp, res, p_edx, nomem); + res = new_rd_ia32_Sub(dbgi, irg, block, noreg_gp, noreg_gp, res, p_edx, nomem); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node)); } @@ -1619,20 +1644,20 @@ static ir_node *gen_Abs(ia32_transform_env_t *env, ir_node *node) { * @return the created ia32 Load node */ static ir_node *gen_Load(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_mode *mode = get_Load_mode(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *ptr = get_Load_ptr(node); - ir_node *new_ptr = transform_node(env, ptr); - ir_node *lptr = new_ptr; - ir_node *mem = get_Load_mem(node); - ir_node *new_mem = transform_node(env, mem); - int is_imm = 0; - ir_node *new_op; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *ptr = get_Load_ptr(node); + ir_node *new_ptr = transform_node(env, ptr); + ir_node *mem = get_Load_mem(node); + ir_node *new_mem = transform_node(env, mem); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_mode *mode = get_Load_mode(node); + ir_node *lptr = new_ptr; + int is_imm = 0; + ir_node *new_op; + ir_node *projs[pn_Load_max]; ia32_am_flavour_t am_flav = ia32_am_B; - ir_node *projs[pn_Load_max]; ia32_collect_Projs(node, projs, pn_Load_max); @@ -1655,12 +1680,12 @@ static ir_node *gen_Load(ia32_transform_env_t *env, ir_node *node) { if (mode_is_float(mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) { - new_op = new_rd_ia32_xLoad(dbg, irg, block, lptr, noreg, new_mem); + new_op = new_rd_ia32_xLoad(dbgi, irg, block, lptr, noreg, new_mem); } else { - new_op = new_rd_ia32_vfld(dbg, irg, block, lptr, noreg, new_mem); + new_op = new_rd_ia32_vfld(dbgi, irg, block, lptr, noreg, new_mem); } } else { - new_op = new_rd_ia32_Load(dbg, irg, block, lptr, noreg, new_mem); + new_op = new_rd_ia32_Load(dbgi, irg, block, lptr, noreg, new_mem); } /* base is a constant address */ @@ -1682,10 +1707,10 @@ static ir_node *gen_Load(ia32_transform_env_t *env, ir_node *node) { set_ia32_am_flavour(new_op, am_flav); set_ia32_ls_mode(new_op, mode); - /* make sure we are scheduled behind the intial IncSP/Barrier + /* make sure we are scheduled behind the initial IncSP/Barrier * to avoid spills being placed before it */ - 
if(block == get_irg_start_block(irg)) { + if (block == get_irg_start_block(irg)) { add_irn_dep(new_op, get_irg_frame(irg)); } @@ -1703,21 +1728,21 @@ static ir_node *gen_Load(ia32_transform_env_t *env, ir_node *node) { * @return the created ia32 Store node */ static ir_node *gen_Store(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *ptr = get_Store_ptr(node); - ir_node *new_ptr = transform_node(env, ptr); - ir_node *sptr = new_ptr; - ir_node *val = get_Store_value(node); - ir_node *new_val = transform_node(env, val); - ir_node *mem = get_Store_mem(node); - ir_node *new_mem = transform_node(env, mem); - ir_mode *mode = get_irn_mode(val); - ir_node *sval = new_val; - int is_imm = 0; - ir_node *new_op; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *ptr = get_Store_ptr(node); + ir_node *new_ptr = transform_node(env, ptr); + ir_node *val = get_Store_value(node); + ir_node *new_val = transform_node(env, val); + ir_node *mem = get_Store_mem(node); + ir_node *new_mem = transform_node(env, mem); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *sptr = new_ptr; + ir_mode *mode = get_irn_mode(val); + ir_node *sval = new_val; + int is_imm = 0; + ir_node *new_op; ia32_am_flavour_t am_flav = ia32_am_B; if (is_ia32_Const(new_val)) { @@ -1734,14 +1759,14 @@ static ir_node *gen_Store(ia32_transform_env_t *env, ir_node *node) { if (mode_is_float(mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) { - new_op = new_rd_ia32_xStore(dbg, irg, block, sptr, noreg, sval, new_mem); + new_op = new_rd_ia32_xStore(dbgi, irg, block, sptr, noreg, sval, new_mem); } else { - new_op = new_rd_ia32_vfst(dbg, irg, block, sptr, noreg, sval, new_mem); + new_op = new_rd_ia32_vfst(dbgi, irg, block, sptr, noreg, sval, new_mem); } } else if (get_mode_size_bits(mode) == 8) { - new_op = new_rd_ia32_Store8Bit(dbg, irg, block, sptr, noreg, sval, new_mem); + new_op = new_rd_ia32_Store8Bit(dbgi, irg, block, sptr, noreg, sval, new_mem); } else { - new_op = new_rd_ia32_Store(dbg, irg, block, sptr, noreg, sval, new_mem); + new_op = new_rd_ia32_Store(dbgi, irg, block, sptr, noreg, sval, new_mem); } /* stored const is an immediate value */ @@ -1783,9 +1808,9 @@ static ir_node *gen_Store(ia32_transform_env_t *env, ir_node *node) { * @return The transformed node. 
*/ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); ir_node *block = transform_node(env, get_nodes_block(node)); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); ir_node *sel = get_Cond_selector(node); ir_mode *sel_mode = get_irn_mode(sel); ir_node *res = NULL; @@ -1793,13 +1818,13 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { ir_node *cnst, *expr; if (is_Proj(sel) && sel_mode == mode_b) { - ir_node *nomem = new_NoMem(); - ir_node *pred = get_Proj_pred(sel); - ir_node *cmp_a = get_Cmp_left(pred); + ir_node *pred = get_Proj_pred(sel); + ir_node *cmp_a = get_Cmp_left(pred); ir_node *new_cmp_a = transform_node(env, cmp_a); - ir_node *cmp_b = get_Cmp_right(pred); + ir_node *cmp_b = get_Cmp_right(pred); ir_node *new_cmp_b = transform_node(env, cmp_b); - ir_mode *cmp_mode = get_irn_mode(cmp_a); + ir_mode *cmp_mode = get_irn_mode(cmp_a); + ir_node *nomem = new_NoMem(); int pnc = get_Proj_proj(sel); if(mode_is_float(cmp_mode) || !mode_is_signed(cmp_mode)) { @@ -1816,7 +1841,7 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { pnc = get_inversed_pnc(pnc); } - if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) { + if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_needs_gp_reg(get_irn_mode(expr))) { if (get_ia32_immop_type(cnst) == ia32_ImmConst && classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) { @@ -1832,7 +1857,7 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { is_and = (is_ia32_ImmConst(expr) || is_ia32_ImmSymConst(expr)); } - res = new_rd_ia32_TestJmp(dbg, irg, block, op1, op2); + res = new_rd_ia32_TestJmp(dbgi, irg, block, op1, op2); set_ia32_pncode(res, pnc); if (is_and) { @@ -1847,14 +1872,15 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { if (mode_is_float(cmp_mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) { - res = new_rd_ia32_xCondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem); + res = new_rd_ia32_xCondJmp(dbgi, irg, block, noreg, noreg, expr, noreg, nomem); set_ia32_ls_mode(res, cmp_mode); } else { assert(0); } } else { - res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, expr, noreg, nomem); + assert(get_mode_size_bits(cmp_mode) == 32); + res = new_rd_ia32_CondJmp(dbgi, irg, block, noreg, noreg, expr, noreg, nomem); } copy_ia32_Immop_attr(res, cnst); } @@ -1864,17 +1890,18 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { if (mode_is_float(cmp_mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) { - res = new_rd_ia32_xCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); + res = new_rd_ia32_xCondJmp(dbgi, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); set_ia32_ls_mode(res, cmp_mode); } else { ir_node *proj_eax; - res = new_rd_ia32_vfCondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); + res = new_rd_ia32_vfCondJmp(dbgi, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); proj_eax = new_r_Proj(irg, block, res, mode_Iu, pn_ia32_vfCondJmp_temp_reg_eax); be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, &proj_eax); } } else { - res = new_rd_ia32_CondJmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); + assert(get_mode_size_bits(cmp_mode) == 32); + res = new_rd_ia32_CondJmp(dbgi, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); set_ia32_commutative(res); } } @@ -1888,9 +1915,9 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { } else { /* determine the smallest 
switch case value */ + ir_node *new_sel = transform_node(env, sel); int switch_min = INT_MAX; const ir_edge_t *edge; - ir_node *new_sel = transform_node(env, sel); foreach_out_edge(node, edge) { int pn = get_Proj_proj(get_edge_src_irn(edge)); @@ -1899,7 +1926,7 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { if (switch_min) { /* if smallest switch case is not 0 we need an additional sub */ - res = new_rd_ia32_Lea(dbg, irg, block, new_sel, noreg); + res = new_rd_ia32_Lea(dbgi, irg, block, new_sel, noreg); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, node)); add_ia32_am_offs_int(res, -switch_min); set_ia32_am_flavour(res, ia32_am_OB); @@ -1907,7 +1934,7 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { set_ia32_op_type(res, ia32_AddrModeS); } - res = new_rd_ia32_SwitchJmp(dbg, irg, block, switch_min ? res : new_sel, mode_T); + res = new_rd_ia32_SwitchJmp(dbgi, irg, block, switch_min ? res : new_sel, mode_T); set_ia32_pncode(res, get_Cond_defaultProj(node)); } @@ -1924,9 +1951,6 @@ static ir_node *gen_Cond(ia32_transform_env_t *env, ir_node *node) { * @return The transformed node. */ static ir_node *gen_CopyB(ia32_transform_env_t *env, ir_node *node) { - ir_node *res = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); ir_node *block = transform_node(env, get_nodes_block(node)); ir_node *src = get_CopyB_src(node); ir_node *new_src = transform_node(env, src); @@ -1934,6 +1958,9 @@ static ir_node *gen_CopyB(ia32_transform_env_t *env, ir_node *node) { ir_node *new_dst = transform_node(env, dst); ir_node *mem = get_CopyB_mem(node); ir_node *new_mem = transform_node(env, mem); + ir_node *res = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); int size = get_type_size_bytes(get_CopyB_type(node)); ir_mode *dst_mode = get_irn_mode(dst); ir_mode *src_mode = get_irn_mode(src); @@ -1946,11 +1973,11 @@ static ir_node *gen_CopyB(ia32_transform_env_t *env, ir_node *node) { rem = size & 0x3; /* size % 4 */ size >>= 2; - res = new_rd_ia32_Const(dbg, irg, block); + res = new_rd_ia32_Const(dbgi, irg, block); add_irn_dep(res, be_abi_get_start_barrier(env->cg->birg->abi)); set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is)); - res = new_rd_ia32_CopyB(dbg, irg, block, new_dst, new_src, res, new_mem); + res = new_rd_ia32_CopyB(dbgi, irg, block, new_dst, new_src, res, new_mem); set_ia32_Immop_tarval(res, new_tarval_from_long(rem, mode_Is)); /* ok: now attach Proj's because rep movsd will destroy esi, edi and ecx */ @@ -1960,7 +1987,7 @@ static ir_node *gen_CopyB(ia32_transform_env_t *env, ir_node *node) { be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 3, in); } else { - res = new_rd_ia32_CopyB_i(dbg, irg, block, new_dst, new_src, new_mem); + res = new_rd_ia32_CopyB_i(dbgi, irg, block, new_dst, new_src, new_mem); set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is)); /* ok: now attach Proj's because movsd will destroy esi and edi */ @@ -1983,7 +2010,7 @@ static ir_node *gen_CopyB(ia32_transform_env_t *env, ir_node *node) { * @return The transformed node. 
*/ static ir_node *gen_Mux(ia32_transform_env_t *env, ir_node *node) { - ir_node *new_op = new_rd_ia32_CMov(env->dbg, env->irg, env->block, \ + ir_node *new_op = new_rd_ia32_CMov(env->dbgi, env->irg, env->block, \ get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env->mode); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); @@ -2003,18 +2030,18 @@ typedef ir_node *cmov_func_t(dbg_info *db, ir_graph *irg, ir_node *block, * @return The transformed node. */ static ir_node *gen_Psi(ia32_transform_env_t *env, ir_node *node) { - ia32_code_gen_t *cg = env->cg; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *cmp_proj = get_Mux_sel(node); - ir_node *psi_true = get_Psi_val(node, 0); - ir_node *psi_default = get_Psi_default(node); - ir_node *new_psi_true = transform_node(env, psi_true); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *psi_true = get_Psi_val(node, 0); + ir_node *new_psi_true = transform_node(env, psi_true); + ir_node *psi_default = get_Psi_default(node); ir_node *new_psi_default = transform_node(env, psi_default); - ir_node *noreg = ia32_new_NoReg_gp(cg); - ir_node *nomem = new_rd_NoMem(irg); + ia32_code_gen_t *cg = env->cg; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *cmp_proj = get_Mux_sel(node); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *nomem = new_rd_NoMem(irg); ir_node *cmp, *cmp_a, *cmp_b, *and1, *and2, *new_op = NULL; ir_node *new_cmp_a, *new_cmp_b; ir_mode *cmp_mode; @@ -2022,10 +2049,10 @@ static ir_node *gen_Psi(ia32_transform_env_t *env, ir_node *node) { assert(get_irn_mode(cmp_proj) == mode_b && "Condition for Psi must have mode_b"); - cmp = get_Proj_pred(cmp_proj); - cmp_a = get_Cmp_left(cmp); - cmp_b = get_Cmp_right(cmp); - cmp_mode = get_irn_mode(cmp_a); + cmp = get_Proj_pred(cmp_proj); + cmp_a = get_Cmp_left(cmp); + cmp_b = get_Cmp_right(cmp); + cmp_mode = get_irn_mode(cmp_a); new_cmp_a = transform_node(env, cmp_a); new_cmp_b = transform_node(env, cmp_b); @@ -2048,35 +2075,35 @@ static ir_node *gen_Psi(ia32_transform_env_t *env, ir_node *node) { /* in case the compare operands are int, we move them into xmm register */ if (! 
mode_is_float(get_irn_mode(cmp_a))) { - new_cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, new_cmp_a, node, mode_E); - new_cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, new_cmp_b, node, mode_E); + new_cmp_a = gen_sse_conv_int2float(cg, dbgi, irg, block, new_cmp_a, node, mode_xmm); + new_cmp_b = gen_sse_conv_int2float(cg, dbgi, irg, block, new_cmp_b, node, mode_xmm); pnc |= 8; /* transform integer compare to fp compare */ } - new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, new_cmp_a, new_cmp_b, nomem); + new_op = new_rd_ia32_xCmp(dbgi, irg, block, noreg, noreg, new_cmp_a, new_cmp_b, nomem); set_ia32_pncode(new_op, pnc); set_ia32_am_support(new_op, ia32_am_Source); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); - and1 = new_rd_ia32_xAnd(dbg, irg, block, noreg, noreg, new_psi_true, new_op, nomem); + and1 = new_rd_ia32_xAnd(dbgi, irg, block, noreg, noreg, new_psi_true, new_op, nomem); set_ia32_am_support(and1, ia32_am_None); set_ia32_commutative(and1); SET_IA32_ORIG_NODE(and1, ia32_get_old_node_name(cg, node)); - and2 = new_rd_ia32_xAndNot(dbg, irg, block, noreg, noreg, new_op, new_psi_default, nomem); + and2 = new_rd_ia32_xAndNot(dbgi, irg, block, noreg, noreg, new_op, new_psi_default, nomem); set_ia32_am_support(and2, ia32_am_None); set_ia32_commutative(and2); SET_IA32_ORIG_NODE(and2, ia32_get_old_node_name(cg, node)); - new_op = new_rd_ia32_xOr(dbg, irg, block, noreg, noreg, and1, and2, nomem); + new_op = new_rd_ia32_xOr(dbgi, irg, block, noreg, noreg, and1, and2, nomem); set_ia32_am_support(new_op, ia32_am_None); set_ia32_commutative(new_op); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); } else { /* x87 FPU */ - new_op = new_rd_ia32_vfCMov(dbg, irg, block, new_cmp_a, new_cmp_b, new_psi_true, new_psi_default); + new_op = new_rd_ia32_vfCMov(dbgi, irg, block, new_cmp_a, new_cmp_b, new_psi_true, new_psi_default); set_ia32_pncode(new_op, pnc); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); } @@ -2113,18 +2140,18 @@ static ir_node *gen_Psi(ia32_transform_env_t *env, ir_node *node) { if (is_ia32_Const_0(new_cmp_b) && is_Proj(new_cmp_a) && (is_ia32_And(get_Proj_pred(new_cmp_a)) || is_ia32_Or(get_Proj_pred(new_cmp_a)))) { if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) { /* first case for SETcc: default is 0, set to 1 iff condition is true */ - new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, new_cmp_a); + new_op = new_rd_ia32_PsiCondSet(dbgi, irg, block, new_cmp_a); set_ia32_pncode(new_op, pnc); } else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) { /* second case for SETcc: default is 1, set to 0 iff condition is true: */ /* we invert condition and set default to 0 */ - new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, new_cmp_a); + new_op = new_rd_ia32_PsiCondSet(dbgi, irg, block, new_cmp_a); set_ia32_pncode(new_op, get_inversed_pnc(pnc)); } else { /* otherwise: use CMOVcc */ - new_op = new_rd_ia32_PsiCondCMov(dbg, irg, block, new_cmp_a, new_psi_true, new_psi_default); + new_op = new_rd_ia32_PsiCondCMov(dbgi, irg, block, new_cmp_a, new_psi_true, new_psi_default); set_ia32_pncode(new_op, pnc); } @@ -2146,7 +2173,7 @@ static ir_node *gen_Psi(ia32_transform_env_t *env, ir_node *node) { } else { /* otherwise: use CMOVcc */ - new_op = cmov_func(dbg, irg, block, new_cmp_a, new_cmp_b, new_psi_true, new_psi_default); + new_op = cmov_func(dbgi, irg, block, new_cmp_a, new_cmp_b, new_psi_true, new_psi_default); set_ia32_pncode(new_op, pnc); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); } @@ 
-2189,18 +2216,18 @@ static ir_node *gen_Psi(ia32_transform_env_t *env, ir_node *node) { * Create a conversion from x87 state register to general purpose. */ static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_node *node) { - ia32_code_gen_t *cg = env->cg; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(cg); - ir_node *op = get_Conv_op(node); - ir_node *new_op = transform_node(env, op); - ir_node *fist, *load; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op = get_Conv_op(node); + ir_node *new_op = transform_node(env, op); + ia32_code_gen_t *cg = env->cg; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(cg); ir_node *trunc_mode = ia32_new_Fpu_truncate(cg); + ir_node *fist, *load; /* do a fist */ - fist = new_rd_ia32_vfist(dbg, irg, block, + fist = new_rd_ia32_vfist(dbgi, irg, block, get_irg_frame(irg), noreg, new_op, trunc_mode, new_NoMem()); set_ia32_use_frame(fist); @@ -2211,7 +2238,7 @@ static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_node *node) { SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(cg, node)); /* do a Load */ - load = new_rd_ia32_Load(dbg, irg, block, get_irg_frame(irg), noreg, fist); + load = new_rd_ia32_Load(dbgi, irg, block, get_irg_frame(irg), noreg, fist); set_ia32_use_frame(load); set_ia32_am_support(load, ia32_am_Source); @@ -2227,36 +2254,32 @@ static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_node *node) { * Create a conversion from general purpose to x87 register */ static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_node *node, ir_mode *src_mode) { -#ifndef NDEBUG - ia32_code_gen_t *cg = env->cg; -#endif - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); ir_node *op = get_Conv_op(node); ir_node *new_op = transform_node(env, op); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); ir_node *fild, *store; int src_bits; /* first convert to 32 bit if necessary */ src_bits = get_mode_size_bits(src_mode); if (src_bits == 8) { - new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, new_op, nomem); + new_op = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, new_op, nomem); set_ia32_am_support(new_op, ia32_am_Source); set_ia32_ls_mode(new_op, src_mode); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); } else if (src_bits < 32) { - new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, new_op, nomem); + new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, noreg, noreg, new_op, nomem); set_ia32_am_support(new_op, ia32_am_Source); set_ia32_ls_mode(new_op, src_mode); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); } /* do a store */ - store = new_rd_ia32_Store(dbg, irg, block, get_irg_frame(irg), noreg, new_op, nomem); + store = new_rd_ia32_Store(dbgi, irg, block, get_irg_frame(irg), noreg, new_op, nomem); set_ia32_use_frame(store); set_ia32_am_support(store, ia32_am_Dest); @@ -2265,15 +2288,15 @@ static ir_node 
*gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_node *node, ir_mo set_ia32_ls_mode(store, mode_Iu); /* do a fild */ - fild = new_rd_ia32_vfild(dbg, irg, block, get_irg_frame(irg), noreg, store); + fild = new_rd_ia32_vfild(dbgi, irg, block, get_irg_frame(irg), noreg, store); set_ia32_use_frame(fild); set_ia32_am_support(fild, ia32_am_Source); set_ia32_op_type(fild, ia32_AddrModeS); set_ia32_am_flavour(fild, ia32_am_OB); - set_ia32_ls_mode(fild, mode); + set_ia32_ls_mode(fild, mode_Iu); - return new_r_Proj(irg, block, fild, mode_F, pn_ia32_vfild_res); + return new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); } /** @@ -2283,25 +2306,31 @@ static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_node *node, ir_mo * @return The created ia32 Conv node */ static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); + ir_node *block = transform_node(env, get_nodes_block(node)); ir_node *op = get_Conv_op(node); + ir_node *new_op = transform_node(env, op); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *src_mode = get_irn_mode(op); ir_mode *tgt_mode = get_irn_mode(node); int src_bits = get_mode_size_bits(src_mode); int tgt_bits = get_mode_size_bits(tgt_mode); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *res; ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_node *nomem = new_rd_NoMem(irg); - ir_node *new_op = transform_node(env, op); - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) + ir_node *res; if (src_mode == tgt_mode) { - /* this should be optimized already, but who knows... */ - DEBUG_ONLY(ir_fprintf(stderr, "Debug warning: conv %+F is pointless\n", node)); - DB((mod, LEVEL_1, "killed Conv(mode, mode) ...")); - return new_op; + if (get_Conv_strict(node)) { + if (USE_SSE2(env->cg)) { + /* when we are in SSE mode, we can kill all strict no-op conversion */ + return new_op; + } + } else { + /* this should be optimized already, but who knows... */ + DEBUG_ONLY(ir_fprintf(stderr, "Debug warning: conv %+F is pointless\n", node)); + DB((dbg, LEVEL_1, "killed Conv(mode, mode) ...")); + return new_op; + } } if (mode_is_float(src_mode)) { @@ -2309,19 +2338,20 @@ static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *node) { if (mode_is_float(tgt_mode)) { /* ... to float */ if (USE_SSE2(env->cg)) { - DB((mod, LEVEL_1, "create Conv(float, float) ...")); - res = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, new_op, nomem); + DB((dbg, LEVEL_1, "create Conv(float, float) ...")); + res = new_rd_ia32_Conv_FP2FP(dbgi, irg, block, noreg, noreg, new_op, nomem); set_ia32_ls_mode(res, tgt_mode); } else { // Matze: TODO what about strict convs? - DB((mod, LEVEL_1, "killed Conv(float, float) ...")); + DEBUG_ONLY(ir_fprintf(stderr, "Debug warning: strict conv %+F ignored yet\n", node)); + DB((dbg, LEVEL_1, "killed Conv(float, float) ...")); return new_op; } } else { /* ... to int */ - DB((mod, LEVEL_1, "create Conv(float, int) ...")); + DB((dbg, LEVEL_1, "create Conv(float, int) ...")); if (USE_SSE2(env->cg)) { - res = new_rd_ia32_Conv_FP2I(dbg, irg, block, noreg, noreg, new_op, nomem); + res = new_rd_ia32_Conv_FP2I(dbgi, irg, block, noreg, noreg, new_op, nomem); set_ia32_ls_mode(res, src_mode); } else { return gen_x87_fp_to_gp(env, node); @@ -2332,9 +2362,9 @@ static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *node) { if (mode_is_float(tgt_mode)) { FP_USED(env->cg); /* ... 
to float */ - DB((mod, LEVEL_1, "create Conv(int, float) ...")); + DB((dbg, LEVEL_1, "create Conv(int, float) ...")); if (USE_SSE2(env->cg)) { - res = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, new_op, nomem); + res = new_rd_ia32_Conv_I2FP(dbgi, irg, block, noreg, noreg, new_op, nomem); set_ia32_ls_mode(res, tgt_mode); if(src_bits == 32) { set_ia32_am_support(res, ia32_am_Source); @@ -2342,17 +2372,17 @@ static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *node) { } else { return gen_x87_gp_to_fp(env, node, src_mode); } - } - else { + } else { + /* to int */ ir_mode *smaller_mode; - int smaller_bits; + int smaller_bits; if (src_bits == tgt_bits) { - DB((mod, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", src_mode, tgt_mode)); + DB((dbg, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", src_mode, tgt_mode)); return new_op; } - if(src_bits < tgt_bits) { + if (src_bits < tgt_bits) { smaller_mode = src_mode; smaller_bits = src_bits; } else { @@ -2360,9 +2390,11 @@ static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *node) { smaller_bits = tgt_bits; } - // The following is not correct, we can't change the mode, - // maybe others are using the load too - // better move this to a separate phase! + /* + The following is not correct, we can't change the mode, + maybe others are using the load too + better move this to a separate phase! + */ #if 0 /* ... to int */ if(is_Proj(new_op)) { @@ -2374,14 +2406,14 @@ static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *node) { return new_op; } } -#endif +#endif /* if 0 */ - DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); + DB((dbg, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); if (smaller_bits == 8) { - res = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, new_op, nomem); + res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, new_op, nomem); set_ia32_ls_mode(res, smaller_mode); } else { - res = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, new_op, nomem); + res = new_rd_ia32_Conv_I2I(dbgi, irg, block, noreg, noreg, new_op, nomem); set_ia32_ls_mode(res, smaller_mode); } set_ia32_am_support(res, ia32_am_Source); @@ -2406,32 +2438,32 @@ static ir_node *gen_Conv(ia32_transform_env_t *env, ir_node *node) { ********************************************/ static ir_node *gen_be_StackParam(ia32_transform_env_t *env, ir_node *node) { - ir_node *new_op = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_rd_NoMem(env->irg); - ir_node *ptr = get_irn_n(node, 0); - ir_node *new_ptr = transform_node(env, ptr); - ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *ptr = get_irn_n(node, be_pos_StackParam_ptr); + ir_node *new_ptr = transform_node(env, ptr); + ir_node *new_op = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *nomem = new_rd_NoMem(env->irg); + ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node); ir_mode *load_mode = get_irn_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_mode *proj_mode; long pn_res; if (mode_is_float(load_mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) { - new_op = new_rd_ia32_xLoad(dbg, irg, block, new_ptr, noreg, nomem); - pn_res = pn_ia32_xLoad_res; + new_op = new_rd_ia32_xLoad(dbgi, irg, block, new_ptr, noreg, nomem); + pn_res = 
pn_ia32_xLoad_res; + proj_mode = mode_xmm; } else { - new_op = new_rd_ia32_vfld(dbg, irg, block, new_ptr, noreg, nomem); - pn_res = pn_ia32_vfld_res; + new_op = new_rd_ia32_vfld(dbgi, irg, block, new_ptr, noreg, nomem); + pn_res = pn_ia32_vfld_res; + proj_mode = mode_vfp; } - - proj_mode = mode_E; } else { - new_op = new_rd_ia32_Load(dbg, irg, block, new_ptr, noreg, nomem); + new_op = new_rd_ia32_Load(dbgi, irg, block, new_ptr, noreg, nomem); proj_mode = mode_Iu; pn_res = pn_ia32_Load_res; } @@ -2447,22 +2479,22 @@ static ir_node *gen_be_StackParam(ia32_transform_env_t *env, ir_node *node) { SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); - return new_rd_Proj(dbg, irg, block, new_op, proj_mode, pn_res); + return new_rd_Proj(dbgi, irg, block, new_op, proj_mode, pn_res); } /** * Transforms a FrameAddr into an ia32 Add. */ static ir_node *gen_be_FrameAddr(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *op = get_irn_n(node, 0); - ir_node *new_op = transform_node(env, op); - ir_node *res; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - - res = new_rd_ia32_Lea(dbg, irg, block, new_op, noreg); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *op = get_irn_n(node, be_pos_FrameAddr_ptr); + ir_node *new_op = transform_node(env, op); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *res; + + res = new_rd_ia32_Lea(dbgi, irg, block, new_op, noreg); set_ia32_frame_ent(res, arch_get_frame_entity(env->cg->arch_env, node)); set_ia32_am_support(res, ia32_am_Full); set_ia32_use_frame(res); @@ -2477,15 +2509,15 @@ static ir_node *gen_be_FrameAddr(ia32_transform_env_t *env, ir_node *node) { * Transforms a FrameLoad into an ia32 Load. 
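 *
 * The concrete load is picked from the mode of the frame entity, roughly:
 *   float, SSE2 unit available  -> ia32_xLoad
 *   float, x87 unit             -> ia32_vfld
 *   everything else             -> ia32_Load
 * The frame entity is attached to the new node, so the actual offset can be
 * resolved later when the stack frame layout is known (summary of the
 * selection done in the function body below).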
*/ static ir_node *gen_be_FrameLoad(ia32_transform_env_t *env, ir_node *node) { - ir_node *new_op = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *mem = get_irn_n(node, 0); - ir_node *ptr = get_irn_n(node, 1); + ir_node *mem = get_irn_n(node, be_pos_FrameLoad_mem); ir_node *new_mem = transform_node(env, mem); + ir_node *ptr = get_irn_n(node, be_pos_FrameLoad_ptr); ir_node *new_ptr = transform_node(env, ptr); + ir_node *new_op = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node); ir_mode *mode = get_type_mode(get_entity_type(ent)); ir_node *projs[pn_Load_max]; @@ -2495,14 +2527,14 @@ static ir_node *gen_be_FrameLoad(ia32_transform_env_t *env, ir_node *node) { if (mode_is_float(mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) { - new_op = new_rd_ia32_xLoad(dbg, irg, block, new_ptr, noreg, new_mem); + new_op = new_rd_ia32_xLoad(dbgi, irg, block, new_ptr, noreg, new_mem); } else { - new_op = new_rd_ia32_vfld(dbg, irg, block, new_ptr, noreg, new_mem); + new_op = new_rd_ia32_vfld(dbgi, irg, block, new_ptr, noreg, new_mem); } } else { - new_op = new_rd_ia32_Load(dbg, irg, block, new_ptr, noreg, new_mem); + new_op = new_rd_ia32_Load(dbgi, irg, block, new_ptr, noreg, new_mem); } set_ia32_frame_ent(new_op, ent); @@ -2523,34 +2555,31 @@ static ir_node *gen_be_FrameLoad(ia32_transform_env_t *env, ir_node *node) { * Transforms a FrameStore into an ia32 Store. */ static ir_node *gen_be_FrameStore(ia32_transform_env_t *env, ir_node *node) { - ir_node *new_op = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *mem = get_irn_n(node, 0); - ir_node *ptr = get_irn_n(node, 1); - ir_node *val = get_irn_n(node, 2); + ir_node *mem = get_irn_n(node, be_pos_FrameStore_mem); ir_node *new_mem = transform_node(env, mem); + ir_node *ptr = get_irn_n(node, be_pos_FrameStore_ptr); ir_node *new_ptr = transform_node(env, ptr); + ir_node *val = get_irn_n(node, be_pos_FrameStore_val); ir_node *new_val = transform_node(env, val); + ir_node *new_op = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_entity *ent = arch_get_frame_entity(env->cg->arch_env, node); ir_mode *mode = get_irn_mode(val); if (mode_is_float(mode)) { FP_USED(env->cg); if (USE_SSE2(env->cg)) { - new_op = new_rd_ia32_xStore(dbg, irg, block, new_ptr, noreg, new_val, new_mem); - } - else { - new_op = new_rd_ia32_vfst(dbg, irg, block, new_ptr, noreg, new_val, new_mem); + new_op = new_rd_ia32_xStore(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); + } else { + new_op = new_rd_ia32_vfst(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); } - } - else if (get_mode_size_bits(mode) == 8) { - new_op = new_rd_ia32_Store8Bit(dbg, irg, block, new_ptr, noreg, new_val, new_mem); - } - else { - new_op = new_rd_ia32_Store(dbg, irg, block, new_ptr, noreg, new_val, new_mem); + } else if (get_mode_size_bits(mode) == 8) { + new_op = new_rd_ia32_Store8Bit(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); + } else { + new_op = new_rd_ia32_Store(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); } set_ia32_frame_ent(new_op, ent); @@ -2571,18 +2600,18 @@ static ir_node 
*gen_be_FrameStore(ia32_transform_env_t *env, ir_node *node) { */ static ir_node *gen_be_Return(ia32_transform_env_t *env, ir_node *node) { ir_graph *irg = env->irg; - dbg_info *dbg; - ir_node *block; ir_node *ret_val = get_irn_n(node, be_pos_Return_val); ir_node *ret_mem = get_irn_n(node, be_pos_Return_mem); ir_entity *ent = get_irg_entity(irg); ir_type *tp = get_entity_type(ent); + dbg_info *dbgi; + ir_node *block; ir_type *res_type; ir_mode *mode; - ir_node *frame, *sse_store, *fld, *mproj, *barrier; - ir_node *new_barrier, *new_ret_val, *new_ret_mem; - ir_node **in; - int pn_ret_val, pn_ret_mem, arity, i; + ir_node *frame, *sse_store, *fld, *mproj, *barrier; + ir_node *new_barrier, *new_ret_val, *new_ret_mem; + ir_node **in; + int pn_ret_val, pn_ret_mem, arity, i; assert(ret_val != NULL); if (be_Return_get_n_rets(node) < 1 || ! USE_SSE2(env->cg)) { @@ -2591,12 +2620,12 @@ static ir_node *gen_be_Return(ia32_transform_env_t *env, ir_node *node) { res_type = get_method_res_type(tp, 0); - if (!is_Primitive_type(res_type)) { + if (! is_Primitive_type(res_type)) { return duplicate_node(env, node); } mode = get_type_mode(res_type); - if (!mode_is_float(mode)) { + if (! mode_is_float(mode)) { return duplicate_node(env, node); } @@ -2609,20 +2638,20 @@ static ir_node *gen_be_Return(ia32_transform_env_t *env, ir_node *node) { barrier = get_Proj_pred(ret_val); /* get result input of the Barrier */ - ret_val = get_irn_n(barrier, pn_ret_val); + ret_val = get_irn_n(barrier, pn_ret_val); new_ret_val = transform_node(env, ret_val); /* get memory input of the Barrier */ - ret_mem = get_irn_n(barrier, pn_ret_mem); + ret_mem = get_irn_n(barrier, pn_ret_mem); new_ret_mem = transform_node(env, ret_mem); frame = get_irg_frame(irg); - dbg = get_irn_dbg_info(barrier); + dbgi = get_irn_dbg_info(barrier); block = transform_node(env, get_nodes_block(barrier)); /* store xmm0 onto stack */ - sse_store = new_rd_ia32_xStoreSimple(dbg, irg, block, frame, new_ret_val, new_ret_mem); + sse_store = new_rd_ia32_xStoreSimple(dbgi, irg, block, frame, new_ret_val, new_ret_mem); set_ia32_ls_mode(sse_store, mode); set_ia32_op_type(sse_store, ia32_AddrModeD); set_ia32_use_frame(sse_store); @@ -2630,7 +2659,7 @@ static ir_node *gen_be_Return(ia32_transform_env_t *env, ir_node *node) { set_ia32_am_support(sse_store, ia32_am_Dest); /* load into st0 */ - fld = new_rd_ia32_SetST0(dbg, irg, block, frame, sse_store); + fld = new_rd_ia32_SetST0(dbgi, irg, block, frame, sse_store); set_ia32_ls_mode(fld, mode); set_ia32_op_type(fld, ia32_AddrModeS); set_ia32_use_frame(fld); @@ -2638,17 +2667,18 @@ static ir_node *gen_be_Return(ia32_transform_env_t *env, ir_node *node) { set_ia32_am_support(fld, ia32_am_Source); mproj = new_r_Proj(irg, block, fld, mode_M, pn_ia32_SetST0_M); - fld = new_r_Proj(irg, block, fld, mode_E, pn_ia32_SetST0_res); + fld = new_r_Proj(irg, block, fld, mode_vfp, pn_ia32_SetST0_res); arch_set_irn_register(env->cg->arch_env, fld, &ia32_vfp_regs[REG_VF0]); /* create a new barrier */ arity = get_irn_arity(barrier); in = alloca(arity * sizeof(in[0])); - for(i = 0; i < arity; ++i) { + for (i = 0; i < arity; ++i) { ir_node *new_in; - if(i == pn_ret_val) { + + if (i == pn_ret_val) { new_in = fld; - } else if(i == pn_ret_mem) { + } else if (i == pn_ret_mem) { new_in = mproj; } else { ir_node *in = get_irn_n(barrier, i); @@ -2657,7 +2687,7 @@ static ir_node *gen_be_Return(ia32_transform_env_t *env, ir_node *node) { in[i] = new_in; } - new_barrier = new_ir_node(dbg, irg, block, + new_barrier = new_ir_node(dbgi, irg, block, 
get_irn_op(barrier), get_irn_mode(barrier), arity, in); copy_node_attr(barrier, new_barrier); @@ -2673,19 +2703,19 @@ static ir_node *gen_be_Return(ia32_transform_env_t *env, ir_node *node) { * Transform a be_AddSP into an ia32_AddSP. Eat up const sizes. */ static ir_node *gen_be_AddSP(ia32_transform_env_t *env, ir_node *node) { - ir_node *new_op; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *sz = get_irn_n(node, be_pos_AddSP_size); - ir_node *new_sz = transform_node(env, sz); - ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp); - ir_node *new_sp = transform_node(env, sp); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *sz = get_irn_n(node, be_pos_AddSP_size); + ir_node *new_sz = transform_node(env, sz); + ir_node *sp = get_irn_n(node, be_pos_AddSP_old_sp); + ir_node *new_sp = transform_node(env, sp); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); + ir_node *new_op; /* ia32 stack grows in reverse direction, make a SubSP */ - new_op = new_rd_ia32_SubSP(dbg, irg, block, noreg, noreg, new_sp, new_sz, nomem); + new_op = new_rd_ia32_SubSP(dbgi, irg, block, noreg, noreg, new_sp, new_sz, nomem); set_ia32_am_support(new_op, ia32_am_Source); fold_immediate(env, new_op, 2, 3); @@ -2698,19 +2728,19 @@ static ir_node *gen_be_AddSP(ia32_transform_env_t *env, ir_node *node) { * Transform a be_SubSP into an ia32_SubSP. Eat up const sizes. */ static ir_node *gen_be_SubSP(ia32_transform_env_t *env, ir_node *node) { - ir_node *new_op; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *sz = get_irn_n(node, be_pos_SubSP_size); - ir_node *new_sz = transform_node(env, sz); - ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp); - ir_node *new_sp = transform_node(env, sp); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *sz = get_irn_n(node, be_pos_SubSP_size); + ir_node *new_sz = transform_node(env, sz); + ir_node *sp = get_irn_n(node, be_pos_SubSP_old_sp); + ir_node *new_sp = transform_node(env, sp); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); + ir_node *new_op; /* ia32 stack grows in reverse direction, make an AddSP */ - new_op = new_rd_ia32_AddSP(dbg, irg, block, noreg, noreg, new_sp, new_sz, nomem); + new_op = new_rd_ia32_AddSP(dbgi, irg, block, noreg, noreg, new_sp, new_sz, nomem); set_ia32_am_support(new_op, ia32_am_Source); fold_immediate(env, new_op, 2, 3); @@ -2732,7 +2762,7 @@ static ir_node *gen_Unknown(ia32_transform_env_t *env, ir_node *node) { return ia32_new_Unknown_xmm(env->cg); else return ia32_new_Unknown_vfp(env->cg); - } else if (mode_is_int(mode) || mode_is_reference(mode)) { + } else if (mode_needs_gp_reg(mode)) { return ia32_new_Unknown_gp(env->cg); } else { assert(0 && "unsupported Unknown-Mode"); @@ -2745,28 +2775,30 @@ static ir_node *gen_Unknown(ia32_transform_env_t *env, ir_node *node) { * Change some phi modes */ static ir_node *gen_Phi(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); 
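/* Summary of the Phi rewrite in this hunk: the Phi keeps its arity, but its
 * mode is normalised - every mode that needs a gp register becomes mode_Iu,
 * float Phis become mode_xmm under SSE2 and mode_vfp on the x87 unit.  The
 * predecessors are only queued on the worklist here and are wired up later,
 * because Phis may sit on cycles. */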
- ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *phi; - int i, arity; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_node *phi; + int i, arity; - if(mode_is_int(mode) || mode_is_reference(mode)) { - // we shouldn't have any 64bit stuff around anymore + if(mode_needs_gp_reg(mode)) { + /* we shouldn't have any 64bit stuff around anymore */ assert(get_mode_size_bits(mode) <= 32); - // all integer operations are on 32bit registers now + /* all integer operations are on 32bit registers now */ mode = mode_Iu; } else if(mode_is_float(mode)) { assert(mode == mode_D || mode == mode_F); - // all float operations are on mode_E registers - mode = mode_E; + if (USE_SSE2(env->cg)) { + mode = mode_xmm; + } else { + mode = mode_vfp; + } } /* phi nodes allow loops, so we use the old arguments for now * and fix this later */ - phi = new_ir_node(dbg, irg, block, op_Phi, mode, get_irn_arity(node), - get_irn_in(node) + 1); + phi = new_ir_node(dbgi, irg, block, op_Phi, mode, get_irn_arity(node), get_irn_in(node) + 1); copy_node_attr(node, phi); duplicate_deps(env, node, phi); @@ -2774,7 +2806,7 @@ static ir_node *gen_Phi(ia32_transform_env_t *env, ir_node *node) { /* put the preds in the worklist */ arity = get_irn_arity(node); - for(i = 0; i < arity; ++i) { + for (i = 0; i < arity; ++i) { ir_node *pred = get_irn_n(node, i); pdeq_putr(env->worklist, pred); } @@ -2804,16 +2836,16 @@ typedef ir_node *construct_store_func(dbg_info *db, ir_graph *irg, ir_node *bloc * Transforms a lowered Load into a "real" one. */ static ir_node *gen_lowered_Load(ia32_transform_env_t *env, ir_node *node, construct_load_func func, char fp_unit) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_mode *mode = get_ia32_ls_mode(node); - ir_node *new_op; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *ptr = get_irn_n(node, 0); - ir_node *mem = get_irn_n(node, 1); - ir_node *new_ptr = transform_node(env, ptr); - ir_node *new_mem = transform_node(env, mem); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *ptr = get_irn_n(node, 0); + ir_node *new_ptr = transform_node(env, ptr); + ir_node *mem = get_irn_n(node, 1); + ir_node *new_mem = transform_node(env, mem); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_ia32_ls_mode(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *new_op; /* Could be that we have SSE2 unit, but due to 64Bit Div/Conv @@ -2825,7 +2857,7 @@ static ir_node *gen_lowered_Load(ia32_transform_env_t *env, ir_node *node, const FORCE_x87(env->cg); } - new_op = func(dbg, irg, block, new_ptr, noreg, new_mem); + new_op = func(dbgi, irg, block, new_ptr, noreg, new_mem); set_ia32_am_support(new_op, ia32_am_Source); set_ia32_op_type(new_op, ia32_AddrModeS); @@ -2833,10 +2865,10 @@ static ir_node *gen_lowered_Load(ia32_transform_env_t *env, ir_node *node, const set_ia32_am_offs_int(new_op, 0); set_ia32_am_scale(new_op, 1); set_ia32_am_sc(new_op, get_ia32_am_sc(node)); - if(is_ia32_am_sc_sign(node)) + if (is_ia32_am_sc_sign(node)) set_ia32_am_sc_sign(new_op); set_ia32_ls_mode(new_op, get_ia32_ls_mode(node)); - if(is_ia32_use_frame(node)) { + if (is_ia32_use_frame(node)) { set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); set_ia32_use_frame(new_op); } @@ -2850,20 +2882,20 @@ static ir_node 
*gen_lowered_Load(ia32_transform_env_t *env, ir_node *node, const * Transforms a lowered Store into a "real" one. */ static ir_node *gen_lowered_Store(ia32_transform_env_t *env, ir_node *node, construct_store_func func, char fp_unit) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_mode *mode = get_ia32_ls_mode(node); - ir_node *new_op; - long am_offs; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *ptr = get_irn_n(node, 0); + ir_node *new_ptr = transform_node(env, ptr); + ir_node *val = get_irn_n(node, 1); + ir_node *new_val = transform_node(env, val); + ir_node *mem = get_irn_n(node, 2); + ir_node *new_mem = transform_node(env, mem); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_mode *mode = get_ia32_ls_mode(node); + ir_node *new_op; + long am_offs; ia32_am_flavour_t am_flav = ia32_B; - ir_node *ptr = get_irn_n(node, 0); - ir_node *val = get_irn_n(node, 1); - ir_node *mem = get_irn_n(node, 2); - ir_node *new_ptr = transform_node(env, ptr); - ir_node *new_val = transform_node(env, val); - ir_node *new_mem = transform_node(env, mem); /* Could be that we have SSE2 unit, but due to 64Bit Div/Conv @@ -2875,7 +2907,7 @@ static ir_node *gen_lowered_Store(ia32_transform_env_t *env, ir_node *node, cons FORCE_x87(env->cg); } - new_op = func(dbg, irg, block, new_ptr, noreg, new_val, new_mem); + new_op = func(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); if ((am_offs = get_ia32_am_offs_int(node)) != 0) { am_flav |= ia32_O; @@ -2954,21 +2986,23 @@ GEN_LOWERED_UNOP(Neg) GEN_LOWERED_LOAD(vfild, fp_x87) GEN_LOWERED_LOAD(Load, fp_none) -GEN_LOWERED_STORE(vfist, fp_x87) +/*GEN_LOWERED_STORE(vfist, fp_x87) + *TODO + */ GEN_LOWERED_STORE(Store, fp_none) static ir_node *gen_ia32_l_vfdiv(ia32_transform_env_t *env, ir_node *node) { - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *left = get_binop_left(node); - ir_node *right = get_binop_right(node); - ir_node *new_left = transform_node(env, left); - ir_node *new_right = transform_node(env, right); - ir_node *vfdiv; - - vfdiv = new_rd_ia32_vfdiv(dbg, irg, block, noreg, noreg, new_left, new_right, new_NoMem()); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *left = get_binop_left(node); + ir_node *new_left = transform_node(env, left); + ir_node *right = get_binop_right(node); + ir_node *new_right = transform_node(env, right); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *vfdiv; + + vfdiv = new_rd_ia32_vfdiv(dbgi, irg, block, noreg, noreg, new_left, new_right, new_NoMem()); clear_ia32_commutative(vfdiv); set_ia32_am_support(vfdiv, ia32_am_Source); fold_immediate(env, vfdiv, 2, 3); @@ -2987,26 +3021,26 @@ static ir_node *gen_ia32_l_vfdiv(ia32_transform_env_t *env, ir_node *node) { * @return the created ia32 Mul node */ static ir_node *gen_ia32_l_Mul(ia32_transform_env_t *env, ir_node *node) { - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *left = get_binop_left(node); - ir_node *right = get_binop_right(node); - ir_node *new_left = 
transform_node(env, left); - ir_node *new_right = transform_node(env, right); - ir_node *in[2]; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *left = get_binop_left(node); + ir_node *new_left = transform_node(env, left); + ir_node *right = get_binop_right(node); + ir_node *new_right = transform_node(env, right); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *in[2]; /* l_Mul is already a mode_T node, so we create the Mul in the normal way */ /* and then skip the result Proj, because all needed Projs are already there. */ - ir_node *muls = new_rd_ia32_Mul(dbg, irg, block, noreg, noreg, new_left, new_right, new_NoMem()); + ir_node *muls = new_rd_ia32_Mul(dbgi, irg, block, noreg, noreg, new_left, new_right, new_NoMem()); clear_ia32_commutative(muls); set_ia32_am_support(muls, ia32_am_Source); fold_immediate(env, muls, 2, 3); /* check if EAX and EDX proj exist, add missing one */ - in[0] = new_rd_Proj(dbg, irg, block, muls, mode_Iu, pn_EAX); - in[1] = new_rd_Proj(dbg, irg, block, muls, mode_Iu, pn_EDX); + in[0] = new_rd_Proj(dbgi, irg, block, muls, mode_Iu, pn_EAX); + in[1] = new_rd_Proj(dbgi, irg, block, muls, mode_Iu, pn_EDX); be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 2, in); SET_IA32_ORIG_NODE(muls, ia32_get_old_node_name(env->cg, node)); @@ -3027,19 +3061,19 @@ GEN_LOWERED_SHIFT_OP(Sar) */ static ir_node *gen_lowered_64bit_shifts(ia32_transform_env_t *env, ir_node *node, ir_node *op1, ir_node *op2, - ir_node *count) { - ir_node *new_op = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *imm_op; - ir_node *new_op1 = transform_node(env, op1); - ir_node *new_op2 = transform_node(env, op2); - ir_node *new_count = transform_node(env, count); - tarval *tv; - DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) + ir_node *count) +{ + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *new_op1 = transform_node(env, op1); + ir_node *new_op2 = transform_node(env, op2); + ir_node *new_count = transform_node(env, count); + ir_node *new_op = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); + ir_node *imm_op; + tarval *tv; assert(! 
mode_is_float(get_irn_mode(node)) && "Shift/Rotate with float not supported"); @@ -3063,24 +3097,24 @@ static ir_node *gen_lowered_64bit_shifts(ia32_transform_env_t *env, ir_node *nod /* integer operations */ if (imm_op) { /* This is ShiftD with const */ - DB((mod, LEVEL_1, "ShiftD with immediate ...")); + DB((dbg, LEVEL_1, "ShiftD with immediate ...")); if (is_ia32_l_ShlD(node)) - new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg, + new_op = new_rd_ia32_ShlD(dbgi, irg, block, noreg, noreg, new_op1, new_op2, noreg, nomem); else - new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg, + new_op = new_rd_ia32_ShrD(dbgi, irg, block, noreg, noreg, new_op1, new_op2, noreg, nomem); copy_ia32_Immop_attr(new_op, imm_op); } else { /* This is a normal ShiftD */ - DB((mod, LEVEL_1, "ShiftD binop ...")); + DB((dbg, LEVEL_1, "ShiftD binop ...")); if (is_ia32_l_ShlD(node)) - new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg, + new_op = new_rd_ia32_ShlD(dbgi, irg, block, noreg, noreg, new_op1, new_op2, new_count, nomem); else - new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg, + new_op = new_rd_ia32_ShrD(dbgi, irg, block, noreg, noreg, new_op1, new_op2, new_count, nomem); } @@ -3109,27 +3143,29 @@ static ir_node *gen_ia32_l_ShrD(ia32_transform_env_t *env, ir_node *node) { * In case SSE Unit is used, the node is transformed into a vfst + xLoad. */ static ir_node *gen_ia32_l_X87toSSE(ia32_transform_env_t *env, ir_node *node) { - ia32_code_gen_t *cg = env->cg; - ir_node *res = NULL; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *ptr = get_irn_n(node, 0); - ir_node *val = get_irn_n(node, 1); - ir_node *new_val = transform_node(env, val); - ir_node *mem = get_irn_n(node, 2); - ir_node *noreg, *new_ptr, *new_mem; + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *val = get_irn_n(node, 1); + ir_node *new_val = transform_node(env, val); + ia32_code_gen_t *cg = env->cg; + ir_node *res = NULL; + ir_graph *irg = env->irg; + dbg_info *dbgi; + ir_node *noreg, *new_ptr, *new_mem; + ir_node *ptr, *mem; if (USE_SSE2(cg)) { return new_val; } - noreg = ia32_new_NoReg_gp(cg); + mem = get_irn_n(node, 2); new_mem = transform_node(env, mem); + ptr = get_irn_n(node, 0); new_ptr = transform_node(env, ptr); + noreg = ia32_new_NoReg_gp(cg); + dbgi = get_irn_dbg_info(node); /* Store x87 -> MEM */ - res = new_rd_ia32_vfst(dbg, irg, block, new_ptr, noreg, new_val, new_mem); + res = new_rd_ia32_vfst(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); set_ia32_frame_ent(res, get_ia32_frame_ent(node)); set_ia32_use_frame(res); set_ia32_ls_mode(res, get_ia32_ls_mode(node)); @@ -3138,14 +3174,14 @@ static ir_node *gen_ia32_l_X87toSSE(ia32_transform_env_t *env, ir_node *node) { set_ia32_op_type(res, ia32_AddrModeD); /* Load MEM -> SSE */ - res = new_rd_ia32_xLoad(dbg, irg, block, new_ptr, noreg, res); + res = new_rd_ia32_xLoad(dbgi, irg, block, new_ptr, noreg, res); set_ia32_frame_ent(res, get_ia32_frame_ent(node)); set_ia32_use_frame(res); set_ia32_ls_mode(res, get_ia32_ls_mode(node)); set_ia32_am_support(res, ia32_am_Source); set_ia32_am_flavour(res, ia32_B); set_ia32_op_type(res, ia32_AddrModeS); - res = new_rd_Proj(dbg, irg, block, res, mode_E, pn_ia32_xLoad_res); + res = new_rd_Proj(dbgi, irg, block, res, mode_xmm, pn_ia32_xLoad_res); return res; } @@ -3154,29 +3190,30 @@ static ir_node *gen_ia32_l_X87toSSE(ia32_transform_env_t *env, ir_node *node) { * In case SSE Unit is used, the node is 
transformed into a xStore + vfld. */ static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env, ir_node *node) { - ia32_code_gen_t *cg = env->cg; - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *res = NULL; - ir_node *ptr = get_irn_n(node, 0); - ir_node *val = get_irn_n(node, 1); - ir_node *mem = get_irn_n(node, 2); - ir_entity *fent = get_ia32_frame_ent(node); - ir_mode *lsmode = get_ia32_ls_mode(node); - ir_node *new_val = transform_node(env, val); - ir_node *noreg, *new_ptr, *new_mem; - int offs = 0; - - if (!USE_SSE2(cg)) { + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *val = get_irn_n(node, 1); + ir_node *new_val = transform_node(env, val); + ia32_code_gen_t *cg = env->cg; + ir_graph *irg = env->irg; + ir_node *res = NULL; + ir_entity *fent = get_ia32_frame_ent(node); + ir_mode *lsmode = get_ia32_ls_mode(node); + int offs = 0; + ir_node *noreg, *new_ptr, *new_mem; + ir_node *ptr, *mem; + dbg_info *dbgi; + + if (! USE_SSE2(cg)) { /* SSE unit is not used -> skip this node. */ return new_val; } - noreg = ia32_new_NoReg_gp(cg); - new_val = transform_node(env, val); + ptr = get_irn_n(node, 0); new_ptr = transform_node(env, ptr); + mem = get_irn_n(node, 2); new_mem = transform_node(env, mem); + noreg = ia32_new_NoReg_gp(cg); + dbgi = get_irn_dbg_info(node); /* Store SSE -> MEM */ if (is_ia32_xLoad(skip_Proj(new_val))) { @@ -3187,7 +3224,7 @@ static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env, ir_node *node) { ptr = get_irn_n(ld, 0); offs = get_ia32_am_offs_int(ld); } else { - res = new_rd_ia32_xStore(dbg, irg, block, new_ptr, noreg, new_val, new_mem); + res = new_rd_ia32_xStore(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); set_ia32_frame_ent(res, fent); set_ia32_use_frame(res); set_ia32_ls_mode(res, lsmode); @@ -3198,7 +3235,7 @@ static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env, ir_node *node) { } /* Load MEM -> x87 */ - res = new_rd_ia32_vfld(dbg, irg, block, new_ptr, noreg, new_mem); + res = new_rd_ia32_vfld(dbgi, irg, block, new_ptr, noreg, new_mem); set_ia32_frame_ent(res, fent); set_ia32_use_frame(res); set_ia32_ls_mode(res, lsmode); @@ -3206,7 +3243,7 @@ static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env, ir_node *node) { set_ia32_am_support(res, ia32_am_Source); set_ia32_am_flavour(res, ia32_B); set_ia32_op_type(res, ia32_AddrModeS); - res = new_rd_Proj(dbg, irg, block, res, lsmode, pn_ia32_vfld_res); + res = new_rd_Proj(dbgi, irg, block, res, mode_vfp, pn_ia32_vfld_res); return res; } @@ -3231,13 +3268,13 @@ static ir_node *bad_transform(ia32_transform_env_t *env, ir_node *node) { static ir_node *gen_End(ia32_transform_env_t *env, ir_node *node) { /* end has to be duplicated manually because we need a dynamic in array */ - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - int i, arity; - ir_node *new_end; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = transform_node(env, get_nodes_block(node)); + int i, arity; + ir_node *new_end; - new_end = new_ir_node(dbg, irg, block, op_End, mode_X, -1, NULL); + new_end = new_ir_node(dbgi, irg, block, op_End, mode_X, -1, NULL); copy_node_attr(node, new_end); duplicate_deps(env, node, new_end); @@ -3246,8 +3283,8 @@ static ir_node *gen_End(ia32_transform_env_t *env, ir_node *node) { /* transform preds */ arity = get_irn_arity(node); - for(i = 0; i < arity; 
++i) { - ir_node *in = get_irn_n(node, i); + for (i = 0; i < arity; ++i) { + ir_node *in = get_irn_n(node, i); ir_node *new_in = transform_node(env, in); add_End_keepalive(new_end, new_in); @@ -3257,23 +3294,23 @@ static ir_node *gen_End(ia32_transform_env_t *env, ir_node *node) { } static ir_node *gen_Block(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *start_block = env->old_anchors[anchor_start_block]; - ir_node *block; - int i, arity; + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *start_block = env->old_anchors[anchor_start_block]; + ir_node *block; + int i, arity; /* * We replace the ProjX from the start node with a jump, * so the startblock has no preds anymore now */ - if(node == start_block) { - return new_rd_Block(dbg, irg, 0, NULL); + if (node == start_block) { + return new_rd_Block(dbgi, irg, 0, NULL); } /* we use the old blocks for now, because jumps allow cycles in the graph * we have to fix this later */ - block = new_ir_node(dbg, irg, NULL, get_irn_op(node), get_irn_mode(node), + block = new_ir_node(dbgi, irg, NULL, get_irn_op(node), get_irn_mode(node), get_irn_arity(node), get_irn_in(node) + 1); copy_node_attr(node, block); @@ -3284,7 +3321,7 @@ static ir_node *gen_Block(ia32_transform_env_t *env, ir_node *node) { /* put the preds in the worklist */ arity = get_irn_arity(node); - for(i = 0; i < arity; ++i) { + for (i = 0; i < arity; ++i) { ir_node *in = get_irn_n(node, i); pdeq_putr(env->worklist, in); } @@ -3293,19 +3330,19 @@ static ir_node *gen_Block(ia32_transform_env_t *env, ir_node *node) { } static ir_node *gen_Proj_be_AddSP(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - ir_node *block = transform_node(env, get_nodes_block(node)); - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *pred = get_Proj_pred(node); - ir_node *new_pred = transform_node(env, pred); - long proj = get_Proj_proj(node); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = transform_node(env, pred); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + long proj = get_Proj_proj(node); - if(proj == pn_be_AddSP_res) { - ir_node *res = new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_AddSP_stack); + if (proj == pn_be_AddSP_res) { + ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_AddSP_stack); arch_set_irn_register(env->cg->arch_env, res, &ia32_gp_regs[REG_ESP]); return res; - } else if(proj == pn_be_AddSP_M) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_AddSP_M); + } else if (proj == pn_be_AddSP_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_AddSP_M); } assert(0); @@ -3313,19 +3350,19 @@ static ir_node *gen_Proj_be_AddSP(ia32_transform_env_t *env, ir_node *node) { } static ir_node *gen_Proj_be_SubSP(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - ir_node *block = transform_node(env, get_nodes_block(node)); - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *pred = get_Proj_pred(node); - ir_node *new_pred = transform_node(env, pred); - long proj = get_Proj_proj(node); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = transform_node(env, pred); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + long proj = get_Proj_proj(node); - if(proj == pn_be_SubSP_res) { - ir_node *res = 
new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_AddSP_stack); + if (proj == pn_be_SubSP_res) { + ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_AddSP_stack); arch_set_irn_register(env->cg->arch_env, res, &ia32_gp_regs[REG_ESP]); return res; - } else if(proj == pn_be_SubSP_M) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_SubSP_M); + } else if (proj == pn_be_SubSP_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_SubSP_M); } assert(0); @@ -3333,31 +3370,31 @@ static ir_node *gen_Proj_be_SubSP(ia32_transform_env_t *env, ir_node *node) { } static ir_node *gen_Proj_Load(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - ir_node *block = transform_node(env, get_nodes_block(node)); - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *pred = get_Proj_pred(node); - ir_node *new_pred = transform_node(env, pred); - long proj = get_Proj_proj(node); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = transform_node(env, pred); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + long proj = get_Proj_proj(node); /* renumber the proj */ - if(is_ia32_Load(new_pred)) { - if(proj == pn_Load_res) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Load_res); - } else if(proj == pn_Load_M) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_Load_M); + if (is_ia32_Load(new_pred)) { + if (proj == pn_Load_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Load_res); + } else if (proj == pn_Load_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Load_M); } - } else if(is_ia32_xLoad(new_pred)) { - if(proj == pn_Load_res) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_E, pn_ia32_xLoad_res); - } else if(proj == pn_Load_M) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_xLoad_M); + } else if (is_ia32_xLoad(new_pred)) { + if (proj == pn_Load_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xLoad_res); + } else if (proj == pn_Load_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xLoad_M); } - } else if(is_ia32_vfld(new_pred)) { - if(proj == pn_Load_res) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_E, pn_ia32_vfld_res); - } else if(proj == pn_Load_M) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_vfld_M); + } else if (is_ia32_vfld(new_pred)) { + if (proj == pn_Load_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfld_res); + } else if (proj == pn_Load_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfld_M); } } @@ -3366,46 +3403,45 @@ static ir_node *gen_Proj_Load(ia32_transform_env_t *env, ir_node *node) { } static ir_node *gen_Proj_DivMod(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_mode *mode = get_irn_mode(node); - - ir_node *pred = get_Proj_pred(node); - ir_node *new_pred = transform_node(env, pred); - long proj = get_Proj_proj(node); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = transform_node(env, pred); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + long proj = get_Proj_proj(node); assert(is_ia32_Div(new_pred) || is_ia32_IDiv(new_pred)); - 
switch(get_irn_opcode(pred)) { + switch (get_irn_opcode(pred)) { case iro_Div: - switch(proj) { + switch (proj) { case pn_Div_M: - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_Div_M); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); case pn_Div_res: - return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res); default: break; } break; case iro_Mod: - switch(proj) { + switch (proj) { case pn_Mod_M: - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_Div_M); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); case pn_Mod_res: - return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res); default: break; } break; case iro_DivMod: - switch(proj) { + switch (proj) { case pn_DivMod_M: - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_Div_M); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Div_M); case pn_DivMod_res_div: - return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_div_res); case pn_DivMod_res_mod: - return new_rd_Proj(dbg, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_Div_mod_res); default: break; } @@ -3418,25 +3454,21 @@ static ir_node *gen_Proj_DivMod(ia32_transform_env_t *env, ir_node *node) { return new_rd_Unknown(irg, mode); } -static ir_node *gen_Proj_CopyB(ia32_transform_env_t *env, ir_node *node) -{ - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_mode *mode = get_irn_mode(node); - - ir_node *pred = get_Proj_pred(node); - ir_node *new_pred = transform_node(env, pred); - long proj = get_Proj_proj(node); +static ir_node *gen_Proj_CopyB(ia32_transform_env_t *env, ir_node *node) { + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = transform_node(env, pred); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + long proj = get_Proj_proj(node); switch(proj) { case pn_CopyB_M_regular: - if(is_ia32_CopyB_i(new_pred)) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, - pn_ia32_CopyB_i_M); - } else if(is_ia32_CopyB(new_pred)) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, - pn_ia32_CopyB_M); + if (is_ia32_CopyB_i(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_i_M); + } else if (is_ia32_CopyB(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_CopyB_M); } break; default: @@ -3447,22 +3479,20 @@ static ir_node *gen_Proj_CopyB(ia32_transform_env_t *env, ir_node *node) return new_rd_Unknown(irg, mode); } -static ir_node *gen_Proj_l_vfdiv(ia32_transform_env_t *env, ir_node *node) -{ - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_mode *mode = get_irn_mode(node); - - ir_node *pred = get_Proj_pred(node); - ir_node *new_pred = transform_node(env, pred); - long proj = get_Proj_proj(node); +static ir_node *gen_Proj_l_vfdiv(ia32_transform_env_t *env, ir_node *node) { + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *pred = 
get_Proj_pred(node); + ir_node *new_pred = transform_node(env, pred); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + long proj = get_Proj_proj(node); - switch(proj) { + switch (proj) { case pn_ia32_l_vfdiv_M: - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, pn_ia32_vfdiv_M); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfdiv_M); case pn_ia32_l_vfdiv_res: - return new_rd_Proj(dbg, irg, block, new_pred, mode_E, pn_ia32_vfdiv_res); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfdiv_res); default: assert(0); } @@ -3470,34 +3500,28 @@ static ir_node *gen_Proj_l_vfdiv(ia32_transform_env_t *env, ir_node *node) return new_rd_Unknown(irg, mode); } -static ir_node *gen_Proj_Quot(ia32_transform_env_t *env, ir_node *node) -{ - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_mode *mode = get_irn_mode(node); - - ir_node *pred = get_Proj_pred(node); - ir_node *new_pred = transform_node(env, pred); - long proj = get_Proj_proj(node); +static ir_node *gen_Proj_Quot(ia32_transform_env_t *env, ir_node *node) { + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *pred = get_Proj_pred(node); + ir_node *new_pred = transform_node(env, pred); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + long proj = get_Proj_proj(node); switch(proj) { case pn_Quot_M: - if(is_ia32_xDiv(new_pred)) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, - pn_ia32_xDiv_M); - } else if(is_ia32_vfdiv(new_pred)) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_M, - pn_ia32_vfdiv_M); + if (is_ia32_xDiv(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_xDiv_M); + } else if (is_ia32_vfdiv(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_vfdiv_M); } break; case pn_Quot_res: - if(is_ia32_xDiv(new_pred)) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_E, - pn_ia32_xDiv_res); - } else if(is_ia32_vfdiv(new_pred)) { - return new_rd_Proj(dbg, irg, block, new_pred, mode_E, - pn_ia32_vfdiv_res); + if (is_ia32_xDiv(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xDiv_res); + } else if (is_ia32_vfdiv(new_pred)) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_vfp, pn_ia32_vfdiv_res); } break; default: @@ -3509,25 +3533,23 @@ static ir_node *gen_Proj_Quot(ia32_transform_env_t *env, ir_node *node) } static ir_node *gen_Proj_tls(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - //dbg_info *dbg = get_irn_dbg_info(node); - dbg_info *dbg = NULL; - ir_node *block = transform_node(env, get_nodes_block(node)); - - ir_node *res = new_rd_ia32_LdTls(dbg, irg, block, mode_Iu); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_graph *irg = env->irg; + dbg_info *dbgi = NULL; + ir_node *res = new_rd_ia32_LdTls(dbgi, irg, block, mode_Iu); return res; } static ir_node *gen_Proj_be_Call(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - long proj = get_Proj_proj(node); - ir_mode *mode = get_irn_mode(node); - ir_node *block = transform_node(env, get_nodes_block(node)); - ir_node *sse_load; - ir_node *call = get_Proj_pred(node); - ir_node *new_call = transform_node(env, call); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_node *call = get_Proj_pred(node); 
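/* For a float result the callee returns the value in st(0) (ia32 calling
 * convention), so when the SSE2 unit is in use the code below effectively
 * performs
 *     fstp  <frame slot>      ; ia32_GetST0
 *     movsd xmm, <frame slot> ; ia32_xLoad
 * and redirects the result Proj to that xLoad (illustrative sketch; the
 * exact store/load width follows the result mode). */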
+ ir_node *new_call = transform_node(env, call); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + long proj = get_Proj_proj(node); + ir_mode *mode = get_irn_mode(node); + ir_node *sse_load; const arch_register_class_t *cls; /* The following is kinda tricky: If we're using SSE, then we have to @@ -3536,26 +3558,25 @@ static ir_node *gen_Proj_be_Call(ia32_transform_env_t *env, ir_node *node) { * after the call, we have to make sure to correctly make the * MemProj and the result Proj use these 2 nodes */ - if(proj == pn_be_Call_M_regular) { + if (proj == pn_be_Call_M_regular) { // get new node for result, are we doing the sse load/store hack? ir_node *call_res = be_get_Proj_for_pn(call, pn_be_Call_first_res); ir_node *call_res_new; ir_node *call_res_pred = NULL; - if(call_res != NULL) { - call_res_new = transform_node(env, call_res); + if (call_res != NULL) { + call_res_new = transform_node(env, call_res); call_res_pred = get_Proj_pred(call_res_new); } - if(call_res_pred == NULL || be_is_Call(call_res_pred)) { - return new_rd_Proj(dbg, irg, block, new_call, mode_M, pn_be_Call_M_regular); + if (call_res_pred == NULL || be_is_Call(call_res_pred)) { + return new_rd_Proj(dbgi, irg, block, new_call, mode_M, pn_be_Call_M_regular); } else { assert(is_ia32_xLoad(call_res_pred)); - return new_rd_Proj(dbg, irg, block, call_res_pred, mode_M, pn_ia32_xLoad_M); + return new_rd_Proj(dbgi, irg, block, call_res_pred, mode_M, pn_ia32_xLoad_M); } } - if(proj == pn_be_Call_first_res && mode_is_float(mode) - && USE_SSE2(env->cg)) { + if (proj == pn_be_Call_first_res && mode_is_float(mode) && USE_SSE2(env->cg)) { ir_node *fstp; ir_node *frame = get_irg_frame(irg); ir_node *noreg = ia32_new_NoReg_gp(env->cg); @@ -3565,10 +3586,10 @@ static ir_node *gen_Proj_be_Call(ia32_transform_env_t *env, ir_node *node) { const arch_register_class_t *cls; /* in case there is no memory output: create one to serialize the copy FPU -> SSE */ - call_mem = new_rd_Proj(dbg, irg, block, new_call, mode_M, pn_be_Call_M_regular); + call_mem = new_rd_Proj(dbgi, irg, block, new_call, mode_M, pn_be_Call_M_regular); /* store st(0) onto stack */ - fstp = new_rd_ia32_GetST0(dbg, irg, block, frame, noreg, call_mem); + fstp = new_rd_ia32_GetST0(dbgi, irg, block, frame, noreg, call_mem); set_ia32_ls_mode(fstp, mode); set_ia32_op_type(fstp, ia32_AddrModeD); @@ -3577,15 +3598,14 @@ static ir_node *gen_Proj_be_Call(ia32_transform_env_t *env, ir_node *node) { set_ia32_am_support(fstp, ia32_am_Dest); /* load into SSE register */ - sse_load = new_rd_ia32_xLoad(dbg, irg, block, frame, noreg, fstp); + sse_load = new_rd_ia32_xLoad(dbgi, irg, block, frame, noreg, fstp); set_ia32_ls_mode(sse_load, mode); set_ia32_op_type(sse_load, ia32_AddrModeS); set_ia32_use_frame(sse_load); set_ia32_am_flavour(sse_load, ia32_am_B); set_ia32_am_support(sse_load, ia32_am_Source); - //mproj = new_rd_Proj(dbg, irg, block, sse_load, mode_M, pn_ia32_xLoad_M); - sse_load = new_rd_Proj(dbg, irg, block, sse_load, mode_E, pn_ia32_xLoad_res); + sse_load = new_rd_Proj(dbgi, irg, block, sse_load, mode_xmm, pn_ia32_xLoad_res); /* now: create new Keep whith all former ins and one additional in - the result Proj */ @@ -3605,58 +3625,65 @@ static ir_node *gen_Proj_be_Call(ia32_transform_env_t *env, ir_node *node) { return sse_load; } - /* transform call modes to the mode_Iu or mode_E */ - if(mode != mode_M) { + /* transform call modes */ + if (mode_is_data(mode)) { cls = arch_get_irn_reg_class(env->cg->arch_env, node, -1); mode = cls->mode; } - return 
new_rd_Proj(dbg, irg, block, new_call, mode, proj); + return new_rd_Proj(dbgi, irg, block, new_call, mode, proj); } static ir_node *gen_Proj(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_node *pred = get_Proj_pred(node); - long proj = get_Proj_proj(node); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *pred = get_Proj_pred(node); + long proj = get_Proj_proj(node); - if(is_Store(pred) || be_is_FrameStore(pred)) { - if(proj == pn_Store_M) { + if (is_Store(pred) || be_is_FrameStore(pred)) { + if (proj == pn_Store_M) { return transform_node(env, pred); } else { assert(0); return new_r_Bad(irg); } - } else if(is_Load(pred) || be_is_FrameLoad(pred)) { + } else if (is_Load(pred) || be_is_FrameLoad(pred)) { return gen_Proj_Load(env, node); - } else if(is_Div(pred) || is_Mod(pred) || is_DivMod(pred)) { + } else if (is_Div(pred) || is_Mod(pred) || is_DivMod(pred)) { return gen_Proj_DivMod(env, node); - } else if(is_CopyB(pred)) { + } else if (is_CopyB(pred)) { return gen_Proj_CopyB(env, node); - } else if(is_Quot(pred)) { + } else if (is_Quot(pred)) { return gen_Proj_Quot(env, node); - } else if(is_ia32_l_vfdiv(pred)) { + } else if (is_ia32_l_vfdiv(pred)) { return gen_Proj_l_vfdiv(env, node); - } else if(be_is_SubSP(pred)) { + } else if (be_is_SubSP(pred)) { return gen_Proj_be_SubSP(env, node); - } else if(be_is_AddSP(pred)) { + } else if (be_is_AddSP(pred)) { return gen_Proj_be_AddSP(env, node); - } else if(be_is_Call(pred)) { + } else if (be_is_Call(pred)) { return gen_Proj_be_Call(env, node); - } else if(get_irn_op(pred) == op_Start) { - if(proj == pn_Start_X_initial_exec) { + } else if (get_irn_op(pred) == op_Start) { + if (proj == pn_Start_X_initial_exec) { ir_node *block = get_nodes_block(pred); ir_node *jump; + /* we exchange the ProjX with a jump */ block = transform_node(env, block); - // we exchange the ProjX with a jump - jump = new_rd_Jmp(dbg, irg, block); + jump = new_rd_Jmp(dbgi, irg, block); ir_fprintf(stderr, "created jump: %+F\n", jump); return jump; } - if(node == env->old_anchors[anchor_tls]) { + if (node == env->old_anchors[anchor_tls]) { return gen_Proj_tls(env, node); } + } else { + ir_node *new_pred = transform_node(env, pred); + ir_node *block = transform_node(env, get_nodes_block(node)); + ir_mode *mode = get_irn_mode(node); + if (mode_needs_gp_reg(mode)) { + return new_r_Proj(irg, block, new_pred, mode_Iu, get_Proj_proj(node)); + } } return duplicate_node(env, node); @@ -3731,7 +3758,7 @@ static void register_transformers(void) { GEN(ia32_l_vfsub); GEN(ia32_l_vfild); GEN(ia32_l_Load); - GEN(ia32_l_vfist); + /* GEN(ia32_l_vfist); TODO */ GEN(ia32_l_Store); GEN(ia32_l_X87toSSE); GEN(ia32_l_SSEtoX87); @@ -3787,8 +3814,8 @@ static void duplicate_deps(ia32_transform_env_t *env, ir_node *old_node, int i; int deps = get_irn_deps(old_node); - for(i = 0; i < deps; ++i) { - ir_node *dep = get_irn_dep(old_node, i); + for (i = 0; i < deps; ++i) { + ir_node *dep = get_irn_dep(old_node, i); ir_node *new_dep = transform_node(env, dep); add_irn_dep(new_node, new_dep); @@ -3797,46 +3824,50 @@ static void duplicate_deps(ia32_transform_env_t *env, ir_node *old_node, static ir_node *duplicate_node(ia32_transform_env_t *env, ir_node *node) { - ir_graph *irg = env->irg; - dbg_info *dbg = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); - ir_op *op = get_irn_op(node); - ir_node *block; - ir_node *new_node; - int i, arity; - - block = transform_node(env, get_nodes_block(node)); + 
ir_node *block = transform_node(env, get_nodes_block(node)); + ir_graph *irg = env->irg; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); + ir_op *op = get_irn_op(node); + ir_node *new_node; + int i, arity; arity = get_irn_arity(node); - if(op->opar == oparity_dynamic) { - new_node = new_ir_node(dbg, irg, block, op, mode, -1, NULL); - for(i = 0; i < arity; ++i) { + if (op->opar == oparity_dynamic) { + new_node = new_ir_node(dbgi, irg, block, op, mode, -1, NULL); + for (i = 0; i < arity; ++i) { ir_node *in = get_irn_n(node, i); in = transform_node(env, in); add_irn_n(new_node, in); } } else { ir_node **ins = alloca(arity * sizeof(ins[0])); - for(i = 0; i < arity; ++i) { + for (i = 0; i < arity; ++i) { ir_node *in = get_irn_n(node, i); ins[i] = transform_node(env, in); } - new_node = new_ir_node(dbg, irg, block, op, mode, arity, ins); + new_node = new_ir_node(dbgi, irg, block, op, mode, arity, ins); } copy_node_attr(node, new_node); duplicate_deps(env, node, new_node); +#ifdef DEBUG_libfirm + new_node->node_nr = node->node_nr; +#endif + return new_node; } -static ir_node *transform_node(ia32_transform_env_t *env, ir_node *node) -{ +/** + * Calls transformation function for given node and marks it visited. + */ +static ir_node *transform_node(ia32_transform_env_t *env, ir_node *node) { ir_node *new_node; ir_op *op = get_irn_op(node); - if(irn_visited(node)) { + if (irn_visited(node)) { assert(get_new_node(node) != NULL); return get_new_node(node); } @@ -3852,7 +3883,7 @@ static ir_node *transform_node(ia32_transform_env_t *env, ir_node *node) } else { new_node = duplicate_node(env, node); } - //ir_fprintf(stderr, "%+F -> %+F\n", node, new_node); + DB((dbg, LEVEL_4, "%+F -> %+F\n", node, new_node)); set_new_node(node, new_node); mark_irn_visited(new_node); @@ -3860,21 +3891,24 @@ static ir_node *transform_node(ia32_transform_env_t *env, ir_node *node) return new_node; } -static void fix_loops(ia32_transform_env_t *env, ir_node *node) -{ +/** + * Rewire nodes which are potential loops (like Phis) to avoid endless loops. + */ +static void fix_loops(ia32_transform_env_t *env, ir_node *node) { int i, arity; - if(irn_visited(node)) + if (irn_visited(node)) return; + mark_irn_visited(node); assert(node_is_in_irgs_storage(env->irg, node)); - if(!is_Block(node)) { - ir_node *block = get_nodes_block(node); - ir_node *new_block = (ir_node*) get_irn_link(block); + if (! 
is_Block(node)) { + ir_node *block = get_nodes_block(node); + ir_node *new_block = (ir_node *)get_irn_link(block); - if(new_block != NULL) { + if (new_block != NULL) { set_nodes_block(node, new_block); block = new_block; } @@ -3883,26 +3917,26 @@ static void fix_loops(ia32_transform_env_t *env, ir_node *node) } arity = get_irn_arity(node); - for(i = 0; i < arity; ++i) { + for (i = 0; i < arity; ++i) { ir_node *in = get_irn_n(node, i); - ir_node *new = (ir_node*) get_irn_link(in); + ir_node *nw = (ir_node *)get_irn_link(in); - if(new != NULL && new != in) { - set_irn_n(node, i, new); - in = new; + if (nw != NULL && nw != in) { + set_irn_n(node, i, nw); + in = nw; } fix_loops(env, in); } arity = get_irn_deps(node); - for(i = 0; i < arity; ++i) { + for (i = 0; i < arity; ++i) { ir_node *in = get_irn_dep(node, i); - ir_node *new = (ir_node*) get_irn_link(in); + ir_node *nw = (ir_node *)get_irn_link(in); - if(new != NULL && new != in) { - set_irn_dep(node, i, new); - in = new; + if (nw != NULL && nw != in) { + set_irn_dep(node, i, nw); + in = nw; } fix_loops(env, in); @@ -3911,47 +3945,49 @@ static void fix_loops(ia32_transform_env_t *env, ir_node *node) static void pre_transform_node(ir_node **place, ia32_transform_env_t *env) { - if(*place == NULL) + if (*place == NULL) return; *place = transform_node(env, *place); } -static void transform_nodes(ia32_code_gen_t *cg) -{ - int i; +/** + * Transforms all nodes. Deletes the old obstack and creates a new one. + */ +static void transform_nodes(ia32_code_gen_t *cg) { + int i; ir_graph *irg = cg->irg; - ir_node *old_end; + ir_node *old_end; ia32_transform_env_t env; hook_dead_node_elim(irg, 1); inc_irg_visited(irg); - env.irg = irg; - env.cg = cg; - env.visited = get_irg_visited(irg); - env.worklist = new_pdeq(); + env.irg = irg; + env.cg = cg; + env.visited = get_irg_visited(irg); + env.worklist = new_pdeq(); env.old_anchors = alloca(anchor_max * sizeof(env.old_anchors[0])); - DEBUG_ONLY(env.mod = cg->mod); old_end = get_irg_end(irg); /* put all anchor nodes in the worklist */ - for(i = 0; i < anchor_max; ++i) { + for (i = 0; i < anchor_max; ++i) { ir_node *anchor = irg->anchors[i]; - if(anchor == NULL) + + if (anchor == NULL) continue; pdeq_putr(env.worklist, anchor); - // remember anchor + /* remember anchor */ env.old_anchors[i] = anchor; - // and set it to NULL to make sure we don't accidently use it + /* and set it to NULL to make sure we don't accidently use it */ irg->anchors[i] = NULL; } - // pre transform some anchors (so they are available in the other transform - // functions) + /* pre transform some anchors (so they are available in the other transform + * functions) */ set_irg_bad(irg, transform_node(&env, env.old_anchors[anchor_bad])); set_irg_no_mem(irg, transform_node(&env, env.old_anchors[anchor_no_mem])); set_irg_start_block(irg, transform_node(&env, env.old_anchors[anchor_start_block])); @@ -3966,16 +4002,17 @@ static void transform_nodes(ia32_code_gen_t *cg) pre_transform_node(&cg->noreg_xmm, &env); /* process worklist (this should transform all nodes in the graph) */ - while(!pdeq_empty(env.worklist)) { + while (! 
pdeq_empty(env.worklist)) { ir_node *node = pdeq_getl(env.worklist); transform_node(&env, node); } /* fix loops and set new anchors*/ inc_irg_visited(irg); - for(i = 0; i < anchor_max; ++i) { + for (i = 0; i < anchor_max; ++i) { ir_node *anchor = env.old_anchors[i]; - if(anchor == NULL) + + if (anchor == NULL) continue; anchor = get_irn_link(anchor); @@ -4065,7 +4102,7 @@ static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg ir_node *cmp = get_Proj_pred(in); ir_node *cmp_a = get_Cmp_left(cmp); ir_node *cmp_b = get_Cmp_right(cmp); - dbg_info *dbg = get_irn_dbg_info(cmp); + dbg_info *dbgi = get_irn_dbg_info(cmp); ir_graph *irg = get_irn_irg(cmp); ir_node *block = get_nodes_block(cmp); ir_node *noreg = ia32_new_NoReg_gp(cg); @@ -4080,25 +4117,22 @@ static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg ir_mode *m = get_irn_mode(cmp_a); /* SSE FPU */ if (! mode_is_float(m)) { - cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, cmp_a, mode); - cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, cmp_b, mode); - } - else if (m == mode_F) { + cmp_a = gen_sse_conv_int2float(cg, dbgi, irg, block, cmp_a, cmp_a, mode); + cmp_b = gen_sse_conv_int2float(cg, dbgi, irg, block, cmp_b, cmp_b, mode); + } else if (m == mode_F) { /* we convert cmp values always to double, to get correct bitmask with cmpsd */ - cmp_a = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_a, cmp_a); - cmp_b = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_b, cmp_b); + cmp_a = gen_sse_conv_f2d(cg, dbgi, irg, block, cmp_a, cmp_a); + cmp_b = gen_sse_conv_f2d(cg, dbgi, irg, block, cmp_b, cmp_b); } - new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); + new_op = new_rd_ia32_xCmp(dbgi, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); set_ia32_pncode(new_op, pnc); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, cmp)); - } - else { + } else { /* x87 FPU */ assert(0); } - } - else { + } else { /* integer Psi */ construct_binop_func *set_func = NULL; @@ -4109,21 +4143,19 @@ static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg if (USE_SSE2(cg)) { /* SSE FPU */ set_func = new_rd_ia32_xCmpSet; - } - else { + } else { /* x87 FPU */ set_func = new_rd_ia32_vfCmpSet; } pnc &= 7; /* fp compare -> int compare */ - } - else { + } else { /* 2nd case: compare operand are integer too */ set_func = new_rd_ia32_CmpSet; } - new_op = set_func(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - if(!mode_is_signed(mode)) + new_op = set_func(dbgi, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); + if (! mode_is_signed(mode)) pnc |= ia32_pn_Cmp_Unsigned; set_ia32_pncode(new_op, pnc); @@ -4132,8 +4164,7 @@ static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg /* the the new compare as in */ set_irn_n(cond, i, new_op); - } - else { + } else { /* another complex condition */ transform_psi_cond(in, mode, cg); } @@ -4142,8 +4173,8 @@ static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg /** * The Psi selector can be a tree of compares combined with "And"s and "Or"s. - * We create a Set node, respectively a xCmp in case the Psi is a float, for each - * compare, which causes the compare result to be stores in a register. The + * We create a Set node, respectively a xCmp in case the Psi is a float, for + * each compare, which causes the compare result to be stored in a register. The * "And"s and "Or"s are transformed later, we just have to set their mode right. 
*/ void ia32_transform_psi_cond_tree(ir_node *node, void *env) { @@ -4159,11 +4190,13 @@ void ia32_transform_psi_cond_tree(ir_node *node, void *env) { psi_sel = get_Psi_cond(node, 0); /* if psi_cond is a cmp: do nothing, this case is covered by gen_Psi */ - if (is_Proj(psi_sel)) + if (is_Proj(psi_sel)) { + assert(is_Cmp(get_Proj_pred(psi_sel))); return; + } //mode = get_irn_mode(node); - // TODO this is probably wrong... + // TODO probably wrong... mode = mode_Iu; transform_psi_cond(psi_sel, mode, cg); @@ -4172,17 +4205,24 @@ void ia32_transform_psi_cond_tree(ir_node *node, void *env) { block = get_nodes_block(node); /* we need to compare the evaluated condition tree with 0 */ - mode = get_irn_mode(node); + mode = get_irn_mode(node); if (mode_is_float(mode)) { - psi_sel = gen_sse_conv_int2float(cg, NULL, irg, block, psi_sel, NULL, mode); /* BEWARE: new_r_Const_long works for floating point as well */ - new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode, 0)); + ir_node *zero = new_r_Const_long(irg, block, mode, 0); + + psi_sel = gen_sse_conv_int2float(cg, NULL, irg, block, psi_sel, NULL, mode); + new_cmp = new_r_Cmp(irg, block, psi_sel, zero); new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Ne); - } - else { - new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode_Iu, 0)); + } else { + ir_node *zero = new_r_Const_long(irg, block, mode_Iu, 0); + new_cmp = new_r_Cmp(irg, block, psi_sel, zero); new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Gt | pn_Cmp_Lt); } set_Psi_cond(node, 0, new_cmp); } + +void ia32_init_transform(void) +{ + FIRM_DBG_REGISTER(dbg, "firm.be.ia32.transform"); +}
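
The Proj handlers in the hunks above (gen_Proj_l_vfdiv, gen_Proj_Quot, the tail of gen_Proj_be_Call) all follow one pattern: transform the predecessor first, then rebuild the Proj with the ia32 result mode (mode_M, mode_vfp or mode_xmm) and the matching ia32 proj number. A minimal sketch of that shared shape, using only calls that appear in the patch; the helper name itself is hypothetical and not part of the patch:

/* Sketch, not part of the patch: the common shape of the gen_Proj_* handlers. */
static ir_node *remap_proj_sketch(ia32_transform_env_t *env, ir_node *node,
                                  ir_mode *new_mode, long new_pn)
{
	ir_node  *block    = transform_node(env, get_nodes_block(node));
	ir_node  *new_pred = transform_node(env, get_Proj_pred(node));
	dbg_info *dbgi     = get_irn_dbg_info(node);

	/* e.g. new_mode = mode_vfp, new_pn = pn_ia32_vfdiv_res for the result of
	 * an x87 division; mode_M, pn_ia32_vfdiv_M for its memory output */
	return new_rd_Proj(dbgi, env->irg, block, new_pred, new_mode, new_pn);
}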
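
In transform_psi_cond, each leaf compare of a Psi condition tree becomes a Set-style node whose 0/1 result is kept in a register: xCmpSet when SSE2 handles floats, vfCmpSet for the x87 unit, and plain CmpSet for integer operands (float compares additionally fold the pncode down to an integer compare via pnc &= 7). The constructor choice can be summarised as the following sketch; the helper name is made up, while the constructors and macros are the ones used in the patch:

/* Sketch only: summarises the set_func selection done in transform_psi_cond. */
static construct_binop_func *select_set_func_sketch(ia32_code_gen_t *cg,
                                                    ir_mode *cmp_op_mode)
{
	if (mode_is_float(cmp_op_mode)) {
		/* float compare whose result is still delivered as an integer 0/1 */
		return USE_SSE2(cg) ? new_rd_ia32_xCmpSet : new_rd_ia32_vfCmpSet;
	}
	/* both compare operands are integers */
	return new_rd_ia32_CmpSet;
}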
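
ia32_transform_psi_cond_tree has the usual (ir_node *, void *) walker shape, so the natural way to run it is once over every node of the graph before the main transformation. The call site sketched below is an assumption about the surrounding backend code, not something this patch shows:

#include "irgwalk.h" /* for irg_walk_graph(), assuming the standard walker header */

/* Hypothetical driver: lower all Psi condition trees of the current irg. */
static void lower_psi_conditions_sketch(ia32_code_gen_t *cg)
{
	irg_walk_graph(cg->irg, ia32_transform_psi_cond_tree, NULL, cg);
}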
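
The new ia32_init_transform registers the "firm.be.ia32.transform" debug module that feeds the DB((dbg, LEVEL_4, "%+F -> %+F\n", ...)) trace in transform_node. Seeing that output requires raising the module's mask at run time; the sketch below assumes libFirm's usual firm_dbg_set_mask()/SET_LEVEL_* debug interface, so treat those two names as assumptions if your tree differs:

/* Sketch: make the per-node transformation trace visible. */
static void enable_transform_trace_sketch(void)
{
	firm_dbg_module_t *mod = firm_dbg_register("firm.be.ia32.transform");

	firm_dbg_set_mask(mod, SET_LEVEL_4); /* assumed API: enables LEVEL_4 output */
}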