X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=9f789d9f20ff773c3a909b7eb0ad57d58bccb129;hb=09480efeccb17e980766b3ca092bb0f6ebc0a44f;hp=7e571d189409613a9d35fb51aafe9a9073b5964f;hpb=06588d996f1597c6098b9715ba7fc81afd975e9b;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index 7e571d189..9f789d9f2 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -1,7 +1,6 @@ /** - * This file implements the IR transformation from firm into - * ia32-Firm. - * + * This file implements the IR transformation from firm into ia32-Firm. + * @author Christian Wuerdig * $Id$ */ @@ -25,18 +24,20 @@ #include "dbginfo.h" #include "irprintf.h" #include "debug.h" +#include "irdom.h" +#include "archop.h" /* we need this for Min and Max nodes */ #include "../benode_t.h" #include "../besched.h" +#include "../beabi.h" #include "bearch_ia32_t.h" - #include "ia32_nodes_attr.h" -#include "../arch/archop.h" /* we need this for Min and Max nodes */ #include "ia32_transform.h" #include "ia32_new_nodes.h" #include "ia32_map_regs.h" #include "ia32_dbg_stat.h" +#include "ia32_optimize.h" #include "gen_ia32_regalloc_if.h" @@ -77,6 +78,37 @@ typedef enum { * ****************************************************************************************************/ +/** + * Returns 1 if irn is a Const representing 0, 0 otherwise + */ +static INLINE int is_ia32_Const_0(ir_node *irn) { + return is_ia32_Const(irn) ? classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_NULL : 0; +} + +/** + * Returns 1 if irn is a Const representing 1, 0 otherwise + */ +static INLINE int is_ia32_Const_1(ir_node *irn) { + return is_ia32_Const(irn) ? classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_ONE : 0; +} + +/** + * Returns the Proj representing the UNKNOWN register for given mode. + */ +static ir_node *be_get_unknown_for_mode(ia32_code_gen_t *cg, ir_mode *mode) { + be_abi_irg_t *babi = cg->birg->abi; + const arch_register_t *unknwn_reg = NULL; + + if (mode_is_float(mode)) { + unknwn_reg = USE_SSE2(cg) ? &ia32_xmm_regs[REG_XMM_UKNWN] : &ia32_vfp_regs[REG_VFP_UKNWN]; + } + else { + unknwn_reg = &ia32_gp_regs[REG_GP_UKNWN]; + } + + return be_abi_get_callee_save_irn(babi, unknwn_reg); +} + /** * Gets the Proj with number pn from irn. */ @@ -95,6 +127,24 @@ static ir_node *get_proj_for_pn(const ir_node *irn, long pn) { return NULL; } +/** + * SSE convert of an integer node into a floating point node. + */ +static ir_node *gen_sse_conv_int2float(ia32_code_gen_t *cg, dbg_info *dbg, ir_graph *irg, ir_node *block, + ir_node *in, ir_node *old_node, ir_mode *tgt_mode) +{ + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *nomem = new_rd_NoMem(irg); + + ir_node *conv = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, in, nomem); + set_ia32_src_mode(conv, get_irn_mode(in)); + set_ia32_tgt_mode(conv, tgt_mode); + set_ia32_am_support(conv, ia32_am_Source); + SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node)); + + return new_rd_Proj(dbg, irg, block, conv, tgt_mode, pn_ia32_Conv_I2FP_res); +} + /* Generates an entity for a known FP const (used for FP Neg + Abs) */ static ident *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) { static const struct { @@ -196,7 +246,11 @@ static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, /* Check if immediate optimization is on and */ /* if it's an operation with immediate. */ - if (! 
(env->cg->opt & IA32_OPT_IMMOPS)) { + /* MulS and Mulh don't support immediates */ + if (! (env->cg->opt & IA32_OPT_IMMOPS) || + func == new_rd_ia32_Mulh || + func == new_rd_ia32_MulS) + { expr_op = op1; imm_op = NULL; } @@ -229,6 +283,7 @@ static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, new_op = func(dbg, irg, block, noreg_gp, noreg_gp, op1, op2, nomem); set_ia32_am_support(new_op, ia32_am_Source); } + set_ia32_ls_mode(new_op, mode); } else { /* integer operations */ @@ -305,6 +360,7 @@ static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *op1, ir_node if (tv) { tv = tarval_mod(tv, new_tarval_from_long(32, mode_Iu)); + set_ia32_Immop_tarval(imm_op, tv); } else { imm_op = NULL; @@ -471,11 +527,15 @@ static ir_node *gen_Add(ia32_transform_env_t *env) { new_op = new_rd_ia32_Lea(dbg, irg, block, op1, noreg, mode); set_ia32_am_sc(new_op, get_ia32_id_cnst(op2)); set_ia32_am_flavour(new_op, ia32_am_OB); + + DBG_OPT_LEA1(op2, new_op); } else { /* this is the 1st case */ new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode); + DBG_OPT_LEA2(op1, op2, new_op); + if (get_ia32_op_type(op1) == ia32_SymConst) { set_ia32_am_sc(new_op, get_ia32_id_cnst(op1)); add_ia32_am_offs(new_op, get_ia32_cnst(op2)); @@ -515,7 +575,7 @@ static ir_node *gen_Add(ia32_transform_env_t *env) { set_ia32_res_mode(new_op, mode); - return new_rd_Proj(dbg, irg, block, new_op, mode, 0); + return new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Add_res); } @@ -791,11 +851,15 @@ static ir_node *gen_Sub(ia32_transform_env_t *env) { set_ia32_am_sc(new_op, get_ia32_id_cnst(op2)); set_ia32_am_sc_sign(new_op); set_ia32_am_flavour(new_op, ia32_am_OB); + + DBG_OPT_LEA1(op2, new_op); } else { /* this is the 1st case */ new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode); + DBG_OPT_LEA2(op1, op2, new_op); + if (get_ia32_op_type(op1) == ia32_SymConst) { set_ia32_am_sc(new_op, get_ia32_id_cnst(op1)); sub_ia32_am_offs(new_op, get_ia32_cnst(op2)); @@ -835,7 +899,7 @@ static ir_node *gen_Sub(ia32_transform_env_t *env) { set_ia32_res_mode(new_op, mode); - return new_rd_Proj(dbg, irg, block, new_op, mode, 0); + return new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_Sub_res); } @@ -903,11 +967,11 @@ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir if (get_irn_op(irn) == op_Div) { set_Proj_proj(proj, pn_DivMod_res_div); - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Is, pn_DivMod_res_mod); + in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_DivMod_res_mod); } else { set_Proj_proj(proj, pn_DivMod_res_mod); - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Is, pn_DivMod_res_div); + in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_DivMod_res_div); } be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); @@ -915,7 +979,7 @@ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); - set_ia32_res_mode(res, mode_Is); + set_ia32_res_mode(res, mode); return res; } @@ -1241,12 +1305,12 @@ static ir_node *gen_Abs(ia32_transform_env_t *env) { * @return the created ia32 Load node */ static ir_node *gen_Load(ia32_transform_env_t *env) { - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *ptr = get_Load_ptr(node); - ir_node *lptr = ptr; - ir_mode *mode = get_Load_mode(node); - int is_imm = 0; + ir_node *node = env->irn; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *ptr = 
get_Load_ptr(node); + ir_node *lptr = ptr; + ir_mode *mode = get_Load_mode(node); + int is_imm = 0; ir_node *new_op; ia32_am_flavour_t am_flav = ia32_B; @@ -1284,6 +1348,16 @@ static ir_node *gen_Load(ia32_transform_env_t *env) { set_ia32_am_flavour(new_op, am_flav); set_ia32_ls_mode(new_op, mode); + /* + check for special case: the loaded value might not be used (optimized, volatile, ...) + we add a Proj + Keep for volatile loads and ignore all other cases + */ + if (! get_proj_for_pn(node, pn_Load_res) && get_Load_volatility(node) == volatility_is_volatile) { + /* add a result proj and a Keep to produce a pseudo use */ + ir_node *proj = new_r_Proj(env->irg, env->block, new_op, mode, pn_ia32_Load_res); + be_new_Keep(arch_get_irn_reg_class(env->cg->arch_env, proj, -1), env->irg, env->block, 1, &proj); + } + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); return new_op; @@ -1304,7 +1378,7 @@ static ir_node *gen_Store(ia32_transform_env_t *env) { ir_node *ptr = get_Store_ptr(node); ir_node *sptr = ptr; ir_node *mem = get_Store_mem(node); - ir_mode *mode = get_irn_mode(val); + ir_mode *mode = get_irn_link(node); ir_node *sval = val; int is_imm = 0; ir_node *new_op; @@ -1368,7 +1442,7 @@ static ir_node *gen_Store(ia32_transform_env_t *env) { set_ia32_am_support(new_op, ia32_am_Dest); set_ia32_op_type(new_op, ia32_AddrModeD); set_ia32_am_flavour(new_op, am_flav); - set_ia32_ls_mode(new_op, get_irn_mode(val)); + set_ia32_ls_mode(new_op, mode); set_ia32_immop_type(new_op, immop); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); @@ -1413,7 +1487,9 @@ static ir_node *gen_Cond(ia32_transform_env_t *env) { pn_Cmp pnc = get_Proj_proj(sel); if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) { - if (classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) { + if (get_ia32_op_type(cnst) == ia32_Const && + classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) + { /* a Cmp A =/!= 0 */ ir_node *op1 = expr; ir_node *op2 = expr; @@ -1561,12 +1637,176 @@ static ir_node *gen_CopyB(ia32_transform_env_t *env) { * @return The transformed node. */ static ir_node *gen_Mux(ia32_transform_env_t *env) { +#if 0 ir_node *node = env->irn; ir_node *new_op = new_rd_ia32_CMov(env->dbg, env->irg, env->block, \ get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env->mode); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + return new_op; +#endif + return NULL; +} + +typedef ir_node *cmov_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *cmp_a, ir_node *cmp_b, \ + ir_node *psi_true, ir_node *psi_default, ir_mode *mode); + +/** + * Transforms a Psi node into CMov. + * + * @param env The transformation environment + * @return The transformed node. 
+ */ +static ir_node *gen_Psi(ia32_transform_env_t *env) { + ia32_code_gen_t *cg = env->cg; + dbg_info *dbg = env->dbg; + ir_graph *irg = env->irg; + ir_mode *mode = env->mode; + ir_node *block = env->block; + ir_node *node = env->irn; + ir_node *cmp_proj = get_Mux_sel(node); + ir_node *psi_true = get_Psi_val(node, 0); + ir_node *psi_default = get_Psi_default(node); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *nomem = new_rd_NoMem(irg); + ir_node *cmp, *cmp_a, *cmp_b, *and1, *and2, *new_op = NULL; + int pnc; + + assert(get_irn_mode(cmp_proj) == mode_b && "Condition for Psi must have mode_b"); + + cmp = get_Proj_pred(cmp_proj); + cmp_a = get_Cmp_left(cmp); + cmp_b = get_Cmp_right(cmp); + pnc = get_Proj_proj(cmp_proj); + + if (mode_is_float(mode)) { + /* floating point psi */ + FP_USED(cg); + + /* 1st case: compare operands are float too */ + if (USE_SSE2(cg)) { + /* psi(cmp(a, b), t, f) can be done as: */ + /* tmp = cmp a, b */ + /* tmp2 = t and tmp */ + /* tmp3 = f and not tmp */ + /* res = tmp2 or tmp3 */ + + /* in case the compare operands are int, we move them into xmm register */ + if (! mode_is_float(get_irn_mode(cmp_a))) { + cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, node, mode_D); + cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, node, mode_D); + + pnc |= 8; /* transform integer compare to fp compare */ + } + + new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); + set_ia32_pncode(new_op, pnc); + set_ia32_am_support(new_op, ia32_am_Source); + set_ia32_res_mode(new_op, mode); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); + new_op = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_xCmp_res); + + and1 = new_rd_ia32_xAnd(dbg, irg, block, noreg, noreg, psi_true, new_op, nomem); + set_ia32_am_support(and1, ia32_am_Source); + set_ia32_res_mode(and1, mode); + SET_IA32_ORIG_NODE(and1, ia32_get_old_node_name(cg, node)); + and1 = new_rd_Proj(dbg, irg, block, and1, mode, pn_ia32_xAnd_res); + + and2 = new_rd_ia32_xAndNot(dbg, irg, block, noreg, noreg, new_op, psi_default, nomem); + set_ia32_am_support(and2, ia32_am_Source); + set_ia32_res_mode(and2, mode); + SET_IA32_ORIG_NODE(and2, ia32_get_old_node_name(cg, node)); + and2 = new_rd_Proj(dbg, irg, block, and2, mode, pn_ia32_xAndNot_res); + + new_op = new_rd_ia32_xOr(dbg, irg, block, noreg, noreg, and1, and2, nomem); + set_ia32_am_support(new_op, ia32_am_Source); + set_ia32_res_mode(new_op, mode); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); + new_op = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_xOr_res); + } + else { + /* x87 FPU */ + new_op = new_rd_ia32_vfCMov(dbg, irg, block, cmp_a, cmp_b, psi_true, psi_default, mode); + set_ia32_pncode(new_op, pnc); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); + } + } + else { + /* integer psi */ + construct_binop_func *set_func = NULL; + cmov_func_t *cmov_func = NULL; + + if (mode_is_float(get_irn_mode(cmp_a))) { + /* 1st case: compare operands are floats */ + FP_USED(cg); + + if (USE_SSE2(cg)) { + /* SSE FPU */ + set_func = new_rd_ia32_xCmpSet; + cmov_func = new_rd_ia32_xCmpCMov; + } + else { + /* x87 FPU */ + set_func = new_rd_ia32_vfCmpSet; + cmov_func = new_rd_ia32_vfCmpCMov; + } + + pnc &= 7; /* fp compare -> int compare */ + } + else { + /* 2nd case: compare operand are integer too */ + set_func = new_rd_ia32_CmpSet; + cmov_func = new_rd_ia32_CmpCMov; + } + + /* create the nodes */ + + /* check for special case first: And/Or -- Cmp with 0 -- Psi */ + if 
(is_ia32_Const_0(cmp_b) && is_Proj(cmp_a) && (is_ia32_And(get_Proj_pred(cmp_a)) || is_ia32_Or(get_Proj_pred(cmp_a)))) { + if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) { + /* first case for SETcc: default is 0, set to 1 iff condition is true */ + new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, cmp_a, mode); + set_ia32_pncode(new_op, pnc); + } + else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) { + /* second case for SETcc: default is 1, set to 0 iff condition is true: */ + /* we invert condition and set default to 0 */ + new_op = new_rd_ia32_PsiCondSet(dbg, irg, block, cmp_a, mode); + set_ia32_pncode(new_op, get_inversed_pnc(pnc)); + } + else { + /* otherwise: use CMOVcc */ + new_op = new_rd_ia32_PsiCondCMov(dbg, irg, block, cmp_a, psi_true, psi_default, mode); + set_ia32_pncode(new_op, pnc); + } + + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); + } + else { + env->irn = cmp; + if (is_ia32_Const_1(psi_true) && is_ia32_Const_0(psi_default)) { + /* first case for SETcc: default is 0, set to 1 iff condition is true */ + new_op = gen_binop(env, cmp_a, cmp_b, set_func); + set_ia32_pncode(get_Proj_pred(new_op), pnc); + set_ia32_am_support(get_Proj_pred(new_op), ia32_am_Source); + } + else if (is_ia32_Const_0(psi_true) && is_ia32_Const_1(psi_default)) { + /* second case for SETcc: default is 1, set to 0 iff condition is true: */ + /* we invert condition and set default to 0 */ + new_op = gen_binop(env, cmp_a, cmp_b, set_func); + set_ia32_pncode(get_Proj_pred(new_op), get_inversed_pnc(pnc)); + set_ia32_am_support(get_Proj_pred(new_op), ia32_am_Source); + } + else { + /* otherwise: use CMOVcc */ + new_op = cmov_func(dbg, irg, block, cmp_a, cmp_b, psi_true, psi_default, mode); + set_ia32_pncode(new_op, pnc); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); + } + } + } + return new_op; } @@ -1627,7 +1867,7 @@ static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_mode *tgt_mode) { set_ia32_am_support(fist, ia32_am_Dest); set_ia32_op_type(fist, ia32_AddrModeD); set_ia32_am_flavour(fist, ia32_B); - set_ia32_ls_mode(fist, mode_E); + set_ia32_ls_mode(fist, mode_F); mem = new_r_Proj(irg, block, fist, mode_M, pn_ia32_vfist_M); @@ -1696,9 +1936,9 @@ static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_mode *src_mode) { set_ia32_am_support(fild, ia32_am_Source); set_ia32_op_type(fild, ia32_AddrModeS); set_ia32_am_flavour(fild, ia32_B); - set_ia32_ls_mode(fild, mode_E); + set_ia32_ls_mode(fild, mode_F); - return new_r_Proj(irg, block, fild, mode_E, 0); + return new_r_Proj(irg, block, fild, mode_F, 0); } /** @@ -1708,18 +1948,19 @@ static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_mode *src_mode) { * @return The created ia32 Conv node */ static ir_node *gen_Conv(ia32_transform_env_t *env) { - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *op = get_Conv_op(env->irn); - ir_mode *src_mode = get_irn_mode(op); - ir_mode *tgt_mode = env->mode; - int src_bits = get_mode_size_bits(src_mode); - int tgt_bits = get_mode_size_bits(tgt_mode); - ir_node *block = env->block; - ir_node *new_op = NULL; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *nomem = new_rd_NoMem(irg); - ir_node *proj; + dbg_info *dbg = env->dbg; + ir_graph *irg = env->irg; + ir_node *op = get_Conv_op(env->irn); + ir_mode *src_mode = get_irn_mode(op); + ir_mode *tgt_mode = env->mode; + int src_bits = get_mode_size_bits(src_mode); + int tgt_bits = get_mode_size_bits(tgt_mode); + int pn = -1; + ir_node *block = env->block; + 
ir_node *new_op = NULL; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_rd_NoMem(irg); + ir_node *proj; DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) if (src_mode == tgt_mode) { @@ -1734,6 +1975,7 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { if (USE_SSE2(env->cg)) { DB((mod, LEVEL_1, "create Conv(float, float) ...")); new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem); + pn = pn_ia32_Conv_FP2FP_res; } else { DB((mod, LEVEL_1, "killed Conv(float, float) ...")); @@ -1743,8 +1985,10 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { else { /* ... to int */ DB((mod, LEVEL_1, "create Conv(float, int) ...")); - if (USE_SSE2(env->cg)) + if (USE_SSE2(env->cg)) { new_op = new_rd_ia32_Conv_FP2I(dbg, irg, block, noreg, noreg, op, nomem); + pn = pn_ia32_Conv_FP2I_res; + } else return gen_x87_fp_to_gp(env, tgt_mode); @@ -1755,13 +1999,15 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { set_ia32_tgt_mode(new_op, tgt_mode); set_ia32_src_mode(new_op, src_mode); - proj = new_rd_Proj(dbg, irg, block, new_op, mode_Is, 0); + proj = new_rd_Proj(dbg, irg, block, new_op, mode_Is, pn_ia32_Conv_FP2I_res); if (tgt_bits == 8 || src_bits == 8) { new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, proj, nomem); + pn = pn_ia32_Conv_I2I8Bit_res; } else { new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, proj, nomem); + pn = pn_ia32_Conv_I2I_res; } } } @@ -1772,8 +2018,10 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { FP_USED(env->cg); /* ... to float */ DB((mod, LEVEL_1, "create Conv(int, float) ...")); - if (USE_SSE2(env->cg)) + if (USE_SSE2(env->cg)) { new_op = new_rd_ia32_Conv_I2FP(dbg, irg, block, noreg, noreg, op, nomem); + pn = pn_ia32_Conv_I2FP_res; + } else return gen_x87_gp_to_fp(env, src_mode); } @@ -1787,9 +2035,11 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); if (tgt_bits == 8 || src_bits == 8) { new_op = new_rd_ia32_Conv_I2I8Bit(dbg, irg, block, noreg, noreg, op, nomem); + pn = pn_ia32_Conv_I2I8Bit_res; } else { new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem); + pn = pn_ia32_Conv_I2I_res; } } } @@ -1802,7 +2052,7 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { set_ia32_am_support(new_op, ia32_am_Source); - new_op = new_rd_Proj(dbg, irg, block, new_op, tgt_mode, 0); + new_op = new_rd_Proj(dbg, irg, block, new_op, tgt_mode, pn); } return new_op; @@ -1820,6 +2070,39 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { * ********************************************/ + /** + * Decides in which block the transformed StackParam should be placed. + * If the StackParam has more than one user, the dominator block of + * the users will be returned. In case of only one user, this is either + * the user block or, in case of a Phi, the predecessor block of the Phi. + */ + static ir_node *get_block_transformed_stack_param(ir_node *irn) { + ir_node *dom_bl = NULL; + + if (get_irn_n_edges(irn) == 1) { + ir_node *src = get_edge_src_irn(get_irn_out_edge_first(irn)); + + if (! is_Phi(src)) { + dom_bl = get_nodes_block(src); + } + else { + /* Determine on which in position of the Phi the irn is */ + /* and get the corresponding cfg predecessor block. 
*/ + + int i = get_irn_pred_pos(src, irn); + assert(i >= 0 && "kaputt"); + dom_bl = get_Block_cfgpred_block(get_nodes_block(src), i); + } + } + else { + dom_bl = node_users_smallest_common_dominator(irn, 1); + } + + assert(dom_bl && "dominator block not found"); + + return dom_bl; + } + static ir_node *gen_be_StackParam(ia32_transform_env_t *env) { ir_node *new_op = NULL; ir_node *node = env->irn; @@ -1829,11 +2112,8 @@ static ir_node *gen_be_StackParam(ia32_transform_env_t *env) { entity *ent = be_get_frame_entity(node); ir_mode *mode = env->mode; -// /* If the StackParam has only one user -> */ -// /* put it in the Block where the user resides */ -// if (get_irn_n_edges(node) == 1) { -// env->block = get_nodes_block(get_edge_src_irn(get_irn_out_edge_first(node))); -// } + /* choose the block where to place the load */ + env->block = get_block_transformed_stack_param(node); if (mode_is_float(mode)) { FP_USED(env->cg); @@ -1982,6 +2262,352 @@ static ir_node *gen_Unknown(ia32_transform_env_t *env) { return NULL; } +/********************************************************************** + * _ _ _ + * | | | | | | + * | | _____ _____ _ __ ___ __| | _ __ ___ __| | ___ ___ + * | |/ _ \ \ /\ / / _ \ '__/ _ \/ _` | | '_ \ / _ \ / _` |/ _ \/ __| + * | | (_) \ V V / __/ | | __/ (_| | | | | | (_) | (_| | __/\__ \ + * |_|\___/ \_/\_/ \___|_| \___|\__,_| |_| |_|\___/ \__,_|\___||___/ + * + **********************************************************************/ + +/* These nodes are created in intrinsic lowering (64bit -> 32bit) */ + +typedef ir_node *construct_load_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \ + ir_node *mem); + +typedef ir_node *construct_store_func(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, \ + ir_node *val, ir_node *mem); + +/** + * Transforms a lowered Load into a "real" one. + */ +static ir_node *gen_lowered_Load(ia32_transform_env_t *env, construct_load_func func, char fp_unit) { + ir_node *node = env->irn; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_mode *mode = get_ia32_ls_mode(node); + ir_node *new_op; + char *am_offs; + ia32_am_flavour_t am_flav = ia32_B; + + /* + Could be that we have SSE2 unit, but due to 64Bit Div/Conv + lowering we have x87 nodes, so we need to enforce simulation. + */ + if (mode_is_float(mode)) { + FP_USED(env->cg); + if (fp_unit == fp_x87) + FORCE_x87(env->cg); + } + + new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), noreg, get_irn_n(node, 1)); + am_offs = get_ia32_am_offs(node); + + if (am_offs) { + am_flav |= ia32_O; + add_ia32_am_offs(new_op, am_offs); + } + + set_ia32_am_support(new_op, ia32_am_Source); + set_ia32_op_type(new_op, ia32_AddrModeS); + set_ia32_am_flavour(new_op, am_flav); + set_ia32_ls_mode(new_op, mode); + set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); + set_ia32_use_frame(new_op); + + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); + + return new_op; +} + +/** +* Transforms a lowered Store into a "real" one. +*/ +static ir_node *gen_lowered_Store(ia32_transform_env_t *env, construct_store_func func, char fp_unit) { + ir_node *node = env->irn; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_mode *mode = get_ia32_ls_mode(node); + ir_node *new_op; + char *am_offs; + ia32_am_flavour_t am_flav = ia32_B; + + /* + Could be that we have SSE2 unit, but due to 64Bit Div/Conv + lowering we have x87 nodes, so we need to enforce simulation. 
+ */ + if (mode_is_float(mode)) { + FP_USED(env->cg); + if (fp_unit == fp_x87) + FORCE_x87(env->cg); + } + + new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), noreg, get_irn_n(node, 1), get_irn_n(node, 2)); + + if (am_offs = get_ia32_am_offs(node)) { + am_flav |= ia32_O; + add_ia32_am_offs(new_op, am_offs); + } + + set_ia32_am_support(new_op, ia32_am_Dest); + set_ia32_op_type(new_op, ia32_AddrModeD); + set_ia32_am_flavour(new_op, am_flav); + set_ia32_ls_mode(new_op, mode); + set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); + set_ia32_use_frame(new_op); + + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, node)); + + return new_op; +} + + +/** + * Transforms an ia32_l_XXX into a "real" XXX node + * + * @param env The transformation environment + * @return the created ia32 XXX node + */ +#define GEN_LOWERED_OP(op) \ + static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ + if (mode_is_float(env->mode)) \ + FP_USED(env->cg); \ + return gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \ + } + +#define GEN_LOWERED_x87_OP(op) \ + static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ + ir_node *new_op; \ + FORCE_x87(env->cg); \ + new_op = gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \ + set_ia32_am_support(get_Proj_pred(new_op), ia32_am_None); \ + return new_op; \ + } + +#define GEN_LOWERED_UNOP(op) \ + static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ + return gen_unop(env, get_unop_op(env->irn), new_rd_ia32_##op); \ + } + +#define GEN_LOWERED_SHIFT_OP(op) \ + static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ + return gen_shift_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_##op); \ + } + +#define GEN_LOWERED_LOAD(op, fp_unit) \ + static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ + return gen_lowered_Load(env, new_rd_ia32_##op, fp_unit); \ + } + +#define GEN_LOWERED_STORE(op, fp_unit) \ + static ir_node *gen_ia32_l_##op(ia32_transform_env_t *env) { \ + return gen_lowered_Store(env, new_rd_ia32_##op, fp_unit); \ +} + +GEN_LOWERED_OP(AddC) +GEN_LOWERED_OP(Add) +GEN_LOWERED_OP(SubC) +GEN_LOWERED_OP(Sub) +GEN_LOWERED_OP(Mul) +GEN_LOWERED_OP(Eor) +GEN_LOWERED_x87_OP(vfdiv) +GEN_LOWERED_x87_OP(vfmul) +GEN_LOWERED_x87_OP(vfsub) + +GEN_LOWERED_UNOP(Minus) + +GEN_LOWERED_LOAD(vfild, fp_x87) +GEN_LOWERED_LOAD(Load, fp_none) +GEN_LOWERED_STORE(vfist, fp_x87) +GEN_LOWERED_STORE(Store, fp_none) + +/** + * Transforms a l_MulS into a "real" MulS node. + * + * @param env The transformation environment + * @return the created ia32 MulS node + */ +static ir_node *gen_ia32_l_MulS(ia32_transform_env_t *env) { + + /* l_MulS is already a mode_T node, so we create the MulS in the normal way */ + /* and then skip the result Proj, because all needed Projs are already there. */ + + ir_node *new_op = gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_MulS); + ir_node *muls = get_Proj_pred(new_op); + + /* MulS cannot have AM for destination */ + if (get_ia32_am_support(muls) != ia32_am_None) + set_ia32_am_support(muls, ia32_am_Source); + + return muls; +} + +GEN_LOWERED_SHIFT_OP(Shl) +GEN_LOWERED_SHIFT_OP(Shr) +GEN_LOWERED_SHIFT_OP(Shrs) + +/** + * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. Those nodes have 3 data inputs: + * op1 - target to be shifted + * op2 - contains bits to be shifted into target + * op3 - shift count + * Only op3 can be an immediate. 
+ */ +static ir_node *gen_lowered_64bit_shifts(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, ir_node *count) { + ir_node *new_op = NULL; + ir_mode *mode = env->mode; + dbg_info *dbg = env->dbg; + ir_graph *irg = env->irg; + ir_node *block = env->block; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *nomem = new_NoMem(); + ir_node *imm_op; + tarval *tv; + DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) + + assert(! mode_is_float(mode) && "Shift/Rotate with float not supported"); + + /* Check if immediate optimization is on and */ + /* if it's an operation with immediate. */ + imm_op = (env->cg->opt & IA32_OPT_IMMOPS) ? get_immediate_op(NULL, count) : NULL; + + /* Limit imm_op within range imm8 */ + if (imm_op) { + tv = get_ia32_Immop_tarval(imm_op); + + if (tv) { + tv = tarval_mod(tv, new_tarval_from_long(32, mode_Iu)); + set_ia32_Immop_tarval(imm_op, tv); + } + else { + imm_op = NULL; + } + } + + /* integer operations */ + if (imm_op) { + /* This is ShiftD with const */ + DB((mod, LEVEL_1, "ShiftD with immediate ...")); + + if (is_ia32_l_ShlD(env->irn)) + new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg, op1, op2, noreg, nomem); + else + new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg, op1, op2, noreg, nomem); + set_ia32_Immop_attr(new_op, imm_op); + } + else { + /* This is a normal ShiftD */ + DB((mod, LEVEL_1, "ShiftD binop ...")); + if (is_ia32_l_ShlD(env->irn)) + new_op = new_rd_ia32_ShlD(dbg, irg, block, noreg, noreg, op1, op2, count, nomem); + else + new_op = new_rd_ia32_ShrD(dbg, irg, block, noreg, noreg, op1, op2, count, nomem); + } + + /* set AM support */ + set_ia32_am_support(new_op, ia32_am_Dest); + + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + + set_ia32_res_mode(new_op, mode); + set_ia32_emit_cl(new_op); + + return new_rd_Proj(dbg, irg, block, new_op, mode, 0); +} + +static ir_node *gen_ia32_l_ShlD(ia32_transform_env_t *env) { + return gen_lowered_64bit_shifts(env, get_irn_n(env->irn, 0), get_irn_n(env->irn, 1), get_irn_n(env->irn, 2)); +} + +static ir_node *gen_ia32_l_ShrD(ia32_transform_env_t *env) { + return gen_lowered_64bit_shifts(env, get_irn_n(env->irn, 0), get_irn_n(env->irn, 1), get_irn_n(env->irn, 2)); +} + +/** + * In case SSE Unit is used, the node is transformed into a vfst + xLoad. + */ +static ir_node *gen_ia32_l_X87toSSE(ia32_transform_env_t *env) { + ia32_code_gen_t *cg = env->cg; + ir_node *res = NULL; + ir_node *ptr = get_irn_n(env->irn, 0); + ir_node *val = get_irn_n(env->irn, 1); + ir_node *mem = get_irn_n(env->irn, 2); + + if (USE_SSE2(cg)) { + ir_node *noreg = ia32_new_NoReg_gp(cg); + + /* Store x87 -> MEM */ + res = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, mem); + set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn)); + set_ia32_use_frame(res); + set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn)); + set_ia32_am_support(res, ia32_am_Dest); + set_ia32_am_flavour(res, ia32_B); + res = new_rd_Proj(env->dbg, env->irg, env->block, res, mode_M, pn_ia32_vfst_M); + + /* Load MEM -> SSE */ + res = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, res); + set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn)); + set_ia32_use_frame(res); + set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn)); + set_ia32_am_support(res, ia32_am_Source); + set_ia32_am_flavour(res, ia32_B); + res = new_rd_Proj(env->dbg, env->irg, env->block, res, get_ia32_ls_mode(env->irn), pn_ia32_xLoad_res); + } + else { + /* SSE unit is not used -> skip this node. 
*/ + int i; + + edges_reroute(env->irn, val, env->irg); + for (i = get_irn_arity(env->irn) - 1; i >= 0; i--) + set_irn_n(env->irn, i, get_irg_bad(env->irg)); + } + + return res; +} + +/** + * In case SSE Unit is used, the node is transformed into a xStore + vfld. + */ +static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env) { + ia32_code_gen_t *cg = env->cg; + ir_node *res = NULL; + ir_node *ptr = get_irn_n(env->irn, 0); + ir_node *val = get_irn_n(env->irn, 1); + ir_node *mem = get_irn_n(env->irn, 2); + + if (USE_SSE2(cg)) { + ir_node *noreg = ia32_new_NoReg_gp(cg); + + /* Store SSE -> MEM */ + res = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem); + set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn)); + set_ia32_use_frame(res); + set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn)); + set_ia32_am_support(res, ia32_am_Dest); + set_ia32_am_flavour(res, ia32_B); + res = new_rd_Proj(env->dbg, env->irg, env->block, res, mode_M, pn_ia32_xStore_M); + + /* Load MEM -> x87 */ + res = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); + set_ia32_frame_ent(res, get_ia32_frame_ent(env->irn)); + set_ia32_use_frame(res); + set_ia32_ls_mode(res, get_ia32_ls_mode(env->irn)); + set_ia32_am_support(res, ia32_am_Source); + set_ia32_am_flavour(res, ia32_B); + res = new_rd_Proj(env->dbg, env->irg, env->block, res, get_ia32_ls_mode(env->irn), pn_ia32_vfld_res); + } + else { + /* SSE unit is not used -> skip this node. */ + int i; + + edges_reroute(env->irn, val, env->irg); + for (i = get_irn_arity(env->irn) - 1; i >= 0; i--) + set_irn_n(env->irn, i, get_irg_bad(env->irg)); + } + + return res; +} /********************************************************* * _ _ _ @@ -2080,6 +2706,9 @@ void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) { am_flav = get_ia32_am_flavour(irn); + if (get_ia32_am_sc(irn)) + return; + /* only some LEAs can be transformed to an Add */ if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_OI && am_flav != ia32_am_BI) return; @@ -2169,7 +2798,7 @@ void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) { DBG_OPT_LEA2ADD(irn, res); - res = new_rd_Proj(tenv.dbg, tenv.irg, tenv.block, res, tenv.mode, 0); + res = new_rd_Proj(tenv.dbg, tenv.irg, tenv.block, res, tenv.mode, pn_ia32_Add_res); /* add result Proj to schedule */ sched_add_before(irn, res); @@ -2232,6 +2861,31 @@ void ia32_register_transformers(void) { GEN(CopyB); GEN(Mux); + GEN(Psi); + + /* transform ops from intrinsic lowering */ + GEN(ia32_l_Add); + GEN(ia32_l_AddC); + GEN(ia32_l_Sub); + GEN(ia32_l_SubC); + GEN(ia32_l_Minus); + GEN(ia32_l_Mul); + GEN(ia32_l_Eor); + GEN(ia32_l_MulS); + GEN(ia32_l_Shl); + GEN(ia32_l_Shr); + GEN(ia32_l_Shrs); + GEN(ia32_l_ShlD); + GEN(ia32_l_ShrD); + GEN(ia32_l_vfdiv); + GEN(ia32_l_vfmul); + GEN(ia32_l_vfsub); + GEN(ia32_l_vfild); + GEN(ia32_l_Load); + GEN(ia32_l_vfist); + GEN(ia32_l_Store); + GEN(ia32_l_X87toSSE); + GEN(ia32_l_SSEtoX87); IGN(Call); IGN(Alloc); @@ -2301,10 +2955,17 @@ void ia32_transform_node(ir_node *node, void *env) { ia32_code_gen_t *cg = (ia32_code_gen_t *)env; ir_op *op = get_irn_op(node); ir_node *asm_node = NULL; + int i; if (is_Block(node)) return; + /* link arguments pointing to Unknown to the UNKNOWN Proj */ + for (i = get_irn_arity(node) - 1; i >= 0; i--) { + if (is_Unknown(get_irn_n(node, i))) + set_irn_n(node, i, be_get_unknown_for_mode(cg, get_irn_mode(get_irn_n(node, i)))); + } + DBG((cg->mod, LEVEL_1, "check %+F ... 
", node)); if (op->ops.generic) { ia32_transform_env_t tenv; @@ -2330,3 +2991,142 @@ void ia32_transform_node(ir_node *node, void *env) { DB((cg->mod, LEVEL_1, "ignored\n")); } } + +/** + * Transforms a psi condition. + */ +static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg) { + int i; + + /* if the mode is target mode, we have already seen this part of the tree */ + if (get_irn_mode(cond) == mode) + return; + + assert(get_irn_mode(cond) == mode_b && "logical operator for condition must be mode_b"); + + set_irn_mode(cond, mode); + + for (i = get_irn_arity(cond) - 1; i >= 0; i--) { + ir_node *in = get_irn_n(cond, i); + + /* if in is a compare: transform into Set/xCmp */ + if (is_Proj(in)) { + ir_node *new_op = NULL; + ir_node *cmp = get_Proj_pred(in); + ir_node *cmp_a = get_Cmp_left(cmp); + ir_node *cmp_b = get_Cmp_right(cmp); + dbg_info *dbg = get_irn_dbg_info(cmp); + ir_graph *irg = get_irn_irg(cmp); + ir_node *block = get_nodes_block(cmp); + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *nomem = new_rd_NoMem(irg); + int pnc = get_Proj_proj(in); + + /* this is a compare */ + if (mode_is_float(mode)) { + /* Psi is float, we need a floating point compare */ + + if (USE_SSE2(cg)) { + /* SSE FPU */ + if (! mode_is_float(get_irn_mode(cmp_a))) { + cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, cmp_a, mode); + cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, cmp_b, mode); + pnc |= 8; + } + + new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); + set_ia32_pncode(new_op, pnc); + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, cmp)); + } + else { + /* x87 FPU */ + assert(0); + } + } + else { + /* integer Psi */ + ia32_transform_env_t tenv; + construct_binop_func *set_func = NULL; + + if (mode_is_float(get_irn_mode(cmp_a))) { + /* 1st case: compare operands are floats */ + FP_USED(cg); + + if (USE_SSE2(cg)) { + /* SSE FPU */ + set_func = new_rd_ia32_xCmpSet; + } + else { + /* x87 FPU */ + set_func = new_rd_ia32_vfCmpSet; + } + + pnc &= 7; /* fp compare -> int compare */ + } + else { + /* 2nd case: compare operand are integer too */ + set_func = new_rd_ia32_CmpSet; + } + + tenv.block = block; + tenv.cg = cg; + tenv.dbg = dbg; + tenv.irg = irg; + tenv.irn = cmp; + tenv.mode = mode; + tenv.mod = cg->mod; + + new_op = gen_binop(&tenv, cmp_a, cmp_b, set_func); + set_ia32_pncode(get_Proj_pred(new_op), pnc); + set_ia32_am_support(get_Proj_pred(new_op), ia32_am_Source); + } + + /* the the new compare as in */ + set_irn_n(cond, i, new_op); + } + else { + /* another complex condition */ + transform_psi_cond(in, mode, cg); + } + } +} + +/** + * The Psi selector can be a tree of compares combined with "And"s and "Or"s. + * We create a Set node, respectively a xCmp in case the Psi is a float, for each + * compare, which causes the compare result to be stores in a register. The + * "And"s and "Or"s are transformed later, we just have to set their mode right. 
+ */ +void ia32_transform_psi_cond_tree(ir_node *node, void *env) { + ia32_code_gen_t *cg = (ia32_code_gen_t *)env; + ir_node *psi_sel, *new_cmp, *block; + ir_graph *irg; + ir_mode *mode; + + /* check for Psi */ + if (get_irn_opcode(node) != iro_Psi) + return; + + psi_sel = get_Psi_cond(node, 0); + + /* if psi_cond is a cmp: do nothing, this case is covered by gen_Psi */ + if (is_Proj(psi_sel)) + return; + + mode = get_irn_mode(node); + + transform_psi_cond(psi_sel, mode, cg); + + irg = get_irn_irg(node); + block = get_nodes_block(node); + + /* we need to compare the evaluated condition tree with 0 */ + + /* BEWARE: new_r_Const_long works for floating point as well */ + new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode, 0)); + /* transform the const */ + ia32_place_consts_set_modes(new_cmp, cg); + new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Ne + (mode_is_float(mode) ? pn_Cmp_Uo : 0)); + + set_Psi_cond(node, 0, new_cmp); +}