X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=6b603efcd7aec84477dfab593f725b342d4408e3;hb=a1a465eb2b3f54027b29f829423fffd0396937f4;hp=e127a76e3941657d1dd00d901b6876432e135a7c;hpb=7d54dc33b08ef428dd34dd82a79a4579d0fea3cb;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index e127a76e3..6b603efcd 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -25,6 +25,8 @@ #include "irprintf.h" #include "debug.h" #include "irdom.h" +#include "type.h" +#include "entity.h" #include "archop.h" /* we need this for Min and Max nodes */ #include "../benode_t.h" @@ -38,6 +40,7 @@ #include "ia32_map_regs.h" #include "ia32_dbg_stat.h" #include "ia32_optimize.h" +#include "ia32_util.h" #include "gen_ia32_regalloc_if.h" @@ -82,14 +85,16 @@ typedef enum { * Returns 1 if irn is a Const representing 0, 0 otherwise */ static INLINE int is_ia32_Const_0(ir_node *irn) { - return is_ia32_Const(irn) ? classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_NULL : 0; + return (is_ia32_irn(irn) && get_ia32_op_type(irn) == ia32_Const) ? + classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_NULL : 0; } /** * Returns 1 if irn is a Const representing 1, 0 otherwise */ static INLINE int is_ia32_Const_1(ir_node *irn) { - return is_ia32_Const(irn) ? classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_ONE : 0; + return (is_ia32_irn(irn) && get_ia32_op_type(irn) == ia32_Const) ? + classify_tarval(get_ia32_Immop_tarval(irn)) == TV_CLASSIFY_ONE : 0; } /** @@ -127,6 +132,34 @@ static ir_node *get_proj_for_pn(const ir_node *irn, long pn) { return NULL; } +/** + * Collects all Projs of a node into the node array. Index is the projnum. + * BEWARE: The caller has to assure the appropriate array size! + */ +static void ia32_collect_Projs(ir_node *irn, ir_node **projs, int size) { + const ir_edge_t *edge; + ir_node *proj; + assert(get_irn_mode(irn) == mode_T && "need mode_T"); + + memset(projs, 0, size * sizeof(projs[0])); + + foreach_out_edge(irn, edge) { + proj = get_edge_src_irn(edge); + projs[get_Proj_proj(proj)] = proj; + } +} + +/** + * Renumbers the proj having pn_old in the array tp pn_new + * and removes the proj from the array. + */ +static INLINE void ia32_renumber_Proj(ir_node **projs, long pn_old, long pn_new) { + if (projs[pn_old]) { + set_Proj_proj(projs[pn_old], pn_new); + projs[pn_old] = NULL; + } +} + /** * SSE convert of an integer node into a floating point node. */ @@ -145,8 +178,26 @@ static ir_node *gen_sse_conv_int2float(ia32_code_gen_t *cg, dbg_info *dbg, ir_gr return new_rd_Proj(dbg, irg, block, conv, tgt_mode, pn_ia32_Conv_I2FP_res); } +/** +* SSE convert of an float node into a double node. 
+*/ +static ir_node *gen_sse_conv_f2d(ia32_code_gen_t *cg, dbg_info *dbg, ir_graph *irg, ir_node *block, + ir_node *in, ir_node *old_node) +{ + ir_node *noreg = ia32_new_NoReg_gp(cg); + ir_node *nomem = new_rd_NoMem(irg); + + ir_node *conv = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, in, nomem); + set_ia32_src_mode(conv, mode_F); + set_ia32_tgt_mode(conv, mode_D); + set_ia32_am_support(conv, ia32_am_Source); + SET_IA32_ORIG_NODE(conv, ia32_get_old_node_name(cg, old_node)); + + return new_rd_Proj(dbg, irg, block, conv, mode_D, pn_ia32_Conv_FP2FP_res); +} + /* Generates an entity for a known FP const (used for FP Neg + Abs) */ -static ident *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) { +static ident *gen_fp_known_const(ia32_known_const_t kct) { static const struct { const char *tp_name; const char *ent_name; @@ -157,7 +208,7 @@ static ident *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) { { TP_SFP_ABS, ENT_SFP_ABS, SFP_ABS }, /* ia32_SABS */ { TP_DFP_ABS, ENT_DFP_ABS, DFP_ABS } /* ia32_DABS */ }; - static struct entity *ent_cache[ia32_known_const_max]; + static entity *ent_cache[ia32_known_const_max]; const char *tp_name, *ent_name, *cnst_str; ir_type *tp; @@ -165,12 +216,14 @@ static ident *gen_fp_known_const(ir_mode *mode, ia32_known_const_t kct) { ir_graph *rem; entity *ent; tarval *tv; + ir_mode *mode; ent_name = names[kct].ent_name; if (! ent_cache[kct]) { tp_name = names[kct].tp_name; cnst_str = names[kct].cnst_str; + mode = kct == ia32_SSIGN || kct == ia32_SABS ? mode_Iu : mode_Lu; tv = new_tarval_from_str(cnst_str, strlen(cnst_str), mode); tp = new_type_primitive(new_id_from_str(tp_name), mode); ent = new_entity(get_glob_type(), new_id_from_str(ent_name), tp); @@ -233,26 +286,31 @@ static ir_node *get_expr_op(ir_node *op1, ir_node *op2) { * @return The constructed ia32 node. */ static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, construct_binop_func *func) { - ir_node *new_op = NULL; - ir_mode *mode = env->mode; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); - ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg); - ir_node *nomem = new_NoMem(); - ir_node *expr_op, *imm_op; + ir_node *new_op = NULL; + ir_mode *mode = env->mode; + dbg_info *dbg = env->dbg; + ir_graph *irg = env->irg; + ir_node *block = env->block; + ir_node *noreg_gp = ia32_new_NoReg_gp(env->cg); + ir_node *noreg_fp = ia32_new_NoReg_fp(env->cg); + ir_node *nomem = new_NoMem(); + int is_mul = 0; + ir_node *expr_op, *imm_op; DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) /* Check if immediate optimization is on and */ /* if it's an operation with immediate. */ - /* MulS and Mulh don't support immediates */ + /* Mul/MulS/Mulh don't support immediates */ if (! 
(env->cg->opt & IA32_OPT_IMMOPS) || + func == new_rd_ia32_Mul || func == new_rd_ia32_Mulh || func == new_rd_ia32_MulS) { expr_op = op1; imm_op = NULL; + /* immediate operations are requested, but we are here: it a mul */ + if (env->cg->opt & IA32_OPT_IMMOPS) + is_mul = 1; } else if (is_op_commutative(get_irn_op(env->irn))) { imm_op = get_immediate_op(op1, op2); @@ -304,6 +362,10 @@ static ir_node *gen_binop(ia32_transform_env_t *env, ir_node *op1, ir_node *op2, /* set AM support */ set_ia32_am_support(new_op, ia32_am_Full); } + + /* Muls can only have AM source */ + if (is_mul) + set_ia32_am_support(new_op, ia32_am_Source); } SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); @@ -359,7 +421,7 @@ static ir_node *gen_shift_binop(ia32_transform_env_t *env, ir_node *op1, ir_node tv = get_ia32_Immop_tarval(imm_op); if (tv) { - tv = tarval_mod(tv, new_tarval_from_long(32, mode_Iu)); + tv = tarval_mod(tv, new_tarval_from_long(32, get_tarval_mode(tv))); set_ia32_Immop_tarval(imm_op, tv); } else { @@ -453,7 +515,7 @@ static ir_node *gen_imm_Add(ia32_transform_env_t *env, ir_node *expr_op, ir_node DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) /* try to optimize to inc/dec */ - if ((env->cg->opt & IA32_OPT_INCDEC) && (get_ia32_op_type(const_op) == ia32_Const)) { + if ((env->cg->opt & IA32_OPT_INCDEC) && tv && (get_ia32_op_type(const_op) == ia32_Const)) { /* optimize tarvals */ class_tv = classify_tarval(tv); class_negtv = classify_tarval(tarval_neg(tv)); @@ -528,13 +590,13 @@ static ir_node *gen_Add(ia32_transform_env_t *env) { set_ia32_am_sc(new_op, get_ia32_id_cnst(op2)); set_ia32_am_flavour(new_op, ia32_am_OB); - DBG_OPT_LEA1(op2, new_op); + DBG_OPT_LEA3(op1, op2, env->irn, new_op); } else { /* this is the 1st case */ new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode); - DBG_OPT_LEA2(op1, op2, new_op); + DBG_OPT_LEA3(op1, op2, env->irn, new_op); if (get_ia32_op_type(op1) == ia32_SymConst) { set_ia32_am_sc(new_op, get_ia32_id_cnst(op1)); @@ -777,7 +839,7 @@ static ir_node *gen_imm_Sub(ia32_transform_env_t *env, ir_node *expr_op, ir_node DEBUG_ONLY(firm_dbg_module_t *mod = env->mod;) /* try to optimize to inc/dec */ - if ((env->cg->opt & IA32_OPT_INCDEC) && tv) { + if ((env->cg->opt & IA32_OPT_INCDEC) && tv && (get_ia32_op_type(const_op) == ia32_Const)) { /* optimize tarvals */ class_tv = classify_tarval(tv); class_negtv = classify_tarval(tarval_neg(tv)); @@ -836,13 +898,13 @@ static ir_node *gen_Sub(ia32_transform_env_t *env) { } else { /* integer SUB */ - if (!expr_op) { + if (! 
expr_op) { /* No expr_op means, that we have two const - one symconst and */ /* one tarval or another symconst - because this case is not */ /* covered by constant folding */ /* We need to check for: */ - /* 1) symconst + const -> becomes a LEA */ - /* 2) symconst + symconst -> becomes a const + LEA as the elf */ + /* 1) symconst - const -> becomes a LEA */ + /* 2) symconst - symconst -> becomes a const - LEA as the elf */ /* linker doesn't support two symconsts */ if (get_ia32_op_type(op1) == ia32_SymConst && get_ia32_op_type(op2) == ia32_SymConst) { @@ -852,13 +914,13 @@ static ir_node *gen_Sub(ia32_transform_env_t *env) { set_ia32_am_sc_sign(new_op); set_ia32_am_flavour(new_op, ia32_am_OB); - DBG_OPT_LEA1(op2, new_op); + DBG_OPT_LEA3(op1, op2, env->irn, new_op); } else { /* this is the 1st case */ new_op = new_rd_ia32_Lea(dbg, irg, block, noreg, noreg, mode); - DBG_OPT_LEA2(op1, op2, new_op); + DBG_OPT_LEA3(op1, op2, env->irn, new_op); if (get_ia32_op_type(op1) == ia32_SymConst) { set_ia32_am_sc(new_op, get_ia32_id_cnst(op1)); @@ -915,7 +977,7 @@ static ir_node *gen_Sub(ia32_transform_env_t *env) { * @return The created ia32 DivMod node */ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir_node *divisor, ia32_op_flavour_t dm_flav) { - ir_node *res, *proj; + ir_node *res, *proj_div, *proj_mod; ir_node *edx_node, *cltd; ir_node *in_keep[1]; dbg_info *dbg = env->dbg; @@ -924,6 +986,9 @@ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir ir_mode *mode = env->mode; ir_node *irn = env->irn; ir_node *mem; + ir_node *projs[pn_DivMod_max]; + + ia32_collect_Projs(irn, projs, pn_DivMod_max); switch (dm_flav) { case flavour_Div: @@ -935,8 +1000,10 @@ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir mode = get_irn_mode(get_proj_for_pn(irn, pn_Mod_res)); break; case flavour_DivMod: - mem = get_DivMod_mem(irn); - mode = get_irn_mode(get_proj_for_pn(irn, pn_DivMod_res_div)); + mem = get_DivMod_mem(irn); + proj_div = get_proj_for_pn(irn, pn_DivMod_res_div); + proj_mod = get_proj_for_pn(irn, pn_DivMod_res_mod); + mode = proj_div ? get_irn_mode(proj_div) : get_irn_mode(proj_mod); break; default: assert(0); @@ -949,37 +1016,71 @@ static ir_node *generate_DivMod(ia32_transform_env_t *env, ir_node *dividend, ir edx_node = new_rd_Proj(dbg, irg, block, cltd, mode_Is, pn_ia32_Cdq_EDX); } else { - edx_node = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), mode_Iu); + edx_node = new_rd_ia32_Const(dbg, irg, block, mode_Iu); + add_irn_dep(edx_node, be_abi_get_start_barrier(env->cg->birg->abi)); set_ia32_Const_type(edx_node, ia32_Const); set_ia32_Immop_tarval(edx_node, get_tarval_null(mode_Iu)); } res = new_rd_ia32_DivMod(dbg, irg, block, dividend, divisor, edx_node, mem, dm_flav); - set_ia32_n_res(res, 2); /* Only one proj is used -> We must add a second proj and */ /* connect this one to a Keep node to eat up the second */ /* destroyed register. */ - if (get_irn_n_edges(irn) == 1) { - proj = get_edge_src_irn(get_irn_out_edge_first(irn)); - assert(is_Proj(proj) && "non-Proj to Div/Mod node"); - - if (get_irn_op(irn) == op_Div) { - set_Proj_proj(proj, pn_DivMod_res_div); - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Is, pn_DivMod_res_mod); - } - else { - set_Proj_proj(proj, pn_DivMod_res_mod); - in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode_Is, pn_DivMod_res_div); - } - - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); + /* We also renumber the Firm projs into ia32 projs. 
*/ + + switch (get_irn_opcode(irn)) { + case iro_Div: + ia32_renumber_Proj(projs, pn_Div_M, pn_ia32_DivMod_M); + ia32_renumber_Proj(projs, pn_Div_res, pn_ia32_DivMod_div_res); + /* add Proj-Keep for mod res */ + in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_DivMod_mod_res); + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); + break; + case iro_Mod: + ia32_renumber_Proj(projs, pn_Mod_M, pn_ia32_DivMod_M); + ia32_renumber_Proj(projs, pn_Mod_res, pn_ia32_DivMod_mod_res); + /* add Proj-Keep for div res */ + in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_DivMod_div_res); + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); + break; + case iro_DivMod: + /* check, which Proj-Keep, we need to add */ + proj_div = get_proj_for_pn(irn, pn_DivMod_res_div); + proj_mod = get_proj_for_pn(irn, pn_DivMod_res_mod); + + /* BEWARE: renumber after getting original projs */ + ia32_renumber_Proj(projs, pn_DivMod_M, pn_ia32_DivMod_M); + + if (proj_div && proj_mod) { + /* we have both results used: simply renumber */ + ia32_renumber_Proj(projs, pn_DivMod_res_div, pn_ia32_DivMod_div_res); + ia32_renumber_Proj(projs, pn_DivMod_res_mod, pn_ia32_DivMod_mod_res); + } + else if (! proj_div && ! proj_mod) { + assert(0 && "Missing DivMod result proj"); + } + else if (! proj_div) { + /* We have only mod result: add div res Proj-Keep */ + ia32_renumber_Proj(projs, pn_DivMod_res_mod, pn_ia32_DivMod_mod_res); + in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_DivMod_div_res); + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); + } + else { + /* We have only div result: add mod res Proj-Keep */ + ia32_renumber_Proj(projs, pn_DivMod_res_div, pn_ia32_DivMod_div_res); + in_keep[0] = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_DivMod_mod_res); + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in_keep); + } + break; + default: + assert(0 && "Div, Mod, or DivMod expected."); + break; } SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); - - set_ia32_res_mode(res, mode_Is); + set_ia32_res_mode(res, mode); return res; } @@ -1024,6 +1125,11 @@ static ir_node *gen_Quot(ia32_transform_env_t *env) { ir_node *nomem = new_rd_NoMem(env->irg); ir_node *op1 = get_Quot_left(env->irn); ir_node *op2 = get_Quot_right(env->irn); + ir_mode *mode = get_irn_mode(get_proj_for_pn(env->irn, pn_Quot_res)); + ir_node *projs[pn_Quot_max]; + /* BEWARE: Projs will be renumbered, so retrieve res Proj here */ + + ia32_collect_Projs(env->irn, projs, pn_Quot_max); FP_USED(env->cg); if (USE_SSE2(env->cg)) { @@ -1036,14 +1142,17 @@ static ir_node *gen_Quot(ia32_transform_env_t *env) { new_op = new_rd_ia32_xDiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem); set_ia32_am_support(new_op, ia32_am_Source); } + ia32_renumber_Proj(projs, pn_Quot_M, pn_ia32_xDiv_M); + ia32_renumber_Proj(projs, pn_Quot_res, pn_ia32_xDiv_res); } else { new_op = new_rd_ia32_vfdiv(env->dbg, env->irg, env->block, noreg, noreg, op1, op2, nomem); set_ia32_am_support(new_op, ia32_am_Source); + ia32_renumber_Proj(projs, pn_Quot_M, pn_ia32_vfdiv_M); + ia32_renumber_Proj(projs, pn_Quot_res, pn_ia32_vfdiv_res); } - set_ia32_res_mode(new_op, get_irn_mode(get_proj_for_pn(env->irn, pn_Quot_res))); + set_ia32_res_mode(new_op, mode); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); - return new_op; } @@ -1169,7 +1278,7 @@ static ir_node *gen_Rot(ia32_transform_env_t *env) { * @param op The Minus operand * @return The created ia32 
Minus node */ -static ir_node *gen_Minus_ex(ia32_transform_env_t *env, ir_node *op) { +ir_node *gen_Minus_ex(ia32_transform_env_t *env, ir_node *op) { ident *name; ir_node *new_op; int size; @@ -1184,14 +1293,15 @@ static ir_node *gen_Minus_ex(ia32_transform_env_t *env, ir_node *op) { new_op = new_rd_ia32_xEor(env->dbg, env->irg, env->block, noreg_gp, noreg_gp, op, noreg_fp, nomem); size = get_mode_size_bits(env->mode); - name = gen_fp_known_const(env->mode, size == 32 ? ia32_SSIGN : ia32_DSIGN); + name = gen_fp_known_const(size == 32 ? ia32_SSIGN : ia32_DSIGN); - set_ia32_sc(new_op, name); + set_ia32_am_sc(new_op, name); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); set_ia32_res_mode(new_op, env->mode); - set_ia32_immop_type(new_op, ia32_ImmSymConst); + set_ia32_op_type(new_op, ia32_AddrModeS); + set_ia32_ls_mode(new_op, env->mode); new_op = new_rd_Proj(env->dbg, env->irg, env->block, new_op, env->mode, pn_ia32_xEor_res); } @@ -1256,14 +1366,15 @@ static ir_node *gen_Abs(ia32_transform_env_t *env) { res = new_rd_ia32_xAnd(dbg,irg, block, noreg_gp, noreg_gp, op, noreg_fp, nomem); size = get_mode_size_bits(mode); - name = gen_fp_known_const(mode, size == 32 ? ia32_SABS : ia32_DABS); + name = gen_fp_known_const(size == 32 ? ia32_SABS : ia32_DABS); - set_ia32_sc(res, name); + set_ia32_am_sc(res, name); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); set_ia32_res_mode(res, mode); - set_ia32_immop_type(res, ia32_ImmSymConst); + set_ia32_op_type(res, ia32_AddrModeS); + set_ia32_ls_mode(res, env->mode); res = new_rd_Proj(dbg, irg, block, res, mode, pn_ia32_xAnd_res); } @@ -1305,14 +1416,27 @@ static ir_node *gen_Abs(ia32_transform_env_t *env) { * @return the created ia32 Load node */ static ir_node *gen_Load(ia32_transform_env_t *env) { - ir_node *node = env->irn; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *ptr = get_Load_ptr(node); - ir_node *lptr = ptr; - ir_mode *mode = get_Load_mode(node); - int is_imm = 0; + ir_node *node = env->irn; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *ptr = get_Load_ptr(node); + ir_node *lptr = ptr; + ir_mode *mode = get_Load_mode(node); + int is_imm = 0; ir_node *new_op; - ia32_am_flavour_t am_flav = ia32_B; + ia32_am_flavour_t am_flav = ia32_am_B; + ir_node *projs[pn_Load_max]; + + ia32_collect_Projs(env->irn, projs, pn_Load_max); + + /* + check for special case: the loaded value might not be used (optimized, volatile, ...) + we add a Proj + Keep for volatile loads and ignore all other cases + */ + if (! 
get_proj_for_pn(node, pn_Load_res) && get_Load_volatility(node) == volatility_is_volatile) { + /* add a result proj and a Keep to produce a pseudo use */ + ir_node *proj = new_r_Proj(env->irg, env->block, node, mode, pn_ia32_Load_res); + be_new_Keep(arch_get_irn_reg_class(env->cg->arch_env, proj, -1), env->irg, env->block, 1, &proj); + } /* address might be a constant (symconst or absolute address) */ if (is_ia32_Const(ptr)) { @@ -1322,25 +1446,36 @@ static ir_node *gen_Load(ia32_transform_env_t *env) { if (mode_is_float(mode)) { FP_USED(env->cg); - if (USE_SSE2(env->cg)) + if (USE_SSE2(env->cg)) { new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node)); - else + ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_xLoad_M); + ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_xLoad_res); + } + else { new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node)); + ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_vfld_M); + ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_vfld_res); + } } else { new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, lptr, noreg, get_Load_mem(node)); + ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_Load_M); + ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_Load_res); } /* base is an constant address */ if (is_imm) { - if (get_ia32_immop_type(ptr) == ia32_ImmSymConst) { + if (get_ia32_op_type(ptr) == ia32_SymConst) { set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr)); + am_flav = ia32_am_N; } else { add_ia32_am_offs(new_op, get_ia32_cnst(ptr)); + am_flav = ia32_am_O; } - - am_flav = ia32_O; + /* add dependency to barrier, if we are in start block */ + if (get_irg_start_block(env->irg) == env->block) + add_irn_dep(new_op, be_abi_get_start_barrier(env->cg->birg->abi)); } set_ia32_am_support(new_op, ia32_am_Source); @@ -1368,12 +1503,15 @@ static ir_node *gen_Store(ia32_transform_env_t *env) { ir_node *ptr = get_Store_ptr(node); ir_node *sptr = ptr; ir_node *mem = get_Store_mem(node); - ir_mode *mode = get_irn_link(node); + ir_mode *mode = get_irn_mode(val); ir_node *sval = val; int is_imm = 0; ir_node *new_op; - ia32_am_flavour_t am_flav = ia32_B; + ia32_am_flavour_t am_flav = ia32_am_B; ia32_immop_type_t immop = ia32_ImmNone; + ir_node *projs[pn_Store_max]; + + ia32_collect_Projs(env->irn, projs, pn_Store_max); if (! 
mode_is_float(mode)) { /* in case of storing a const (but not a symconst) -> make it an attribute */ @@ -1400,16 +1538,22 @@ static ir_node *gen_Store(ia32_transform_env_t *env) { if (mode_is_float(mode)) { FP_USED(env->cg); - if (USE_SSE2(env->cg)) + if (USE_SSE2(env->cg)) { new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, sptr, noreg, sval, mem); - else + ia32_renumber_Proj(projs, pn_Store_M, pn_ia32_xStore_M); + } + else { new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, sptr, noreg, sval, mem); + ia32_renumber_Proj(projs, pn_Store_M, pn_ia32_vfst_M); + } } else if (get_mode_size_bits(mode) == 8) { new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, sptr, noreg, sval, mem); + ia32_renumber_Proj(projs, pn_Store_M, pn_ia32_Store8Bit_M); } else { new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, sptr, noreg, sval, mem); + ia32_renumber_Proj(projs, pn_Store_M, pn_ia32_Store_M); } /* stored const is an attribute (saves a register) */ @@ -1419,14 +1563,14 @@ static ir_node *gen_Store(ia32_transform_env_t *env) { /* base is an constant address */ if (is_imm) { - if (get_ia32_immop_type(ptr) == ia32_ImmSymConst) { + if (get_ia32_op_type(ptr) == ia32_SymConst) { set_ia32_am_sc(new_op, get_ia32_id_cnst(ptr)); + am_flav = ia32_am_N; } else { add_ia32_am_offs(new_op, get_ia32_cnst(ptr)); + am_flav = ia32_am_O; } - - am_flav = ia32_O; } set_ia32_am_support(new_op, ia32_am_Dest); @@ -1462,6 +1606,7 @@ static ir_node *gen_Cond(ia32_transform_env_t *env) { if (is_Proj(sel) && sel_mode == mode_b) { ir_node *nomem = new_NoMem(); + pn_Cmp pnc = get_Proj_proj(sel); pred = get_Proj_pred(sel); @@ -1474,10 +1619,15 @@ static ir_node *gen_Cond(ia32_transform_env_t *env) { expr = get_expr_op(cmp_a, cmp_b); if (cnst && expr) { - pn_Cmp pnc = get_Proj_proj(sel); + /* immop has to be the right operand, we might need to flip pnc */ + if(cnst != cmp_b) { + pnc = get_inversed_pnc(pnc); + } if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_is_int(get_irn_mode(expr))) { - if (classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) { + if (get_ia32_op_type(cnst) == ia32_Const && + classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) + { /* a Cmp A =/!= 0 */ ir_node *op1 = expr; ir_node *op2 = expr; @@ -1492,7 +1642,7 @@ static ir_node *gen_Cond(ia32_transform_env_t *env) { cnst = (is_ia32_ImmConst(and) || is_ia32_ImmSymConst(and)) ? get_ia32_cnst(and) : NULL; } res = new_rd_ia32_TestJmp(dbg, irg, block, op1, op2); - set_ia32_pncode(res, get_Proj_proj(sel)); + set_ia32_pncode(res, pnc); set_ia32_res_mode(res, get_irn_mode(op1)); if (cnst) { @@ -1537,7 +1687,7 @@ static ir_node *gen_Cond(ia32_transform_env_t *env) { set_ia32_res_mode(res, get_irn_mode(cmp_a)); } - set_ia32_pncode(res, get_Proj_proj(sel)); + set_ia32_pncode(res, pnc); //set_ia32_am_support(res, ia32_am_Source); } else { @@ -1580,35 +1730,56 @@ static ir_node *gen_Cond(ia32_transform_env_t *env) { * @return The transformed node. 
*/ static ir_node *gen_CopyB(ia32_transform_env_t *env) { - ir_node *res = NULL; - dbg_info *dbg = env->dbg; - ir_graph *irg = env->irg; - ir_mode *mode = env->mode; - ir_node *block = env->block; - ir_node *node = env->irn; - ir_node *src = get_CopyB_src(node); - ir_node *dst = get_CopyB_dst(node); - ir_node *mem = get_CopyB_mem(node); - int size = get_type_size_bytes(get_CopyB_type(node)); - int rem; - - /* If we have to copy more than 16 bytes, we use REP MOVSx and */ + ir_node *res = NULL; + dbg_info *dbg = env->dbg; + ir_graph *irg = env->irg; + ir_node *block = env->block; + ir_node *node = env->irn; + ir_node *src = get_CopyB_src(node); + ir_node *dst = get_CopyB_dst(node); + ir_node *mem = get_CopyB_mem(node); + int size = get_type_size_bytes(get_CopyB_type(node)); + ir_mode *dst_mode = get_irn_mode(dst); + ir_mode *src_mode = get_irn_mode(src); + int rem; + ir_node *in[3]; + ir_node *projs[pn_CopyB_max]; + + ia32_collect_Projs(env->irn, projs, pn_CopyB_max); + + /* If we have to copy more than 32 bytes, we use REP MOVSx and */ /* then we need the size explicitly in ECX. */ - if (size >= 16 * 4) { + if (size >= 32 * 4) { rem = size & 0x3; /* size % 4 */ size >>= 2; - res = new_rd_ia32_Const(dbg, irg, block, get_irg_no_mem(irg), mode_Is); + res = new_rd_ia32_Const(dbg, irg, block, mode_Is); + add_irn_dep(res, be_abi_get_start_barrier(env->cg->birg->abi)); set_ia32_op_type(res, ia32_Const); set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is)); - res = new_rd_ia32_CopyB(dbg, irg, block, dst, src, res, mem, mode); + res = new_rd_ia32_CopyB(dbg, irg, block, dst, src, res, mem); set_ia32_Immop_tarval(res, new_tarval_from_long(rem, mode_Is)); + + /* ok: now attach Proj's because rep movsd will destroy esi, edi and ecx */ + in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_DST); + in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_SRC); + in[2] = new_r_Proj(irg, block, res, mode_Is, pn_ia32_CopyB_CNT); + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 3, in); + + ia32_renumber_Proj(projs, pn_CopyB_M_regular, pn_ia32_CopyB_M); } else { - res = new_rd_ia32_CopyB_i(dbg, irg, block, dst, src, mem, mode); + res = new_rd_ia32_CopyB_i(dbg, irg, block, dst, src, mem); set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is)); set_ia32_immop_type(res, ia32_ImmConst); + + /* ok: now attach Proj's because movsd will destroy esi and edi */ + in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_i_DST); + in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_i_SRC); + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 2, in); + + ia32_renumber_Proj(projs, pn_CopyB_M_regular, pn_ia32_CopyB_i_M); } SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env->cg, env->irn)); @@ -1696,20 +1867,23 @@ static ir_node *gen_Psi(ia32_transform_env_t *env) { new_op = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_xCmp_res); and1 = new_rd_ia32_xAnd(dbg, irg, block, noreg, noreg, psi_true, new_op, nomem); - set_ia32_am_support(and1, ia32_am_Source); + set_ia32_am_support(and1, ia32_am_None); set_ia32_res_mode(and1, mode); + set_ia32_commutative(and1); SET_IA32_ORIG_NODE(and1, ia32_get_old_node_name(cg, node)); and1 = new_rd_Proj(dbg, irg, block, and1, mode, pn_ia32_xAnd_res); and2 = new_rd_ia32_xAndNot(dbg, irg, block, noreg, noreg, new_op, psi_default, nomem); - set_ia32_am_support(and2, ia32_am_Source); + set_ia32_am_support(and2, ia32_am_None); set_ia32_res_mode(and2, mode); + set_ia32_commutative(and2); SET_IA32_ORIG_NODE(and2, 
ia32_get_old_node_name(cg, node)); and2 = new_rd_Proj(dbg, irg, block, and2, mode, pn_ia32_xAndNot_res); new_op = new_rd_ia32_xOr(dbg, irg, block, noreg, noreg, and1, and2, nomem); - set_ia32_am_support(new_op, ia32_am_Source); + set_ia32_am_support(new_op, ia32_am_None); set_ia32_res_mode(new_op, mode); + set_ia32_commutative(new_op); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); new_op = new_rd_Proj(dbg, irg, block, new_op, mode, pn_ia32_xOr_res); } @@ -1833,13 +2007,13 @@ static ir_node *gen_Psi(ia32_transform_env_t *env) { * Create a conversion from x87 state register to general purpose. */ static ir_node *gen_x87_fp_to_gp(ia32_transform_env_t *env, ir_mode *tgt_mode) { - ia32_code_gen_t *cg = env->cg; - entity *ent = cg->fp_to_gp; - ir_graph *irg = env->irg; - ir_node *block = env->block; - ir_node *noreg = ia32_new_NoReg_gp(env->cg); - ir_node *op = get_Conv_op(env->irn); - ir_node *fist, *mem, *load; + ia32_code_gen_t *cg = env->cg; + entity *ent = cg->fp_to_gp; + ir_graph *irg = env->irg; + ir_node *block = env->block; + ir_node *noreg = ia32_new_NoReg_gp(env->cg); + ir_node *op = get_Conv_op(env->irn); + ir_node *fist, *mem, *load; if (! ent) { int size = get_mode_size_bytes(ia32_reg_classes[CLASS_ia32_vfp].mode); @@ -1914,7 +2088,7 @@ static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_mode *src_mode) { set_ia32_am_flavour(store, ia32_B); set_ia32_ls_mode(store, mode_Is); - mem = new_r_Proj(irg, block, store, mode_M, 0); + mem = new_r_Proj(irg, block, store, mode_M, pn_ia32_Store_M); /* do a fild */ fild = new_rd_ia32_vfild(env->dbg, irg, block, get_irg_frame(irg), noreg, mem); @@ -1926,7 +2100,7 @@ static ir_node *gen_x87_gp_to_fp(ia32_transform_env_t *env, ir_mode *src_mode) { set_ia32_am_flavour(fild, ia32_B); set_ia32_ls_mode(fild, mode_F); - return new_r_Proj(irg, block, fild, mode_F, 0); + return new_r_Proj(irg, block, fild, mode_F, pn_ia32_vfild_res); } /** @@ -1944,6 +2118,7 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { int src_bits = get_mode_size_bits(src_mode); int tgt_bits = get_mode_size_bits(tgt_mode); int pn = -1; + int kill = 0; ir_node *block = env->block; ir_node *new_op = NULL; ir_node *noreg = ia32_new_NoReg_gp(env->cg); @@ -1967,7 +2142,13 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { } else { DB((mod, LEVEL_1, "killed Conv(float, float) ...")); - edges_reroute(env->irn, op, irg); + /* + remark: we create a intermediate conv here, so modes will be spread correctly + these convs will be killed later + */ + new_op = new_rd_ia32_Conv_FP2FP(dbg, irg, block, noreg, noreg, op, nomem); + pn = pn_ia32_Conv_FP2FP_res; + kill = 1; } } else { @@ -1984,7 +2165,7 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { if (tgt_bits < 32) { SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); set_ia32_am_support(new_op, ia32_am_Source); - set_ia32_tgt_mode(new_op, tgt_mode); + set_ia32_tgt_mode(new_op, mode_Is); set_ia32_src_mode(new_op, src_mode); proj = new_rd_Proj(dbg, irg, block, new_op, mode_Is, pn_ia32_Conv_FP2I_res); @@ -1997,6 +2178,7 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, proj, nomem); pn = pn_ia32_Conv_I2I_res; } + src_mode = mode_Is; } } } @@ -2017,7 +2199,13 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { /* ... 
to int */ if (get_mode_size_bits(src_mode) == tgt_bits) { DB((mod, LEVEL_1, "omitting equal size Conv(%+F, %+F) ...", src_mode, tgt_mode)); - edges_reroute(env->irn, op, irg); + /* + remark: we create a intermediate conv here, so modes will be spread correctly + these convs will be killed later + */ + new_op = new_rd_ia32_Conv_I2I(dbg, irg, block, noreg, noreg, op, nomem); + pn = pn_ia32_Conv_I2I_res; + kill = 1; } else { DB((mod, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); @@ -2038,9 +2226,13 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { set_ia32_tgt_mode(new_op, tgt_mode); set_ia32_src_mode(new_op, src_mode); - set_ia32_am_support(new_op, ia32_am_Source); + if(tgt_bits >= src_bits) + set_ia32_am_support(new_op, ia32_am_Source); new_op = new_rd_Proj(dbg, irg, block, new_op, tgt_mode, pn); + + if (kill) + nodeset_insert(env->cg->kill_conv, new_op); } return new_op; @@ -2058,60 +2250,30 @@ static ir_node *gen_Conv(ia32_transform_env_t *env) { * ********************************************/ - /** - * Decides in which block the transformed StackParam should be placed. - * If the StackParam has more than one user, the dominator block of - * the users will be returned. In case of only one user, this is either - * the user block or, in case of a Phi, the predecessor block of the Phi. - */ - static ir_node *get_block_transformed_stack_param(ir_node *irn) { - ir_node *dom_bl = NULL; - - if (get_irn_n_edges(irn) == 1) { - ir_node *src = get_edge_src_irn(get_irn_out_edge_first(irn)); - - if (! is_Phi(src)) { - dom_bl = get_nodes_block(src); - } - else { - /* Determine on which in position of the Phi the irn is */ - /* and get the corresponding cfg predecessor block. */ - - int i = get_irn_pred_pos(src, irn); - assert(i >= 0 && "kaputt"); - dom_bl = get_Block_cfgpred_block(get_nodes_block(src), i); - } - } - else { - dom_bl = node_users_smallest_common_dominator(irn, 1); - } - - assert(dom_bl && "dominator block not found"); - - return dom_bl; - } - static ir_node *gen_be_StackParam(ia32_transform_env_t *env) { ir_node *new_op = NULL; ir_node *node = env->irn; ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_node *mem = new_rd_NoMem(env->irg); ir_node *ptr = get_irn_n(node, 0); - entity *ent = be_get_frame_entity(node); + entity *ent = arch_get_frame_entity(env->cg->arch_env, node); ir_mode *mode = env->mode; - - /* choose the block where to place the load */ - env->block = get_block_transformed_stack_param(node); + long pn_res; if (mode_is_float(mode)) { FP_USED(env->cg); - if (USE_SSE2(env->cg)) + if (USE_SSE2(env->cg)) { new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem); - else + pn_res = pn_ia32_xLoad_res; + } + else { new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); + pn_res = pn_ia32_vfld_res; + } } else { new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem); + pn_res = pn_ia32_Load_res; } set_ia32_frame_ent(new_op, ent); @@ -2121,10 +2283,11 @@ static ir_node *gen_be_StackParam(ia32_transform_env_t *env) { set_ia32_op_type(new_op, ia32_AddrModeS); set_ia32_am_flavour(new_op, ia32_B); set_ia32_ls_mode(new_op, mode); + set_ia32_flags(new_op, get_ia32_flags(new_op) | arch_irn_flags_rematerializable); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); - return new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_ia32_Load_res); + return new_rd_Proj(env->dbg, env->irg, env->block, new_op, mode, pn_res); } /** @@ -2138,7 +2301,7 @@ static ir_node 
*gen_be_FrameAddr(ia32_transform_env_t *env) { ir_node *nomem = new_rd_NoMem(env->irg); new_op = new_rd_ia32_Add(env->dbg, env->irg, env->block, noreg, noreg, op, noreg, nomem); - set_ia32_frame_ent(new_op, be_get_frame_entity(node)); + set_ia32_frame_ent(new_op, arch_get_frame_entity(env->cg->arch_env, node)); set_ia32_am_support(new_op, ia32_am_Full); set_ia32_use_frame(new_op); set_ia32_immop_type(new_op, ia32_ImmConst); @@ -2158,18 +2321,30 @@ static ir_node *gen_be_FrameLoad(ia32_transform_env_t *env) { ir_node *noreg = ia32_new_NoReg_gp(env->cg); ir_node *mem = get_irn_n(node, 0); ir_node *ptr = get_irn_n(node, 1); - entity *ent = be_get_frame_entity(node); + entity *ent = arch_get_frame_entity(env->cg->arch_env, node); ir_mode *mode = get_type_mode(get_entity_type(ent)); + ir_node *projs[pn_Load_max]; + + ia32_collect_Projs(env->irn, projs, pn_Load_max); if (mode_is_float(mode)) { FP_USED(env->cg); - if (USE_SSE2(env->cg)) + if (USE_SSE2(env->cg)) { new_op = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, ptr, noreg, mem); - else + ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_xLoad_M); + ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_xLoad_res); + } + else { new_op = new_rd_ia32_vfld(env->dbg, env->irg, env->block, ptr, noreg, mem); + ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_vfld_M); + ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_vfld_res); + } } - else + else { new_op = new_rd_ia32_Load(env->dbg, env->irg, env->block, ptr, noreg, mem); + ia32_renumber_Proj(projs, pn_Load_M, pn_ia32_Load_M); + ia32_renumber_Proj(projs, pn_Load_res, pn_ia32_Load_res); + } set_ia32_frame_ent(new_op, ent); set_ia32_use_frame(new_op); @@ -2195,21 +2370,30 @@ static ir_node *gen_be_FrameStore(ia32_transform_env_t *env) { ir_node *mem = get_irn_n(node, 0); ir_node *ptr = get_irn_n(node, 1); ir_node *val = get_irn_n(node, 2); - entity *ent = be_get_frame_entity(node); + entity *ent = arch_get_frame_entity(env->cg->arch_env, node); ir_mode *mode = get_irn_mode(val); + ir_node *projs[pn_Store_max]; + + ia32_collect_Projs(env->irn, projs, pn_Store_max); if (mode_is_float(mode)) { FP_USED(env->cg); - if (USE_SSE2(env->cg)) + if (USE_SSE2(env->cg)) { new_op = new_rd_ia32_xStore(env->dbg, env->irg, env->block, ptr, noreg, val, mem); - else + ia32_renumber_Proj(projs, pn_Store_M, pn_ia32_xStore_M); + } + else { new_op = new_rd_ia32_vfst(env->dbg, env->irg, env->block, ptr, noreg, val, mem); + ia32_renumber_Proj(projs, pn_Store_M, pn_ia32_vfst_M); + } } else if (get_mode_size_bits(mode) == 8) { new_op = new_rd_ia32_Store8Bit(env->dbg, env->irg, env->block, ptr, noreg, val, mem); + ia32_renumber_Proj(projs, pn_Store_M, pn_ia32_Store8Bit_M); } else { new_op = new_rd_ia32_Store(env->dbg, env->irg, env->block, ptr, noreg, val, mem); + ia32_renumber_Proj(projs, pn_Store_M, pn_ia32_Store_M); } set_ia32_frame_ent(new_op, ent); @@ -2225,6 +2409,252 @@ static ir_node *gen_be_FrameStore(ia32_transform_env_t *env) { return new_op; } +/** + * In case SSE is used we need to copy the result from FPU TOS. + */ +static ir_node *gen_be_Call(ia32_transform_env_t *env) { + ir_node *call_res = get_proj_for_pn(env->irn, pn_be_Call_first_res); + ir_node *call_mem = get_proj_for_pn(env->irn, pn_be_Call_M_regular); + ir_mode *mode; + + if (! call_res || ! USE_SSE2(env->cg)) + return NULL; + + mode = get_irn_mode(call_res); + + /* in case there is no memory output: create one to serialize the copy FPU -> SSE */ + if (! 
call_mem) + call_mem = new_r_Proj(env->irg, env->block, env->irn, mode_M, pn_be_Call_M_regular); + + if (mode_is_float(mode)) { + /* store st(0) onto stack */ + ir_node *frame = get_irg_frame(env->irg); + ir_node *fstp = new_rd_ia32_GetST0(env->dbg, env->irg, env->block, frame, call_mem); + ir_node *mproj = new_r_Proj(env->irg, env->block, fstp, mode_M, pn_ia32_GetST0_M); + entity *ent = frame_alloc_area(get_irg_frame_type(env->irg), get_mode_size_bytes(mode), 16, 0); + ir_node *sse_load, *p, *bad, *keep; + ir_node **in_keep; + int keep_arity, i; + + set_ia32_ls_mode(fstp, mode); + set_ia32_op_type(fstp, ia32_AddrModeD); + set_ia32_use_frame(fstp); + set_ia32_frame_ent(fstp, ent); + set_ia32_am_flavour(fstp, ia32_B); + set_ia32_am_support(fstp, ia32_am_Dest); + + /* load into SSE register */ + sse_load = new_rd_ia32_xLoad(env->dbg, env->irg, env->block, frame, ia32_new_NoReg_gp(env->cg), mproj); + set_ia32_ls_mode(sse_load, mode); + set_ia32_op_type(sse_load, ia32_AddrModeS); + set_ia32_use_frame(sse_load); + set_ia32_frame_ent(sse_load, ent); + set_ia32_am_flavour(sse_load, ia32_B); + set_ia32_am_support(sse_load, ia32_am_Source); + sse_load = new_r_Proj(env->irg, env->block, sse_load, mode, pn_ia32_xLoad_res); + + /* reroute all users of the result proj to the sse load */ + edges_reroute(call_res, sse_load, env->irg); + + /* now: create new Keep whith all former ins and one additional in - the result Proj */ + + /* get a Proj representing a caller save register */ + p = get_proj_for_pn(env->irn, pn_be_Call_first_res + 1); + assert(is_Proj(p) && "Proj expected."); + + /* user of the the proj is the Keep */ + p = get_edge_src_irn(get_irn_out_edge_first(p)); + assert(be_is_Keep(p) && "Keep expected."); + + /* copy in array of the old keep and set the result proj as additional in */ + keep_arity = get_irn_arity(p) + 1; + NEW_ARR_A(ir_node *, in_keep, keep_arity); + in_keep[keep_arity - 1] = call_res; + for (i = 0; i < keep_arity - 1; ++i) + in_keep[i] = get_irn_n(p, i); + + /* create new keep and set the in class requirements properly */ + keep = be_new_Keep(NULL, env->irg, env->block, keep_arity, in_keep); + for(i = 0; i < keep_arity; ++i) { + const arch_register_class_t *cls = arch_get_irn_reg_class(env->cg->arch_env, in_keep[i], -1); + be_node_set_reg_class(keep, i, cls); + } + + /* kill the old keep */ + bad = get_irg_bad(env->irg); + for (i = 0; i < keep_arity - 1; i++) + set_irn_n(p, i, bad); + remove_End_keepalive(get_irg_end(env->irg), p); + } + + return NULL; +} + +/** + * In case SSE is used we need to copy the result from XMM0 to FPU TOS before return. + */ +static ir_node *gen_be_Return(ia32_transform_env_t *env) { + ir_node *ret_val = get_irn_n(env->irn, be_pos_Return_val); + ir_node *ret_mem = get_irn_n(env->irn, be_pos_Return_mem); + entity *ent = get_irg_entity(get_irn_irg(ret_val)); + ir_type *tp = get_entity_type(ent); + + if (be_Return_get_n_rets(env->irn) < 1 || ! ret_val || ! 
USE_SSE2(env->cg)) + return NULL; + + if (get_method_n_ress(tp) == 1) { + ir_type *res_type = get_method_res_type(tp, 0); + ir_mode *mode; + + if (is_Primitive_type(res_type)) { + mode = get_type_mode(res_type); + if (mode_is_float(mode)) { + ir_node *frame; + entity *ent; + ir_node *sse_store, *fld, *mproj, *barrier; + int pn_ret_val = get_Proj_proj(ret_val); + int pn_ret_mem = get_Proj_proj(ret_mem); + + /* get the Barrier */ + barrier = get_Proj_pred(ret_val); + + /* get result input of the Barrier */ + ret_val = get_irn_n(barrier, pn_ret_val); + + /* get memory input of the Barrier */ + ret_mem = get_irn_n(barrier, pn_ret_mem); + + frame = get_irg_frame(env->irg); + ent = frame_alloc_area(get_irg_frame_type(env->irg), get_mode_size_bytes(mode), 16, 0); + + /* store xmm0 onto stack */ + sse_store = new_rd_ia32_xStoreSimple(env->dbg, env->irg, env->block, frame, ret_val, ret_mem); + set_ia32_ls_mode(sse_store, mode); + set_ia32_op_type(sse_store, ia32_AddrModeD); + set_ia32_use_frame(sse_store); + set_ia32_frame_ent(sse_store, ent); + set_ia32_am_flavour(sse_store, ia32_B); + set_ia32_am_support(sse_store, ia32_am_Dest); + sse_store = new_r_Proj(env->irg, env->block, sse_store, mode_M, pn_ia32_xStore_M); + + /* load into st0 */ + fld = new_rd_ia32_SetST0(env->dbg, env->irg, env->block, frame, sse_store); + set_ia32_ls_mode(fld, mode); + set_ia32_op_type(fld, ia32_AddrModeS); + set_ia32_use_frame(fld); + set_ia32_frame_ent(fld, ent); + set_ia32_am_flavour(fld, ia32_B); + set_ia32_am_support(fld, ia32_am_Source); + mproj = new_r_Proj(env->irg, env->block, fld, mode_M, pn_ia32_SetST0_M); + fld = new_r_Proj(env->irg, env->block, fld, mode, pn_ia32_SetST0_res); + arch_set_irn_register(env->cg->arch_env, fld, &ia32_vfp_regs[REG_VF0]); + + /* set new return value */ + set_irn_n(barrier, pn_ret_val, fld); + set_irn_n(barrier, pn_ret_mem, mproj); + } + } + } + + return NULL; +} + +/** + * Transform a be_AddSP into an ia32_AddSP. Eat up const sizes. + */ +static ir_node *gen_be_AddSP(ia32_transform_env_t *env) { + ir_node *new_op; + const ir_edge_t *edge; + ir_node *sz = get_irn_n(env->irn, be_pos_AddSP_size); + ir_node *sp = get_irn_n(env->irn, be_pos_AddSP_old_sp); + + new_op = new_rd_ia32_AddSP(env->dbg, env->irg, env->block, sp, sz); + + if (is_ia32_Const(sz)) { + set_ia32_Immop_attr(new_op, sz); + set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg)); + } + else if (is_ia32_Load(sz) && get_ia32_am_flavour(sz) == ia32_O) { + set_ia32_immop_type(new_op, ia32_ImmSymConst); + set_ia32_op_type(new_op, ia32_AddrModeS); + set_ia32_am_sc(new_op, get_ia32_am_sc(sz)); + add_ia32_am_offs(new_op, get_ia32_am_offs(sz)); + set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg)); + } + + /* fix proj nums */ + foreach_out_edge(env->irn, edge) { + ir_node *proj = get_edge_src_irn(edge); + + assert(is_Proj(proj)); + + if (get_Proj_proj(proj) == pn_be_AddSP_res) { + /* the node is not yet exchanged: we need to set the register manually */ + ia32_attr_t *attr = get_ia32_attr(new_op); + attr->slots[pn_ia32_AddSP_stack] = &ia32_gp_regs[REG_ESP]; + set_Proj_proj(proj, pn_ia32_AddSP_stack); + } + else if (get_Proj_proj(proj) == pn_be_AddSP_M) { + set_Proj_proj(proj, pn_ia32_AddSP_M); + } + else { + assert(0); + } + } + + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + + return new_op; +} + +/** + * Transform a be_SubSP into an ia32_SubSP. Eat up const sizes. 
+ */ +static ir_node *gen_be_SubSP(ia32_transform_env_t *env) { + ir_node *new_op; + const ir_edge_t *edge; + ir_node *sz = get_irn_n(env->irn, be_pos_SubSP_size); + ir_node *sp = get_irn_n(env->irn, be_pos_SubSP_old_sp); + + new_op = new_rd_ia32_SubSP(env->dbg, env->irg, env->block, sp, sz); + + if (is_ia32_Const(sz)) { + set_ia32_Immop_attr(new_op, sz); + set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg)); + } + else if (is_ia32_Load(sz) && get_ia32_am_flavour(sz) == ia32_O) { + set_ia32_immop_type(new_op, ia32_ImmSymConst); + set_ia32_op_type(new_op, ia32_AddrModeS); + set_ia32_am_sc(new_op, get_ia32_am_sc(sz)); + add_ia32_am_offs(new_op, get_ia32_am_offs(sz)); + set_irn_n(new_op, 1, ia32_new_NoReg_gp(env->cg)); + } + + /* fix proj nums */ + foreach_out_edge(env->irn, edge) { + ir_node *proj = get_edge_src_irn(edge); + + assert(is_Proj(proj)); + + if (get_Proj_proj(proj) == pn_be_SubSP_res) { + /* the node is not yet exchanged: we need to set the register manually */ + ia32_attr_t *attr = get_ia32_attr(new_op); + attr->slots[pn_ia32_SubSP_stack] = &ia32_gp_regs[REG_ESP]; + set_Proj_proj(proj, pn_ia32_SubSP_stack); + } + else if (get_Proj_proj(proj) == pn_be_SubSP_M) { + set_Proj_proj(proj, pn_ia32_SubSP_M); + } + else { + assert(0); + } + } + + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env->cg, env->irn)); + + return new_op; +} + /** * This function just sets the register for the Unknown node * as this is not done during register allocation because Unknown @@ -2240,7 +2670,7 @@ static ir_node *gen_Unknown(ia32_transform_env_t *env) { else arch_set_irn_register(env->cg->arch_env, irn, &ia32_vfp_regs[REG_VFP_UKNWN]); } - else if (mode_is_int(mode) || mode_is_reference(mode)) { + else if (mode_is_int(mode) || mode_is_reference(mode) || mode_is_character(mode)) { arch_set_irn_register(env->cg->arch_env, irn, &ia32_gp_regs[REG_GP_UKNWN]); } else { @@ -2289,9 +2719,10 @@ static ir_node *gen_lowered_Load(ia32_transform_env_t *env, construct_load_func FORCE_x87(env->cg); } - new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), noreg, get_irn_n(node, 1)); + new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), noreg, get_irn_n(node, 1)); + am_offs = get_ia32_am_offs(node); - if (am_offs = get_ia32_am_offs(node)) { + if (am_offs) { am_flav |= ia32_O; add_ia32_am_offs(new_op, am_offs); } @@ -2331,7 +2762,7 @@ static ir_node *gen_lowered_Store(ia32_transform_env_t *env, construct_store_fun new_op = func(env->dbg, env->irg, env->block, get_irn_n(node, 0), noreg, get_irn_n(node, 1), get_irn_n(node, 2)); - if (am_offs = get_ia32_am_offs(node)) { + if ((am_offs = get_ia32_am_offs(node)) != NULL) { am_flav |= ia32_O; add_ia32_am_offs(new_op, am_offs); } @@ -2420,7 +2851,26 @@ static ir_node *gen_ia32_l_MulS(ia32_transform_env_t *env) { /* and then skip the result Proj, because all needed Projs are already there. */ ir_node *new_op = gen_binop(env, get_binop_left(env->irn), get_binop_right(env->irn), new_rd_ia32_MulS); - return get_Proj_pred(new_op); + ir_node *muls = get_Proj_pred(new_op); + ir_node *proj; + + /* MulS cannot have AM for destination */ + if (get_ia32_am_support(muls) != ia32_am_None) + set_ia32_am_support(muls, ia32_am_Source); + + /* check if EAX and EDX proj exist, add missing one */ + proj = get_proj_for_pn(env->irn, pn_ia32_MulS_EAX); + if (! 
proj) { + proj = new_r_Proj(env->irg, env->block, muls, get_ia32_res_mode(env->irn), pn_ia32_MulS_EAX); + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], env->irg, env->block, 1, &proj); + } + proj = get_proj_for_pn(env->irn, pn_ia32_MulS_EDX); + if (! proj) { + proj = new_r_Proj(env->irg, env->block, muls, get_ia32_res_mode(env->irn), pn_ia32_MulS_EDX); + be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], env->irg, env->block, 1, &proj); + } + + return muls; } GEN_LOWERED_SHIFT_OP(Shl) @@ -2600,197 +3050,6 @@ static ir_node *gen_ia32_l_SSEtoX87(ia32_transform_env_t *env) { * *********************************************************/ -/** - * Transforms a Sub or xSub into Neg--Add iff OUT_REG == SRC2_REG. - * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION. - */ -void ia32_transform_sub_to_neg_add(ir_node *irn, ia32_code_gen_t *cg) { - ia32_transform_env_t tenv; - ir_node *in1, *in2, *noreg, *nomem, *res; - const arch_register_t *in1_reg, *in2_reg, *out_reg, **slots; - - /* Return if AM node or not a Sub or xSub */ - if (get_ia32_op_type(irn) != ia32_Normal || !(is_ia32_Sub(irn) || is_ia32_xSub(irn))) - return; - - noreg = ia32_new_NoReg_gp(cg); - nomem = new_rd_NoMem(cg->irg); - in1 = get_irn_n(irn, 2); - in2 = get_irn_n(irn, 3); - in1_reg = arch_get_irn_register(cg->arch_env, in1); - in2_reg = arch_get_irn_register(cg->arch_env, in2); - out_reg = get_ia32_out_reg(irn, 0); - - tenv.block = get_nodes_block(irn); - tenv.dbg = get_irn_dbg_info(irn); - tenv.irg = cg->irg; - tenv.irn = irn; - tenv.mode = get_ia32_res_mode(irn); - tenv.cg = cg; - DEBUG_ONLY(tenv.mod = cg->mod;) - - /* in case of sub and OUT == SRC2 we can transform the sequence into neg src2 -- add */ - if (REGS_ARE_EQUAL(out_reg, in2_reg)) { - /* generate the neg src2 */ - res = gen_Minus_ex(&tenv, in2); - arch_set_irn_register(cg->arch_env, res, in2_reg); - - /* add to schedule */ - sched_add_before(irn, res); - - /* generate the add */ - if (mode_is_float(tenv.mode)) { - res = new_rd_ia32_xAdd(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, res, in1, nomem); - set_ia32_am_support(res, ia32_am_Source); - } - else { - res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, res, in1, nomem); - set_ia32_am_support(res, ia32_am_Full); - set_ia32_commutative(res); - } - set_ia32_res_mode(res, tenv.mode); - - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(tenv.cg, irn)); - /* copy register */ - slots = get_ia32_slots(res); - slots[0] = in2_reg; - - /* add to schedule */ - sched_add_before(irn, res); - - /* remove the old sub */ - sched_remove(irn); - - DBG_OPT_SUB2NEGADD(irn, res); - - /* exchange the add and the sub */ - exchange(irn, res); - } -} - -/** - * Transforms a LEA into an Add if possible - * THIS FUNCTIONS MUST BE CALLED AFTER REGISTER ALLOCATION. - */ -void ia32_transform_lea_to_add(ir_node *irn, ia32_code_gen_t *cg) { - ia32_am_flavour_t am_flav; - int imm = 0; - ir_node *res = NULL; - ir_node *nomem, *noreg, *base, *index, *op1, *op2; - char *offs; - ia32_transform_env_t tenv; - const arch_register_t *out_reg, *base_reg, *index_reg; - - /* must be a LEA */ - if (! 
is_ia32_Lea(irn)) - return; - - am_flav = get_ia32_am_flavour(irn); - - if (get_ia32_am_sc(irn)) - return; - - /* only some LEAs can be transformed to an Add */ - if (am_flav != ia32_am_B && am_flav != ia32_am_OB && am_flav != ia32_am_OI && am_flav != ia32_am_BI) - return; - - noreg = ia32_new_NoReg_gp(cg); - nomem = new_rd_NoMem(cg->irg); - op1 = noreg; - op2 = noreg; - base = get_irn_n(irn, 0); - index = get_irn_n(irn,1); - - offs = get_ia32_am_offs(irn); - - /* offset has a explicit sign -> we need to skip + */ - if (offs && offs[0] == '+') - offs++; - - out_reg = arch_get_irn_register(cg->arch_env, irn); - base_reg = arch_get_irn_register(cg->arch_env, base); - index_reg = arch_get_irn_register(cg->arch_env, index); - - tenv.block = get_nodes_block(irn); - tenv.dbg = get_irn_dbg_info(irn); - tenv.irg = cg->irg; - tenv.irn = irn; - DEBUG_ONLY(tenv.mod = cg->mod;) - tenv.mode = get_irn_mode(irn); - tenv.cg = cg; - - switch(get_ia32_am_flavour(irn)) { - case ia32_am_B: - /* out register must be same as base register */ - if (! REGS_ARE_EQUAL(out_reg, base_reg)) - return; - - op1 = base; - break; - case ia32_am_OB: - /* out register must be same as base register */ - if (! REGS_ARE_EQUAL(out_reg, base_reg)) - return; - - op1 = base; - imm = 1; - break; - case ia32_am_OI: - /* out register must be same as index register */ - if (! REGS_ARE_EQUAL(out_reg, index_reg)) - return; - - op1 = index; - imm = 1; - break; - case ia32_am_BI: - /* out register must be same as one in register */ - if (REGS_ARE_EQUAL(out_reg, base_reg)) { - op1 = base; - op2 = index; - } - else if (REGS_ARE_EQUAL(out_reg, index_reg)) { - op1 = index; - op2 = base; - } - else { - /* in registers a different from out -> no Add possible */ - return; - } - default: - break; - } - - res = new_rd_ia32_Add(tenv.dbg, tenv.irg, tenv.block, noreg, noreg, op1, op2, nomem); - arch_set_irn_register(cg->arch_env, res, out_reg); - set_ia32_op_type(res, ia32_Normal); - set_ia32_commutative(res); - set_ia32_res_mode(res, tenv.mode); - - if (imm) { - set_ia32_cnst(res, offs); - set_ia32_immop_type(res, ia32_ImmConst); - } - - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, irn)); - - /* add Add to schedule */ - sched_add_before(irn, res); - - DBG_OPT_LEA2ADD(irn, res); - - res = new_rd_Proj(tenv.dbg, tenv.irg, tenv.block, res, tenv.mode, pn_ia32_Add_res); - - /* add result Proj to schedule */ - sched_add_before(irn, res); - - /* remove the old LEA */ - sched_remove(irn); - - /* exchange the Add and the LEA */ - exchange(irn, res); -} - /** * the BAD transformer. 
*/ @@ -2886,6 +3145,7 @@ void ia32_register_transformers(void) { IGN(SymConst); IGN(Sync); + /* we should never see these nodes */ BAD(Raise); BAD(Sel); BAD(InstOf); @@ -2900,10 +3160,15 @@ void ia32_register_transformers(void) { BAD(EndReg); BAD(EndExcept); + /* handle generic backend nodes */ GEN(be_FrameAddr); + GEN(be_Call); + GEN(be_Return); GEN(be_FrameLoad); GEN(be_FrameStore); GEN(be_StackParam); + GEN(be_AddSP); + GEN(be_SubSP); /* set the register for all Unknown nodes */ GEN(Unknown); @@ -2952,12 +3217,12 @@ void ia32_transform_node(ir_node *node, void *env) { ia32_transform_env_t tenv; transform_func *transform = (transform_func *)op->ops.generic; - tenv.block = get_nodes_block(node); - tenv.dbg = get_irn_dbg_info(node); - tenv.irg = current_ir_graph; - tenv.irn = node; - tenv.mode = get_irn_mode(node); - tenv.cg = cg; + tenv.block = get_nodes_block(node); + tenv.dbg = get_irn_dbg_info(node); + tenv.irg = current_ir_graph; + tenv.irn = node; + tenv.mode = get_irn_mode(node); + tenv.cg = cg; DEBUG_ONLY(tenv.mod = cg->mod;) asm_node = (*transform)(&tenv); @@ -3008,11 +3273,16 @@ static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg /* Psi is float, we need a floating point compare */ if (USE_SSE2(cg)) { + ir_mode *m = get_irn_mode(cmp_a); /* SSE FPU */ - if (! mode_is_float(get_irn_mode(cmp_a))) { + if (! mode_is_float(m)) { cmp_a = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_a, cmp_a, mode); cmp_b = gen_sse_conv_int2float(cg, dbg, irg, block, cmp_b, cmp_b, mode); - pnc |= 8; + } + else if (m == mode_F) { + /* we convert cmp values always to double, to get correct bitmask with cmpsd */ + cmp_a = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_a, cmp_a); + cmp_b = gen_sse_conv_f2d(cg, dbg, irg, block, cmp_b, cmp_b); } new_op = new_rd_ia32_xCmp(dbg, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); @@ -3079,7 +3349,7 @@ static void transform_psi_cond(ir_node *cond, ir_mode *mode, ia32_code_gen_t *cg * "And"s and "Or"s are transformed later, we just have to set their mode right. */ void ia32_transform_psi_cond_tree(ir_node *node, void *env) { - ia32_code_gen_t *cg = (ia32_code_gen_t *)env; + ia32_code_gen_t *cg = env; ir_node *psi_sel, *new_cmp, *block; ir_graph *irg; ir_mode *mode; @@ -3094,7 +3364,8 @@ void ia32_transform_psi_cond_tree(ir_node *node, void *env) { if (is_Proj(psi_sel)) return; - mode = get_irn_mode(node); + //mode = get_irn_mode(node); + mode = mode_Iu; transform_psi_cond(psi_sel, mode, cg); @@ -3102,12 +3373,17 @@ void ia32_transform_psi_cond_tree(ir_node *node, void *env) { block = get_nodes_block(node); /* we need to compare the evaluated condition tree with 0 */ - - /* BEWARE: new_r_Const_long works for floating point as well */ - new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode, 0)); - /* transform the const */ - ia32_place_consts_set_modes(new_cmp, cg); - new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Ne + (mode_is_float(mode) ? pn_Cmp_Uo : 0)); + mode = get_irn_mode(node); + if (mode_is_float(mode)) { + psi_sel = gen_sse_conv_int2float(cg, NULL, irg, block, psi_sel, NULL, mode); + /* BEWARE: new_r_Const_long works for floating point as well */ + new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode, 0)); + new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Ne); + } + else { + new_cmp = new_r_Cmp(irg, block, psi_sel, new_r_Const_long(irg, block, mode_Iu, 0)); + new_cmp = new_r_Proj(irg, block, new_cmp, mode_b, pn_Cmp_Gt | pn_Cmp_Lt); + } set_Psi_cond(node, 0, new_cmp); }