X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_transform.c;h=e346632e90b212431570b7a16a728c87d9d352f3;hb=a70660f699871be4a0af96eb09683bfbf17bda0a;hp=f9f38462366eaafd1bd5387b409e7221d2a5cc1f;hpb=aef4d3b28b21856e05c0bd91552b51b69ed5ac50;p=libfirm diff --git a/ir/be/ia32/ia32_transform.c b/ir/be/ia32/ia32_transform.c index f9f384623..e346632e9 100644 --- a/ir/be/ia32/ia32_transform.c +++ b/ir/be/ia32/ia32_transform.c @@ -19,7 +19,8 @@ /** * @file - * @brief This file implements the IR transformation from firm into ia32-Firm. + * @brief This file implements the IR transformation from firm into + * ia32-Firm. * @author Christian Wuerdig, Matthias Braun * @version $Id$ */ @@ -41,7 +42,6 @@ #include "irvrfy.h" #include "ircons.h" #include "irgwalk.h" -#include "dbginfo.h" #include "irprintf.h" #include "debug.h" #include "irdom.h" @@ -86,8 +86,9 @@ DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;) -/** holdd the current code generator during transformation */ -static ia32_code_gen_t *env_cg; +/** hold the current code generator during transformation */ +static ia32_code_gen_t *env_cg = NULL; +static ir_node *initial_fpcw = NULL; extern ir_op *get_op_Mulh(void); @@ -116,13 +117,20 @@ typedef ir_node *construct_unop_func(dbg_info *db, ir_graph *irg, static ir_node *try_create_Immediate(ir_node *node, char immediate_constraint_type); +static ir_node *create_immediate_or_transform(ir_node *node, + char immediate_constraint_type); + +static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, + dbg_info *dbgi, ir_node *new_block, + ir_node *new_op); + /** * Return true if a mode can be stored in the GP register set */ static INLINE int mode_needs_gp_reg(ir_mode *mode) { if(mode == mode_fpcw) return 0; - return mode_is_int(mode) || mode_is_character(mode) || mode_is_reference(mode); + return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b; } /** @@ -262,10 +270,11 @@ static int is_Const_1(ir_node *node) { * Transforms a Const. */ static ir_node *gen_Const(ir_node *node) { - ir_graph *irg = current_ir_graph; - ir_node *block = be_transform_node(get_nodes_block(node)); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *mode = get_irn_mode(node); + ir_graph *irg = current_ir_graph; + ir_node *old_block = get_nodes_block(node); + ir_node *block = be_transform_node(old_block); + dbg_info *dbgi = get_irn_dbg_info(node); + ir_mode *mode = get_irn_mode(node); if (mode_is_float(mode)) { ir_node *res = NULL; @@ -274,7 +283,6 @@ static ir_node *gen_Const(ir_node *node) { ir_node *load; ir_entity *floatent; - FP_USED(env_cg); if (! 
USE_SSE2(env_cg)) { cnst_classify_t clss = classify_Const(node); @@ -343,13 +351,13 @@ static ir_node *gen_Const(ir_node *node) { */ static ir_node *gen_SymConst(ir_node *node) { ir_graph *irg = current_ir_graph; - ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *old_block = get_nodes_block(node); + ir_node *block = be_transform_node(old_block); dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *mode = get_irn_mode(node); ir_node *cnst; if (mode_is_float(mode)) { - FP_USED(env_cg); if (USE_SSE2(env_cg)) cnst = new_rd_ia32_xConst(dbgi, irg, block); else @@ -495,33 +503,16 @@ static ir_node *gen_binop(ir_node *node, ir_node *op1, ir_node *op2, construct_binop_func *func, int commutative) { ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *new_op1 = NULL; - ir_node *new_op2 = NULL; - ir_node *new_node = NULL; ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); ir_node *nomem = new_NoMem(); + ir_node *new_node; - if(commutative) { - new_op2 = try_create_Immediate(op1, 0); - if(new_op2 != NULL) { - new_op1 = be_transform_node(op2); - commutative = 0; - } - } - - if(new_op2 == NULL) { - new_op2 = try_create_Immediate(op2, 0); - if(new_op2 != NULL) { - new_op1 = be_transform_node(op1); - commutative = 0; - } - } - - if(new_op2 == NULL) { - new_op1 = be_transform_node(op1); - new_op2 = be_transform_node(op2); + ir_node *new_op1 = be_transform_node(op1); + ir_node *new_op2 = create_immediate_or_transform(op2, 0); + if (is_ia32_Immediate(new_op2)) { + commutative = 0; } new_node = func(dbgi, irg, block, noreg_gp, noreg_gp, new_op1, new_op2, nomem); @@ -566,15 +557,26 @@ static ir_node *gen_binop_sse_float(ir_node *node, ir_node *op1, ir_node *op2, if (is_op_commutative(get_irn_op(node))) { set_ia32_commutative(new_node); } - if (USE_SSE2(env_cg)) { - set_ia32_ls_mode(new_node, mode); - } + set_ia32_ls_mode(new_node, mode); SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); return new_node; } +static ir_node *get_fpcw(void) +{ + ir_node *fpcw; + if(initial_fpcw != NULL) + return initial_fpcw; + + fpcw = be_abi_get_ignore_irn(env_cg->birg->abi, + &ia32_fp_cw_regs[REG_FPCW]); + initial_fpcw = be_transform_node(fpcw); + + return initial_fpcw; +} + /** * Construct a standard binary operation, set AM and immediate if required. * @@ -592,21 +594,15 @@ static ir_node *gen_binop_x87_float(ir_node *node, ir_node *op1, ir_node *op2, ir_node *new_node = NULL; dbg_info *dbgi = get_irn_dbg_info(node); ir_graph *irg = current_ir_graph; - ir_mode *mode = get_irn_mode(node); ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); ir_node *nomem = new_NoMem(); - ir_node *fpcw = be_abi_get_ignore_irn(env_cg->birg->abi, - &ia32_fp_cw_regs[REG_FPCW]); new_node = func(dbgi, irg, block, noreg_gp, noreg_gp, new_op1, new_op2, - nomem, fpcw); + nomem, get_fpcw()); set_ia32_am_support(new_node, ia32_am_Source, ia32_am_binary); if (is_op_commutative(get_irn_op(node))) { set_ia32_commutative(new_node); } - if (USE_SSE2(env_cg)) { - set_ia32_ls_mode(new_node, mode); - } SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); @@ -636,10 +632,7 @@ static ir_node *gen_shift_binop(ir_node *node, ir_node *op1, ir_node *op2, assert(! 
mode_is_float(get_irn_mode(node)) && "Shift/Rotate with float not supported"); - new_op2 = try_create_Immediate(op2, 'N'); - if(new_op2 == NULL) { - new_op2 = be_transform_node(op2); - } + new_op2 = create_immediate_or_transform(op2, 'N'); new_op = func(dbgi, irg, block, noreg, noreg, new_op1, new_op2, nomem); @@ -650,6 +643,13 @@ static ir_node *gen_shift_binop(ir_node *node, ir_node *op1, ir_node *op2, set_ia32_emit_cl(new_op); + /* lowered shift instruction may have a dependency operand, handle it here */ + if (get_irn_arity(node) == 3) { + /* we have a dependency */ + ir_node *new_dep = be_transform_node(get_irn_n(node, 2)); + add_irn_dep(new_op, new_dep); + } + return new_op; } @@ -707,7 +707,6 @@ static ir_node *gen_Add(ir_node *node) { assert((expr_op || imm_op) && "invalid operands"); if (mode_is_float(mode)) { - FP_USED(env_cg); if (USE_SSE2(env_cg)) return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xAdd); else @@ -810,36 +809,6 @@ static ir_node *gen_Add(ir_node *node) { return new_op; } -#if 0 -static ir_node *create_ia32_Mul(ir_node *node) { - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *op1 = get_Mul_left(node); - ir_node *op2 = get_Mul_right(node); - ir_node *new_op1 = be_transform_node(op1); - ir_node *new_op2 = be_transform_node(op2); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *proj_EAX, *proj_EDX, *res; - ir_node *in[1]; - - res = new_rd_ia32_Mul(dbgi, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); - set_ia32_commutative(res); - set_ia32_am_support(res, ia32_am_Source | ia32_am_binary); - - /* imediates are not supported, so no fold_immediate */ - proj_EAX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EAX); - proj_EDX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EDX); - - /* keep EAX */ - in[0] = proj_EDX; - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in); - - return proj_EAX; -} -#endif /* if 0 */ - - /** * Creates an ia32 Mul. * @@ -851,7 +820,6 @@ static ir_node *gen_Mul(ir_node *node) { ir_mode *mode = get_irn_mode(node); if (mode_is_float(mode)) { - FP_USED(env_cg); if (USE_SSE2(env_cg)) return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xMul); else @@ -883,26 +851,22 @@ static ir_node *gen_Mulh(ir_node *node) { dbg_info *dbgi = get_irn_dbg_info(node); ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_mode *mode = get_irn_mode(node); - ir_node *proj_EAX, *proj_EDX, *res; - ir_node *in[1]; + ir_node *proj_EDX, *res; assert(!mode_is_float(mode) && "Mulh with float not supported"); if (mode_is_signed(mode)) { - res = new_rd_ia32_IMul1OP(dbgi, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); + res = new_rd_ia32_IMul1OP(dbgi, irg, block, noreg, noreg, new_op1, + new_op2, new_NoMem()); } else { - res = new_rd_ia32_Mul(dbgi, irg, block, noreg, noreg, new_op1, new_op2, new_NoMem()); + res = new_rd_ia32_Mul(dbgi, irg, block, noreg, noreg, new_op1, new_op2, + new_NoMem()); } set_ia32_commutative(res); set_ia32_am_support(res, ia32_am_Source, ia32_am_binary); - proj_EAX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EAX); proj_EDX = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_EDX); - /* keep EAX */ - in[0] = proj_EAX; - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in); - return proj_EDX; } @@ -916,8 +880,33 @@ static ir_node *gen_Mulh(ir_node *node) { static ir_node *gen_And(ir_node *node) { ir_node *op1 = get_And_left(node); ir_node *op2 = get_And_right(node); + assert(! 
mode_is_float(get_irn_mode(node))); + + /* check for zero extension first */ + if (is_Const(op2)) { + tarval *tv = get_Const_tarval(op2); + long v = get_tarval_long(tv); + + if (v == 0xFF || v == 0xFFFF) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *new_op = be_transform_node(op1); + ir_mode *src_mode; + ir_node *res; + + if(v == 0xFF) { + src_mode = mode_Bu; + } else { + assert(v == 0xFFFF); + src_mode = mode_Hu; + } + res = create_I2I_Conv(src_mode, mode_Iu, dbgi, block, new_op); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + + return res; + } + } - assert (! mode_is_float(get_irn_mode(node))); return gen_binop(node, op1, op2, new_rd_ia32_And, 1); } @@ -952,86 +941,6 @@ static ir_node *gen_Eor(ir_node *node) { } - -/** - * Creates an ia32 Max. - * - * @return the created ia32 Max node - */ -static ir_node *gen_Max(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *op1 = get_irn_n(node, 0); - ir_node *new_op1 = be_transform_node(op1); - ir_node *op2 = get_irn_n(node, 1); - ir_node *new_op2 = be_transform_node(op2); - ir_graph *irg = current_ir_graph; - ir_mode *mode = get_irn_mode(node); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *op_mode = get_irn_mode(op1); - ir_node *new_op; - - assert(get_mode_size_bits(mode) == 32); - - if (mode_is_float(mode)) { - FP_USED(env_cg); - if (USE_SSE2(env_cg)) { - new_op = gen_binop_sse_float(node, new_op1, new_op2, new_rd_ia32_xMax); - } else { - panic("Can't create Max node"); - } - } else { - long pnc = pn_Cmp_Gt; - if (! mode_is_signed(op_mode)) { - pnc |= ia32_pn_Cmp_Unsigned; - } - new_op = new_rd_ia32_CmpCMov(dbgi, irg, block, new_op1, new_op2, - new_op1, new_op2, pnc); - } - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - - return new_op; -} - -/** - * Creates an ia32 Min. - * - * @return the created ia32 Min node - */ -static ir_node *gen_Min(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *op1 = get_irn_n(node, 0); - ir_node *new_op1 = be_transform_node(op1); - ir_node *op2 = get_irn_n(node, 1); - ir_node *new_op2 = be_transform_node(op2); - ir_graph *irg = current_ir_graph; - ir_mode *mode = get_irn_mode(node); - dbg_info *dbgi = get_irn_dbg_info(node); - ir_mode *op_mode = get_irn_mode(op1); - ir_node *new_op; - - assert(get_mode_size_bits(mode) == 32); - - if (mode_is_float(mode)) { - FP_USED(env_cg); - if (USE_SSE2(env_cg)) { - new_op = gen_binop_sse_float(node, op1, op2, new_rd_ia32_xMin); - } else { - panic("can't create Min node"); - } - } else { - long pnc = pn_Cmp_Lt; - if (! mode_is_signed(op_mode)) { - pnc |= ia32_pn_Cmp_Unsigned; - } - new_op = new_rd_ia32_CmpCMov(dbgi, irg, block, new_op1, new_op2, - new_op1, new_op2, pnc); - } - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - - return new_op; -} - - /** * Creates an ia32 Sub. 
* @@ -1059,7 +968,6 @@ static ir_node *gen_Sub(ir_node *node) { assert((expr_op || imm_op) && "invalid operands"); if (mode_is_float(mode)) { - FP_USED(env_cg); if (USE_SSE2(env_cg)) return gen_binop_sse_float(node, op1, op2, new_rd_ia32_xSub); else @@ -1184,10 +1092,9 @@ static ir_node *generate_DivMod(ir_node *node, ir_node *dividend, ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_node *res, *proj_div, *proj_mod; ir_node *sign_extension; - ir_node *in_keep[2]; ir_node *mem, *new_mem; ir_node *projs[pn_DivMod_max]; - int i, has_exc; + int has_exc; ia32_collect_Projs(node, projs, pn_DivMod_max); @@ -1220,7 +1127,10 @@ static ir_node *generate_DivMod(ir_node *node, ir_node *dividend, if (mode_is_signed(mode)) { /* in signed mode, we need to sign extend the dividend */ - sign_extension = new_rd_ia32_Cltd(dbgi, irg, block, new_dividend); + ir_node *produceval = new_rd_ia32_ProduceVal(dbgi, irg, block); + add_irn_dep(produceval, get_irg_frame(irg)); + sign_extension = new_rd_ia32_Cltd(dbgi, irg, block, new_dividend, + produceval); } else { sign_extension = new_rd_ia32_Const(dbgi, irg, block); set_ia32_Immop_tarval(sign_extension, get_tarval_null(mode_Iu)); @@ -1238,27 +1148,7 @@ static ir_node *generate_DivMod(ir_node *node, ir_node *dividend, set_ia32_exc_label(res, has_exc); set_irn_pinned(res, get_irn_pinned(node)); - - /* Matze: code can't handle this at the moment... */ -#if 0 - /* set AM support */ set_ia32_am_support(res, ia32_am_Source, ia32_am_binary); -#endif - - /* check, which Proj-Keep, we need to add */ - i = 0; - if (proj_div == NULL) { - /* We have only mod result: add div res Proj-Keep */ - in_keep[i] = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_ia32_Div_div_res); - ++i; - } - if (proj_mod == NULL) { - /* We have only div result: add mod res Proj-Keep */ - in_keep[i] = new_rd_Proj(dbgi, irg, block, res, mode_Iu, pn_ia32_Div_mod_res); - ++i; - } - if(i > 0) - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, i, in_keep); SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); @@ -1311,7 +1201,6 @@ static ir_node *gen_Quot(ir_node *node) { ir_node *nomem = new_rd_NoMem(current_ir_graph); ir_node *new_op; - FP_USED(env_cg); if (USE_SSE2(env_cg)) { ir_mode *mode = get_irn_mode(op1); if (is_ia32_xConst(new_op2)) { @@ -1321,16 +1210,14 @@ static ir_node *gen_Quot(ir_node *node) { } else { new_op = new_rd_ia32_xDiv(dbgi, irg, block, noreg, noreg, new_op1, new_op2, nomem); // Matze: disabled for now, spillslot coalescer fails - //set_ia32_am_support(new_op, ia32_am_Source | ia32_am_binary); + set_ia32_am_support(new_op, ia32_am_Source, ia32_am_binary); } set_ia32_ls_mode(new_op, mode); } else { - ir_node *fpcw = be_abi_get_ignore_irn(env_cg->birg->abi, - &ia32_fp_cw_regs[REG_FPCW]); new_op = new_rd_ia32_vfdiv(dbgi, irg, block, noreg, noreg, new_op1, - new_op2, nomem, fpcw); + new_op2, nomem, get_fpcw()); // Matze: disabled for now (spillslot coalescer fails) - //set_ia32_am_support(new_op, ia32_am_Source | ia32_am_binary); + set_ia32_am_support(new_op, ia32_am_Source, ia32_am_binary); } SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); return new_op; @@ -1369,7 +1256,8 @@ static ir_node *gen_Shr(ir_node *node) { static ir_node *gen_Shrs(ir_node *node) { ir_node *left = get_Shrs_left(node); ir_node *right = get_Shrs_right(node); - if(is_Const(right) && get_irn_mode(left) == mode_Is) { + ir_mode *mode = get_irn_mode(node); + if(is_Const(right) && mode == mode_Is) { tarval *tv = get_Const_tarval(right); long val = get_tarval_long(tv); if(val == 31) { @@ 
-1379,10 +1267,46 @@ static ir_node *gen_Shrs(ir_node *node) { ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *op = left; ir_node *new_op = be_transform_node(op); + ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block); + add_irn_dep(pval, get_irg_frame(irg)); - return new_rd_ia32_Cltd(dbgi, irg, block, new_op); + return new_rd_ia32_Cltd(dbgi, irg, block, new_op, pval); } } +#if 1 + /* 8 or 16 bit sign extension? */ + if(is_Const(right) && is_Shl(left) && mode == mode_Is) { + ir_node *shl_left = get_Shl_left(left); + ir_node *shl_right = get_Shl_right(left); + if(is_Const(shl_right)) { + tarval *tv1 = get_Const_tarval(right); + tarval *tv2 = get_Const_tarval(shl_right); + if(tv1 == tv2 && tarval_is_long(tv1)) { + long val = get_tarval_long(tv1); + if(val == 16 || val == 24) { + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *new_op = be_transform_node(shl_left); + ir_mode *src_mode; + ir_node *res; + + if(val == 24) { + src_mode = mode_Bs; + } else { + assert(val == 16); + src_mode = mode_Hs; + } + res = create_I2I_Conv(src_mode, mode_Is, dbgi, block, + new_op); + SET_IA32_ORIG_NODE(res, + ia32_get_old_node_name(env_cg, node)); + + return res; + } + } + } + } +#endif return gen_shift_binop(node, left, right, new_rd_ia32_Sar); } @@ -1478,7 +1402,6 @@ ir_node *gen_Minus_ex(ir_node *node, ir_node *op) { if (mode_is_float(mode)) { ir_node *new_op = be_transform_node(op); - FP_USED(env_cg); if (USE_SSE2(env_cg)) { ir_node *noreg_gp = ia32_new_NoReg_gp(env_cg); ir_node *noreg_fp = ia32_new_NoReg_fp(env_cg); @@ -1513,6 +1436,29 @@ static ir_node *gen_Minus(ir_node *node) { return gen_Minus_ex(node, get_Minus_op(node)); } +static ir_node *create_Immediate_from_int(int val) +{ + ir_graph *irg = current_ir_graph; + ir_node *start_block = get_irg_start_block(irg); + ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block, NULL, 0, val); + arch_set_irn_register(env_cg->arch_env, immediate, &ia32_gp_regs[REG_GP_NOREG]); + + return immediate; +} + +static ir_node *gen_bin_Not(ir_node *node) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *op = get_Not_op(node); + ir_node *new_op = be_transform_node(op); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *one = create_Immediate_from_int(1); + + return new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_op, one, nomem); +} /** * Transforms a Not node. @@ -1520,7 +1466,12 @@ static ir_node *gen_Minus(ir_node *node) { * @return The created ia32 Not node */ static ir_node *gen_Not(ir_node *node) { - ir_node *op = get_Not_op(node); + ir_node *op = get_Not_op(node); + ir_mode *mode = get_irn_mode(node); + + if(mode == mode_b) { + return gen_bin_Not(node); + } assert (! 
mode_is_float(get_irn_mode(node))); return gen_unop(node, op, new_rd_ia32_Not); @@ -1548,7 +1499,6 @@ static ir_node *gen_Abs(ir_node *node) { ir_entity *ent; if (mode_is_float(mode)) { - FP_USED(env_cg); if (USE_SSE2(env_cg)) { res = new_rd_ia32_xAnd(dbgi,irg, block, noreg_gp, noreg_gp, new_op, noreg_fp, nomem); @@ -1568,7 +1518,11 @@ static ir_node *gen_Abs(ir_node *node) { } } else { ir_node *xor; - ir_node *sign_extension = new_rd_ia32_Cltd(dbgi, irg, block, new_op); + ir_node *pval = new_rd_ia32_ProduceVal(dbgi, irg, block); + ir_node *sign_extension = new_rd_ia32_Cltd(dbgi, irg, block, new_op, + pval); + + add_irn_dep(pval, get_irg_frame(irg)); SET_IA32_ORIG_NODE(sign_extension, ia32_get_old_node_name(env_cg, node)); @@ -1592,7 +1546,8 @@ static ir_node *gen_Abs(ir_node *node) { * @return the created ia32 Load node */ static ir_node *gen_Load(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *old_block = get_nodes_block(node); + ir_node *block = be_transform_node(old_block); ir_node *ptr = get_Load_ptr(node); ir_node *new_ptr = be_transform_node(ptr); ir_node *mem = get_Load_mem(node); @@ -1605,11 +1560,8 @@ static ir_node *gen_Load(ir_node *node) { ir_node *lptr = new_ptr; int is_imm = 0; ir_node *new_op; - ir_node *projs[pn_Load_max]; ia32_am_flavour_t am_flav = ia32_am_B; - ia32_collect_Projs(node, projs, pn_Load_max); - /* address might be a constant (symconst or absolute address) */ if (is_ia32_Const(new_ptr)) { lptr = noreg; @@ -1617,7 +1569,6 @@ static ir_node *gen_Load(ir_node *node) { } if (mode_is_float(mode)) { - FP_USED(env_cg); if (USE_SSE2(env_cg)) { new_op = new_rd_ia32_xLoad(dbgi, irg, block, lptr, noreg, new_mem); res_mode = mode_xmm; @@ -1626,18 +1577,17 @@ static ir_node *gen_Load(ir_node *node) { res_mode = mode_vfp; } } else { - new_op = new_rd_ia32_Load(dbgi, irg, block, lptr, noreg, new_mem); - res_mode = mode_Iu; - } + if(mode == mode_b) + mode = mode_Iu; - /* - check for special case: the loaded value might not be used - */ - if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) { - /* add a result proj and a Keep to produce a pseudo use */ - ir_node *proj = new_r_Proj(irg, block, new_op, mode_Iu, - pn_ia32_Load_res); - be_new_Keep(arch_get_irn_reg_class(env_cg->arch_env, proj, -1), irg, block, 1, &proj); + /* create a conv node with address mode for smaller modes */ + if(get_mode_size_bits(mode) < 32) { + new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, lptr, noreg, noreg, + new_mem, mode); + } else { + new_op = new_rd_ia32_Load(dbgi, irg, block, lptr, noreg, new_mem); + } + res_mode = mode_Iu; } /* base is a constant address */ @@ -1703,8 +1653,6 @@ static ir_node *gen_Store(ir_node *node) { } if (mode_is_float(mode)) { - FP_USED(env_cg); - new_val = be_transform_node(val); if (USE_SSE2(env_cg)) { new_op = new_rd_ia32_xStore(dbgi, irg, block, sptr, noreg, new_val, @@ -1714,10 +1662,9 @@ static ir_node *gen_Store(ir_node *node) { new_mem, mode); } } else { - new_val = try_create_Immediate(val, 0); - if(new_val == NULL) { - new_val = be_transform_node(val); - } + new_val = create_immediate_or_transform(val, 0); + if(mode == mode_b) + mode = mode_Iu; if (get_mode_size_bits(mode) == 8) { new_op = new_rd_ia32_Store8Bit(dbgi, irg, block, sptr, noreg, @@ -1753,7 +1700,109 @@ static ir_node *gen_Store(ir_node *node) { return new_op; } +static ir_node *maybe_scale_up(ir_node *new_op, ir_mode *mode, dbg_info *dbgi) +{ + ir_mode *tgt_mode; + ir_node *block; + + if(get_mode_size_bits(mode) == 32) + return new_op; + if(mode == mode_b) + 
return new_op; + if(is_ia32_Immediate(new_op)) + return new_op; + if(mode_is_signed(mode)) + tgt_mode = mode_Is; + else + tgt_mode = mode_Iu; + + block = get_nodes_block(new_op); + return create_I2I_Conv(mode, tgt_mode, dbgi, block, new_op); +} + +static ir_node *try_create_TestJmp(ir_node *block, dbg_info *dbgi, long pnc, + ir_node *cmp_left, ir_node *cmp_right) +{ + ir_node *new_cmp_left; + ir_node *new_cmp_right; + ir_node *and_left; + ir_node *and_right; + ir_node *res; + ir_node *noreg; + ir_node *nomem; + ir_mode *mode; + long pure_pnc = pnc & ~ia32_pn_Cmp_Unsigned; + + if(cmp_right != NULL && !is_Const_0(cmp_right)) + return NULL; + + if(is_And(cmp_left) && (pure_pnc == pn_Cmp_Eq || pure_pnc == pn_Cmp_Lg)) { + and_left = get_And_left(cmp_left); + and_right = get_And_right(cmp_left); + + mode = get_irn_mode(and_left); + new_cmp_left = be_transform_node(and_left); + new_cmp_right = create_immediate_or_transform(and_right, 0); + } else { + mode = get_irn_mode(cmp_left); + new_cmp_left = be_transform_node(cmp_left); + new_cmp_right = be_transform_node(cmp_left); + } + + assert(get_mode_size_bits(mode) <= 32); + new_cmp_left = maybe_scale_up(new_cmp_left, mode, dbgi); + new_cmp_right = maybe_scale_up(new_cmp_right, mode, dbgi); + noreg = ia32_new_NoReg_gp(env_cg); + nomem = new_NoMem(); + + res = new_rd_ia32_TestJmp(dbgi, current_ir_graph, block, noreg, noreg, + new_cmp_left, new_cmp_right, nomem, pnc); + set_ia32_am_support(res, ia32_am_Source, ia32_am_binary); + + return res; +} + +static ir_node *create_Switch(ir_node *node) +{ + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *sel = get_Cond_selector(node); + ir_node *new_sel = be_transform_node(sel); + ir_node *res; + int switch_min = INT_MAX; + const ir_edge_t *edge; + + assert(get_mode_size_bits(get_irn_mode(sel)) == 32); + + /* determine the smallest switch case value */ + foreach_out_edge(node, edge) { + ir_node *proj = get_edge_src_irn(edge); + int pn = get_Proj_proj(proj); + if(pn < switch_min) + switch_min = pn; + } + + if (switch_min != 0) { + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + + /* if smallest switch case is not 0 we need an additional sub */ + new_sel = new_rd_ia32_Lea(dbgi, irg, block, new_sel, noreg); + add_ia32_am_offs_int(new_sel, -switch_min); + set_ia32_am_flavour(new_sel, ia32_am_OB); + set_ia32_op_type(new_sel, ia32_AddrModeS); + + SET_IA32_ORIG_NODE(new_sel, ia32_get_old_node_name(env_cg, node)); + } + + res = new_rd_ia32_SwitchJmp(dbgi, irg, block, new_sel); + set_ia32_pncode(res, get_Cond_defaultProj(node)); + + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + + return res; +} /** * Transforms a Cond -> Proj[b] -> Cmp into a CondJmp, CondJmp_i or TestJmp @@ -1768,129 +1817,74 @@ static ir_node *gen_Cond(ir_node *node) { ir_mode *sel_mode = get_irn_mode(sel); ir_node *res = NULL; ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *cnst, *expr; - - if (is_Proj(sel) && sel_mode == mode_b) { - ir_node *pred = get_Proj_pred(sel); - ir_node *cmp_a = get_Cmp_left(pred); - ir_node *new_cmp_a = be_transform_node(cmp_a); - ir_node *cmp_b = get_Cmp_right(pred); - ir_node *new_cmp_b = be_transform_node(cmp_b); - ir_mode *cmp_mode = get_irn_mode(cmp_a); - ir_node *nomem = new_NoMem(); - - int pnc = get_Proj_proj(sel); - if(mode_is_float(cmp_mode) || !mode_is_signed(cmp_mode)) { - pnc |= ia32_pn_Cmp_Unsigned; - } - - /* check if we can use a CondJmp with immediate */ - cnst = (env_cg->opt & 
IA32_OPT_IMMOPS) ? get_immediate_op(new_cmp_a, new_cmp_b) : NULL; - expr = get_expr_op(new_cmp_a, new_cmp_b); - - if (cnst != NULL && expr != NULL) { - /* immop has to be the right operand, we might need to flip pnc */ - if(cnst != new_cmp_b) { - pnc = get_inversed_pnc(pnc); - } + ir_node *cmp; + ir_node *cmp_a; + ir_node *cmp_b; + ir_node *new_cmp_a; + ir_node *new_cmp_b; + ir_mode *cmp_mode; + ir_node *nomem = new_NoMem(); + long pnc; - if ((pnc == pn_Cmp_Eq || pnc == pn_Cmp_Lg) && mode_needs_gp_reg(get_irn_mode(expr))) { - if (get_ia32_immop_type(cnst) == ia32_ImmConst && - classify_tarval(get_ia32_Immop_tarval(cnst)) == TV_CLASSIFY_NULL) - { - /* a Cmp A =/!= 0 */ - ir_node *op1 = expr; - ir_node *op2 = expr; - int is_and = 0; - - /* check, if expr is an only once used And operation */ - if (is_ia32_And(expr) && get_irn_n_edges(expr)) { - op1 = get_irn_n(expr, 2); - op2 = get_irn_n(expr, 3); - - is_and = (is_ia32_ImmConst(expr) || is_ia32_ImmSymConst(expr)); - } - res = new_rd_ia32_TestJmp(dbgi, irg, block, op1, op2); - set_ia32_pncode(res, pnc); + if (sel_mode != mode_b) { + return create_Switch(node); + } - if (is_and) { - copy_ia32_Immop_attr(res, expr); - } + if(!is_Proj(sel) || !is_Cmp(get_Proj_pred(sel))) { + /* it's some mode_b value but not a direct comparison -> create a + * testjmp */ + res = try_create_TestJmp(block, dbgi, pn_Cmp_Lg, sel, NULL); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + return res; + } - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - return res; - } - } + cmp = get_Proj_pred(sel); + cmp_a = get_Cmp_left(cmp); + cmp_b = get_Cmp_right(cmp); + cmp_mode = get_irn_mode(cmp_a); + pnc = get_Proj_proj(sel); + if(mode_is_float(cmp_mode) || !mode_is_signed(cmp_mode)) { + pnc |= ia32_pn_Cmp_Unsigned; + } - if (mode_is_float(cmp_mode)) { - FP_USED(env_cg); - if (USE_SSE2(env_cg)) { - res = new_rd_ia32_xCondJmp(dbgi, irg, block, noreg, noreg, expr, noreg, nomem); - set_ia32_ls_mode(res, cmp_mode); - } else { - assert(0); - } - } - else { - assert(get_mode_size_bits(cmp_mode) == 32); - res = new_rd_ia32_CondJmp(dbgi, irg, block, noreg, noreg, expr, noreg, nomem); - } - copy_ia32_Immop_attr(res, cnst); - } - else { - ir_mode *cmp_mode = get_irn_mode(cmp_a); - - if (mode_is_float(cmp_mode)) { - FP_USED(env_cg); - if (USE_SSE2(env_cg)) { - res = new_rd_ia32_xCondJmp(dbgi, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - set_ia32_ls_mode(res, cmp_mode); - } else { - ir_node *proj_eax; - res = new_rd_ia32_vfCondJmp(dbgi, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - proj_eax = new_r_Proj(irg, block, res, mode_Iu, pn_ia32_vfCondJmp_temp_reg_eax); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, &proj_eax); - } - } - else { - assert(get_mode_size_bits(cmp_mode) == 32); - res = new_rd_ia32_CondJmp(dbgi, irg, block, noreg, noreg, cmp_a, cmp_b, nomem); - set_ia32_commutative(res); - } + if(mode_needs_gp_reg(cmp_mode)) { + res = try_create_TestJmp(block, dbgi, pnc, cmp_a, cmp_b); + if(res != NULL) { + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + return res; } - - set_ia32_pncode(res, pnc); - // Matze: disabled for now, because the default collect_spills_walker - // is not able to detect the mode of the spilled value - // moreover, the lea optimize phase freely exchanges left/right - // without updating the pnc - //set_ia32_am_support(res, ia32_am_Source | ia32_am_binary); } - else { - /* determine the smallest switch case value */ - ir_node *new_sel = be_transform_node(sel); - int switch_min = INT_MAX; 
- const ir_edge_t *edge; - - foreach_out_edge(node, edge) { - int pn = get_Proj_proj(get_edge_src_irn(edge)); - switch_min = pn < switch_min ? pn : switch_min; - } - if (switch_min) { - /* if smallest switch case is not 0 we need an additional sub */ - res = new_rd_ia32_Lea(dbgi, irg, block, new_sel, noreg); - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); - add_ia32_am_offs_int(res, -switch_min); - set_ia32_am_flavour(res, ia32_am_OB); - set_ia32_op_type(res, ia32_AddrModeS); + new_cmp_a = be_transform_node(cmp_a); + new_cmp_b = create_immediate_or_transform(cmp_b, 0); + + if (mode_is_float(cmp_mode)) { + if (USE_SSE2(env_cg)) { + res = new_rd_ia32_xCondJmp(dbgi, irg, block, noreg, noreg, cmp_a, + cmp_b, nomem, pnc); + set_ia32_commutative(res); + set_ia32_am_support(res, ia32_am_Source, ia32_am_binary); + set_ia32_ls_mode(res, cmp_mode); + } else { + res = new_rd_ia32_vfCondJmp(dbgi, irg, block, cmp_a, cmp_b, pnc); + set_ia32_commutative(res); } + } else { + /** workaround smaller compare modes with converts... + * We could easily support 16bit compares, for 8 bit we have to set + * additional register constraints, which we don't do yet + */ + new_cmp_a = maybe_scale_up(new_cmp_a, cmp_mode, dbgi); + new_cmp_b = maybe_scale_up(new_cmp_b, cmp_mode, dbgi); - res = new_rd_ia32_SwitchJmp(dbgi, irg, block, switch_min ? res : new_sel, mode_T); - set_ia32_pncode(res, get_Cond_defaultProj(node)); + res = new_rd_ia32_CondJmp(dbgi, irg, block, noreg, noreg, + new_cmp_a, new_cmp_b, nomem, pnc); + set_ia32_commutative(res); + set_ia32_am_support(res, ia32_am_Source, ia32_am_binary); } SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); + return res; } @@ -1913,10 +1907,7 @@ static ir_node *gen_CopyB(ir_node *node) { ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); int size = get_type_size_bytes(get_CopyB_type(node)); - ir_mode *dst_mode = get_irn_mode(dst); - ir_mode *src_mode = get_irn_mode(src); int rem; - ir_node *in[3]; /* If we have to copy more than 32 bytes, we use REP MOVSx and */ /* then we need the size explicitly in ECX. */ @@ -1930,21 +1921,9 @@ static ir_node *gen_CopyB(ir_node *node) { res = new_rd_ia32_CopyB(dbgi, irg, block, new_dst, new_src, res, new_mem); set_ia32_Immop_tarval(res, new_tarval_from_long(rem, mode_Is)); - - /* ok: now attach Proj's because rep movsd will destroy esi, edi and ecx */ - in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_DST); - in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_SRC); - in[2] = new_r_Proj(irg, block, res, mode_Iu, pn_ia32_CopyB_CNT); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 3, in); - } - else { + } else { res = new_rd_ia32_CopyB_i(dbgi, irg, block, new_dst, new_src, new_mem); set_ia32_Immop_tarval(res, new_tarval_from_long(size, mode_Is)); - - /* ok: now attach Proj's because movsd will destroy esi and edi */ - in[0] = new_r_Proj(irg, block, res, dst_mode, pn_ia32_CopyB_i_DST); - in[1] = new_r_Proj(irg, block, res, src_mode, pn_ia32_CopyB_i_SRC); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 2, in); } SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, node)); @@ -1966,25 +1945,126 @@ ir_node *gen_be_Copy(ir_node *node) } -#if 0 -/** - * Transforms a Mux node into CMov. - * - * @return The transformed node. 
- */ -static ir_node *gen_Mux(ir_node *node) { - ir_node *new_op = new_rd_ia32_CMov(env.dbgi, current_ir_graph, env.block, \ - get_Mux_sel(node), get_Mux_false(node), get_Mux_true(node), env.mode); +static ir_node *create_set(long pnc, ir_node *cmp_left, ir_node *cmp_right, + dbg_info *dbgi, ir_node *block) +{ + ir_graph *irg = current_ir_graph; + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_rd_NoMem(irg); + ir_mode *mode; + ir_node *new_cmp_left; + ir_node *new_cmp_right; + ir_node *res; - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + /* can we use a test instruction? */ + if(cmp_right == NULL || is_Const_0(cmp_right)) { + long pure_pnc = pnc & ~ia32_pn_Cmp_Unsigned; + if(is_And(cmp_left) && + (pure_pnc == pn_Cmp_Eq || pure_pnc == pn_Cmp_Lg)) { + ir_node *and_left = get_And_left(cmp_left); + ir_node *and_right = get_And_right(cmp_left); + + mode = get_irn_mode(and_left); + new_cmp_left = be_transform_node(and_left); + new_cmp_right = create_immediate_or_transform(and_right, 0); + } else { + mode = get_irn_mode(cmp_left); + new_cmp_left = be_transform_node(cmp_left); + new_cmp_right = be_transform_node(cmp_left); + } - return new_op; + assert(get_mode_size_bits(mode) <= 32); + new_cmp_left = maybe_scale_up(new_cmp_left, mode, dbgi); + new_cmp_right = maybe_scale_up(new_cmp_right, mode, dbgi); + + res = new_rd_ia32_TestSet(dbgi, current_ir_graph, block, noreg, noreg, + new_cmp_left, new_cmp_right, nomem, pnc); + set_ia32_am_support(res, ia32_am_Source, ia32_am_binary); + + res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, res, + nomem, mode_Bu); + + return res; + } + + mode = get_irn_mode(cmp_left); + + new_cmp_left = be_transform_node(cmp_left); + new_cmp_right = create_immediate_or_transform(cmp_right, 0); + + assert(get_mode_size_bits(mode) <= 32); + new_cmp_left = maybe_scale_up(new_cmp_left, mode, dbgi); + new_cmp_right = maybe_scale_up(new_cmp_right, mode, dbgi); + + res = new_rd_ia32_CmpSet(dbgi, irg, block, noreg, noreg, new_cmp_left, + new_cmp_right, nomem, pnc); + res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, res, nomem, + mode_Bu); + + return res; } + +static ir_node *create_cmov(long pnc, ir_node *cmp_left, ir_node *cmp_right, + ir_node *val_true, ir_node *val_false, + dbg_info *dbgi, ir_node *block) +{ + ir_graph *irg = current_ir_graph; + ir_node *new_val_true = be_transform_node(val_true); + ir_node *new_val_false = be_transform_node(val_false); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *new_cmp_left; + ir_node *new_cmp_right; + ir_node *res; + + /* cmovs with unknowns are pointless... */ + if(is_Unknown(val_true)) { +#ifdef DEBUG_libfirm + ir_fprintf(stderr, "Optimisation warning: psi with unknown operand\n"); #endif + return new_val_false; + } + if(is_Unknown(val_false)) { +#ifdef DEBUG_libfirm + ir_fprintf(stderr, "Optimisation warning: psi with unknown operand\n"); +#endif + return new_val_true; + } + + /* can we use a test instruction? 
*/ + if(is_Const_0(cmp_right)) { + long pure_pnc = pnc & ~ia32_pn_Cmp_Unsigned; + if(is_And(cmp_left) && + (pure_pnc == pn_Cmp_Eq || pure_pnc == pn_Cmp_Lg)) { + ir_node *and_left = get_And_left(cmp_left); + ir_node *and_right = get_And_right(cmp_left); + + new_cmp_left = be_transform_node(and_left); + new_cmp_right = create_immediate_or_transform(and_right, 0); + } else { + new_cmp_left = be_transform_node(cmp_left); + new_cmp_right = be_transform_node(cmp_left); + } + + res = new_rd_ia32_TestCMov(dbgi, current_ir_graph, block, noreg, noreg, + new_cmp_left, new_cmp_right, nomem, + new_val_true, new_val_false, pnc); + set_ia32_am_support(res, ia32_am_Source, ia32_am_binary); + + return res; + } + + new_cmp_left = be_transform_node(cmp_left); + new_cmp_right = create_immediate_or_transform(cmp_right, 0); + + res = new_rd_ia32_CmpCMov(dbgi, irg, block, noreg, noreg, new_cmp_left, + new_cmp_right, nomem, new_val_true, new_val_false, + pnc); + set_ia32_am_support(res, ia32_am_Source, ia32_am_binary); + + return res; +} -typedef ir_node *cmov_func_t(dbg_info *db, ir_graph *irg, ir_node *block, - ir_node *cmp_a, ir_node *cmp_b, ir_node *psi_true, - ir_node *psi_default); /** * Transforms a Psi node into CMov. @@ -1992,56 +2072,37 @@ typedef ir_node *cmov_func_t(dbg_info *db, ir_graph *irg, ir_node *block, * @return The transformed node. */ static ir_node *gen_Psi(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *psi_true = get_Psi_val(node, 0); - ir_node *psi_default = get_Psi_default(node); - ia32_code_gen_t *cg = env_cg; - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *cond = get_Psi_cond(node, 0); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *nomem = new_NoMem(); + ir_node *psi_true = get_Psi_val(node, 0); + ir_node *psi_default = get_Psi_default(node); + ia32_code_gen_t *cg = env_cg; + ir_node *cond = get_Psi_cond(node, 0); + ir_node *block = be_transform_node(get_nodes_block(node)); + dbg_info *dbgi = get_irn_dbg_info(node); ir_node *new_op; - ir_node *cmp, *cmp_a, *cmp_b; - ir_node *new_cmp_a, *new_cmp_b; + ir_node *cmp_left; + ir_node *cmp_right; ir_mode *cmp_mode; - int pnc; + long pnc; assert(get_Psi_n_conds(node) == 1); assert(get_irn_mode(cond) == mode_b); + assert(mode_needs_gp_reg(get_irn_mode(node))); - if(is_And(cond) || is_Or(cond)) { - ir_node *new_cond = be_transform_node(cond); - ir_node *zero = new_rd_ia32_Immediate(NULL, irg, block, NULL, 0, 0); - arch_set_irn_register(env_cg->arch_env, zero, - &ia32_gp_regs[REG_GP_NOREG]); - - /* we have to compare the result against zero */ - new_cmp_a = new_cond; - new_cmp_b = zero; - cmp_mode = mode_Iu; + if(!is_Proj(cond) || !is_Cmp(get_Proj_pred(cond))) { + /* a mode_b value, we have to compare it against 0 */ + cmp_left = cond; + cmp_right = new_Const_long(mode_Iu, 0); pnc = pn_Cmp_Lg; + cmp_mode = mode_Iu; } else { - cmp = get_Proj_pred(cond); - cmp_a = get_Cmp_left(cmp); - cmp_b = get_Cmp_right(cmp); - cmp_mode = get_irn_mode(cmp_a); + ir_node *cmp = get_Proj_pred(cond); + + cmp_left = get_Cmp_left(cmp); + cmp_right = get_Cmp_right(cmp); + cmp_mode = get_irn_mode(cmp_left); pnc = get_Proj_proj(cond); - new_cmp_b = try_create_Immediate(cmp_b, 0); - if(new_cmp_b == NULL) { - new_cmp_b = try_create_Immediate(cmp_a, 0); - if(new_cmp_b != NULL) { - pnc = get_inversed_pnc(pnc); - new_cmp_a = be_transform_node(cmp_b); - } - } else { - new_cmp_a = be_transform_node(cmp_a); - } - if(new_cmp_b == NULL) { - new_cmp_a = be_transform_node(cmp_a); - 
new_cmp_b = be_transform_node(cmp_b); - } + assert(!mode_is_float(cmp_mode)); if (!mode_is_signed(cmp_mode)) { pnc |= ia32_pn_Cmp_Unsigned; @@ -2049,17 +2110,13 @@ static ir_node *gen_Psi(ir_node *node) { } if(is_Const_1(psi_true) && is_Const_0(psi_default)) { - new_op = new_rd_ia32_CmpSet(dbgi, irg, block, noreg, noreg, - new_cmp_a, new_cmp_b, nomem, pnc); + new_op = create_set(pnc, cmp_left, cmp_right, dbgi, block); } else if(is_Const_0(psi_true) && is_Const_1(psi_default)) { pnc = get_negated_pnc(pnc, cmp_mode); - new_op = new_rd_ia32_CmpSet(dbgi, irg, block, noreg, noreg, - new_cmp_a, new_cmp_b, nomem, pnc); + new_op = create_set(pnc, cmp_left, cmp_right, dbgi, block); } else { - ir_node *new_psi_true = be_transform_node(psi_true); - ir_node *new_psi_default = be_transform_node(psi_default); - new_op = new_rd_ia32_CmpCMov(dbgi, irg, block, new_cmp_a, new_cmp_b, - new_psi_true, new_psi_default, pnc); + new_op = create_cmov(pnc, cmp_left, cmp_right, psi_true, psi_default, + dbgi, block); } SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(cg, node)); return new_op; @@ -2106,6 +2163,7 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) { dbg_info *dbgi = get_irn_dbg_info(node); ir_node *noreg = ia32_new_NoReg_gp(cg); ir_node *trunc_mode = ia32_new_Fpu_truncate(cg); + ir_mode *mode = get_irn_mode(node); ir_node *fist, *load; /* do a fist */ @@ -2116,7 +2174,15 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) { set_ia32_use_frame(fist); set_ia32_op_type(fist, ia32_AddrModeD); set_ia32_am_flavour(fist, ia32_am_B); - set_ia32_ls_mode(fist, mode_Iu); + + assert(get_mode_size_bits(mode) <= 32); + /* exception we can only store signed 32 bit integers, so for unsigned + we store a 64bit (signed) integer and load the lower bits */ + if(get_mode_size_bits(mode) == 32 && !mode_is_signed(mode)) { + set_ia32_ls_mode(fist, mode_Ls); + } else { + set_ia32_ls_mode(fist, mode_Is); + } SET_IA32_ORIG_NODE(fist, ia32_get_old_node_name(cg, node)); /* do a Load */ @@ -2126,12 +2192,48 @@ static ir_node *gen_x87_fp_to_gp(ir_node *node) { set_ia32_use_frame(load); set_ia32_op_type(load, ia32_AddrModeS); set_ia32_am_flavour(load, ia32_am_B); - set_ia32_ls_mode(load, mode_Iu); + set_ia32_ls_mode(load, mode_Is); + if(get_ia32_ls_mode(fist) == mode_Ls) { + ia32_attr_t *attr = get_ia32_attr(load); + attr->data.need_64bit_stackent = 1; + } else { + ia32_attr_t *attr = get_ia32_attr(load); + attr->data.need_32bit_stackent = 1; + } SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(cg, node)); return new_r_Proj(irg, block, load, mode_Iu, pn_ia32_Load_res); } +static ir_node *create_strict_conv(ir_mode *tgt_mode, ir_node *node) +{ + ir_node *block = get_nodes_block(node); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *frame = get_irg_frame(irg); + ir_node *store, *load; + ir_node *res; + + store = new_rd_ia32_vfst(dbgi, irg, block, frame, noreg, node, nomem, + tgt_mode); + set_ia32_use_frame(store); + set_ia32_op_type(store, ia32_AddrModeD); + set_ia32_am_flavour(store, ia32_am_OB); + SET_IA32_ORIG_NODE(store, ia32_get_old_node_name(env_cg, node)); + + load = new_rd_ia32_vfld(dbgi, irg, block, frame, noreg, store, + tgt_mode); + set_ia32_use_frame(load); + set_ia32_op_type(load, ia32_AddrModeS); + set_ia32_am_flavour(load, ia32_am_OB); + SET_IA32_ORIG_NODE(load, ia32_get_old_node_name(env_cg, node)); + + res = new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res); + return res; +} + /** * Create a 
conversion from general purpose to x87 register */ @@ -2143,23 +2245,29 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { dbg_info *dbgi = get_irn_dbg_info(node); ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_node *nomem = new_NoMem(); + ir_mode *mode = get_irn_mode(op); + ir_mode *store_mode; ir_node *fild, *store; - int src_bits; + ir_node *res; + int src_bits; - /* first convert to 32 bit if necessary */ + /* first convert to 32 bit signed if necessary */ src_bits = get_mode_size_bits(src_mode); if (src_bits == 8) { - new_op = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, new_op, nomem); + new_op = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, new_op, nomem, + src_mode); set_ia32_am_support(new_op, ia32_am_Source, ia32_am_unary); - set_ia32_ls_mode(new_op, src_mode); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + mode = mode_Is; } else if (src_bits < 32) { - new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, noreg, noreg, new_op, nomem); + new_op = new_rd_ia32_Conv_I2I(dbgi, irg, block, noreg, noreg, new_op, nomem, src_mode); set_ia32_am_support(new_op, ia32_am_Source, ia32_am_unary); - set_ia32_ls_mode(new_op, src_mode); SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + mode = mode_Is; } + assert(get_mode_size_bits(mode) == 32); + /* do a store */ store = new_rd_ia32_Store(dbgi, irg, block, get_irg_frame(irg), noreg, new_op, nomem); @@ -2168,50 +2276,76 @@ static ir_node *gen_x87_gp_to_fp(ir_node *node, ir_mode *src_mode) { set_ia32_am_flavour(store, ia32_am_OB); set_ia32_ls_mode(store, mode_Iu); + /* exception for 32bit unsigned, do a 64bit spill+load */ + if(!mode_is_signed(mode)) { + ir_node *in[2]; + /* store a zero */ + ir_node *zero_const = create_Immediate_from_int(0); + + ir_node *zero_store = new_rd_ia32_Store(dbgi, irg, block, get_irg_frame(irg), noreg, + zero_const, nomem); + + set_ia32_use_frame(zero_store); + set_ia32_op_type(zero_store, ia32_AddrModeD); + add_ia32_am_offs_int(zero_store, 4); + set_ia32_ls_mode(zero_store, mode_Iu); + + in[0] = zero_store; + in[1] = store; + + store = new_rd_Sync(dbgi, irg, block, 2, in); + store_mode = mode_Ls; + } else { + store_mode = mode_Is; + } + /* do a fild */ fild = new_rd_ia32_vfild(dbgi, irg, block, get_irg_frame(irg), noreg, store); set_ia32_use_frame(fild); set_ia32_op_type(fild, ia32_AddrModeS); set_ia32_am_flavour(fild, ia32_am_OB); - set_ia32_ls_mode(fild, mode_Iu); + set_ia32_ls_mode(fild, store_mode); + + res = new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); - return new_r_Proj(irg, block, fild, mode_vfp, pn_ia32_vfild_res); + return res; } -static ir_node *create_Strict_conv(ir_mode *src_mode, ir_mode *tgt_mode, - ir_node *node) +/** + * Crete a conversion from one integer mode into another one + */ +static ir_node *create_I2I_Conv(ir_mode *src_mode, ir_mode *tgt_mode, + dbg_info *dbgi, ir_node *new_block, + ir_node *new_op) { - ir_node *block = get_nodes_block(node); ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *nomem = new_NoMem(); int src_bits = get_mode_size_bits(src_mode); int tgt_bits = get_mode_size_bits(tgt_mode); - ir_node *frame = get_irg_frame(irg); - ir_mode *smaller_mode; - ir_node *store, *load; + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_rd_NoMem(irg); ir_node *res; + ir_mode *smaller_mode; + int smaller_bits; - if(src_bits <= tgt_bits) + if (src_bits < tgt_bits) { smaller_mode = src_mode; - else + 
smaller_bits = src_bits; + } else { smaller_mode = tgt_mode; + smaller_bits = tgt_bits; + } - store = new_rd_ia32_vfst(dbgi, irg, block, frame, noreg, node, nomem, - smaller_mode); - set_ia32_use_frame(store); - set_ia32_op_type(store, ia32_AddrModeD); - set_ia32_am_flavour(store, ia32_am_OB); - - load = new_rd_ia32_vfld(dbgi, irg, block, frame, noreg, store, - smaller_mode); - set_ia32_use_frame(load); - set_ia32_op_type(load, ia32_AddrModeS); - set_ia32_am_flavour(load, ia32_am_OB); + DB((dbg, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); + if (smaller_bits == 8) { + res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, new_block, noreg, noreg, + new_op, nomem, smaller_mode); + } else { + res = new_rd_ia32_Conv_I2I(dbgi, irg, new_block, noreg, noreg, new_op, + nomem, smaller_mode); + } + set_ia32_am_support(res, ia32_am_Source, ia32_am_unary); - res = new_r_Proj(irg, block, load, mode_E, pn_ia32_vfld_res); return res; } @@ -2228,12 +2362,18 @@ static ir_node *gen_Conv(ir_node *node) { dbg_info *dbgi = get_irn_dbg_info(node); ir_mode *src_mode = get_irn_mode(op); ir_mode *tgt_mode = get_irn_mode(node); - int src_bits = get_mode_size_bits(src_mode); - int tgt_bits = get_mode_size_bits(tgt_mode); + int src_bits = get_mode_size_bits(src_mode); + int tgt_bits = get_mode_size_bits(tgt_mode); ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_node *nomem = new_rd_NoMem(irg); ir_node *res; + if (src_mode == mode_b) { + assert(mode_is_int(tgt_mode)); + /* nothing to do, we already model bools as 0/1 ints */ + return new_op; + } + if (src_mode == tgt_mode) { if (get_Conv_strict(node)) { if (USE_SSE2(env_cg)) { @@ -2263,9 +2403,8 @@ static ir_node *gen_Conv(ir_node *node) { res = new_rd_ia32_Conv_FP2FP(dbgi, irg, block, noreg, noreg, new_op, nomem); set_ia32_ls_mode(res, tgt_mode); } else { - // Matze: TODO what about strict convs? if(get_Conv_strict(node)) { - res = create_Strict_conv(src_mode, tgt_mode, new_op); + res = create_strict_conv(tgt_mode, new_op); SET_IA32_ORIG_NODE(get_Proj_pred(res), ia32_get_old_node_name(env_cg, node)); return res; } @@ -2285,7 +2424,6 @@ static ir_node *gen_Conv(ir_node *node) { } else { /* we convert from int ... */ if (mode_is_float(tgt_mode)) { - FP_USED(env_cg); /* ... 
to float */ DB((dbg, LEVEL_1, "create Conv(int, float) ...")); if (USE_SSE2(env_cg)) { @@ -2295,35 +2433,28 @@ static ir_node *gen_Conv(ir_node *node) { set_ia32_am_support(res, ia32_am_Source, ia32_am_unary); } } else { - return gen_x87_gp_to_fp(node, src_mode); + res = gen_x87_gp_to_fp(node, src_mode); + if(get_Conv_strict(node)) { + res = create_strict_conv(tgt_mode, res); + SET_IA32_ORIG_NODE(get_Proj_pred(res), + ia32_get_old_node_name(env_cg, node)); + } + return res; } + } else if(tgt_mode == mode_b) { + /* mode_b lowering already took care that we only have 0/1 values */ + DB((dbg, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", + src_mode, tgt_mode)); + return new_op; } else { /* to int */ - ir_mode *smaller_mode; - int smaller_bits; - if (src_bits == tgt_bits) { - DB((dbg, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", src_mode, tgt_mode)); + DB((dbg, LEVEL_1, "omitting unnecessary Conv(%+F, %+F) ...", + src_mode, tgt_mode)); return new_op; } - if (src_bits < tgt_bits) { - smaller_mode = src_mode; - smaller_bits = src_bits; - } else { - smaller_mode = tgt_mode; - smaller_bits = tgt_bits; - } - - DB((dbg, LEVEL_1, "create Conv(int, int) ...", src_mode, tgt_mode)); - if (smaller_bits == 8) { - res = new_rd_ia32_Conv_I2I8Bit(dbgi, irg, block, noreg, noreg, new_op, nomem); - set_ia32_ls_mode(res, smaller_mode); - } else { - res = new_rd_ia32_Conv_I2I(dbgi, irg, block, noreg, noreg, new_op, nomem); - set_ia32_ls_mode(res, smaller_mode); - } - set_ia32_am_support(res, ia32_am_Source, ia32_am_unary); + res = create_I2I_Conv(src_mode, tgt_mode, dbgi, block, new_op); } } @@ -2377,8 +2508,7 @@ ir_node *try_create_Immediate(ir_node *node, char immediate_constraint_type) ir_node *block; mode = get_irn_mode(node); - if(!mode_is_int(mode) && !mode_is_character(mode) && - !mode_is_reference(mode)) { + if(!mode_is_int(mode) && !mode_is_reference(mode)) { return NULL; } @@ -2462,16 +2592,23 @@ ir_node *try_create_Immediate(ir_node *node, char immediate_constraint_type) irg = current_ir_graph; dbgi = get_irn_dbg_info(node); block = get_irg_start_block(irg); - res = new_rd_ia32_Immediate(dbgi, irg, block, symconst_ent, symconst_sign, - val); + res = new_rd_ia32_Immediate(dbgi, irg, block, symconst_ent, + symconst_sign, val); arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_GP_NOREG]); - /* make sure we don't schedule stuff before the barrier */ - add_irn_dep(res, get_irg_frame(irg)); - return res; } +static +ir_node *create_immediate_or_transform(ir_node *node, char immediate_constraint_type) +{ + ir_node *new_node = try_create_Immediate(node, immediate_constraint_type); + if (new_node == NULL) { + new_node = be_transform_node(node); + } + return new_node; +} + typedef struct constraint_t constraint_t; struct constraint_t { int is_in; @@ -2748,9 +2885,6 @@ ir_node *gen_ASM(ir_node *node) struct obstack *obst; constraint_t parsed_constraint; - /* assembler could contain float statements */ - FP_USED(env_cg); - /* transform inputs */ arity = get_irn_arity(node); in = alloca(arity * sizeof(in[0])); @@ -2842,51 +2976,6 @@ ir_node *gen_ASM(ir_node *node) * ********************************************/ -static ir_node *gen_be_StackParam(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *ptr = get_irn_n(node, be_pos_StackParam_ptr); - ir_node *new_ptr = be_transform_node(ptr); - ir_node *new_op = NULL; - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *nomem = new_rd_NoMem(current_ir_graph); - ir_entity 
*ent = arch_get_frame_entity(env_cg->arch_env, node); - ir_mode *load_mode = get_irn_mode(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_mode *proj_mode; - long pn_res; - - if (mode_is_float(load_mode)) { - FP_USED(env_cg); - if (USE_SSE2(env_cg)) { - new_op = new_rd_ia32_xLoad(dbgi, irg, block, new_ptr, noreg, nomem); - pn_res = pn_ia32_xLoad_res; - proj_mode = mode_xmm; - } else { - new_op = new_rd_ia32_vfld(dbgi, irg, block, new_ptr, noreg, nomem, load_mode); - pn_res = pn_ia32_vfld_res; - proj_mode = mode_vfp; - } - } else { - new_op = new_rd_ia32_Load(dbgi, irg, block, new_ptr, noreg, nomem); - proj_mode = mode_Iu; - pn_res = pn_ia32_Load_res; - } - - set_irn_pinned(new_op, op_pin_state_floats); - set_ia32_frame_ent(new_op, ent); - set_ia32_use_frame(new_op); - - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_flavour(new_op, ia32_am_B); - set_ia32_ls_mode(new_op, load_mode); - set_ia32_flags(new_op, get_ia32_flags(new_op) | arch_irn_flags_rematerializable); - - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - - return new_rd_Proj(dbgi, irg, block, new_op, proj_mode, pn_res); -} - /** * Transforms a FrameAddr into an ia32 Add. */ @@ -2909,96 +2998,6 @@ static ir_node *gen_be_FrameAddr(ir_node *node) { return res; } -/** - * Transforms a FrameLoad into an ia32 Load. - */ -static ir_node *gen_be_FrameLoad(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *mem = get_irn_n(node, be_pos_FrameLoad_mem); - ir_node *new_mem = be_transform_node(mem); - ir_node *ptr = get_irn_n(node, be_pos_FrameLoad_ptr); - ir_node *new_ptr = be_transform_node(ptr); - ir_node *new_op = NULL; - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_entity *ent = arch_get_frame_entity(env_cg->arch_env, node); - ir_mode *mode = get_type_mode(get_entity_type(ent)); - ir_node *projs[pn_Load_max]; - - ia32_collect_Projs(node, projs, pn_Load_max); - - if (mode_is_float(mode)) { - FP_USED(env_cg); - if (USE_SSE2(env_cg)) { - new_op = new_rd_ia32_xLoad(dbgi, irg, block, new_ptr, noreg, new_mem); - } - else { - new_op = new_rd_ia32_vfld(dbgi, irg, block, new_ptr, noreg, new_mem, mode); - } - } - else { - new_op = new_rd_ia32_Load(dbgi, irg, block, new_ptr, noreg, new_mem); - } - - set_irn_pinned(new_op, op_pin_state_floats); - set_ia32_frame_ent(new_op, ent); - set_ia32_use_frame(new_op); - - set_ia32_op_type(new_op, ia32_AddrModeS); - set_ia32_am_flavour(new_op, ia32_am_B); - set_ia32_ls_mode(new_op, mode); - set_ia32_flags(new_op, get_ia32_flags(new_op) | arch_irn_flags_rematerializable); - - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - - return new_op; -} - - -/** - * Transforms a FrameStore into an ia32 Store. 
- */ -static ir_node *gen_be_FrameStore(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *mem = get_irn_n(node, be_pos_FrameStore_mem); - ir_node *new_mem = be_transform_node(mem); - ir_node *ptr = get_irn_n(node, be_pos_FrameStore_ptr); - ir_node *new_ptr = be_transform_node(ptr); - ir_node *val = get_irn_n(node, be_pos_FrameStore_val); - ir_node *new_val = be_transform_node(val); - ir_node *new_op = NULL; - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_entity *ent = arch_get_frame_entity(env_cg->arch_env, node); - ir_mode *mode = get_irn_mode(val); - - if (mode_is_float(mode)) { - FP_USED(env_cg); - if (USE_SSE2(env_cg)) { - new_op = new_rd_ia32_xStore(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); - } else { - new_op = new_rd_ia32_vfst(dbgi, irg, block, new_ptr, noreg, new_val, new_mem, mode); - } - } else if (get_mode_size_bits(mode) == 8) { - new_op = new_rd_ia32_Store8Bit(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); - } else { - new_op = new_rd_ia32_Store(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); - } - - set_ia32_frame_ent(new_op, ent); - set_ia32_use_frame(new_op); - - set_ia32_op_type(new_op, ia32_AddrModeD); - set_ia32_am_flavour(new_op, ia32_am_B); - set_ia32_ls_mode(new_op, mode); - - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); - - return new_op; -} - /** * In case SSE is used we need to copy the result from XMM0 to FPU TOS before return. */ @@ -3058,22 +3057,21 @@ static ir_node *gen_be_Return(ir_node *node) { noreg = ia32_new_NoReg_gp(env_cg); /* store xmm0 onto stack */ - sse_store = new_rd_ia32_xStoreSimple(dbgi, irg, block, frame, noreg, new_ret_val, new_ret_mem); + sse_store = new_rd_ia32_xStoreSimple(dbgi, irg, block, frame, noreg, + new_ret_val, new_ret_mem); set_ia32_ls_mode(sse_store, mode); set_ia32_op_type(sse_store, ia32_AddrModeD); set_ia32_use_frame(sse_store); set_ia32_am_flavour(sse_store, ia32_am_B); - /* load into st0 */ - fld = new_rd_ia32_SetST0(dbgi, irg, block, frame, noreg, sse_store); - set_ia32_ls_mode(fld, mode); + /* load into x87 register */ + fld = new_rd_ia32_vfld(dbgi, irg, block, frame, noreg, sse_store, mode); set_ia32_op_type(fld, ia32_AddrModeS); set_ia32_use_frame(fld); set_ia32_am_flavour(fld, ia32_am_B); - mproj = new_r_Proj(irg, block, fld, mode_M, pn_ia32_SetST0_M); - fld = new_r_Proj(irg, block, fld, mode_vfp, pn_ia32_SetST0_res); - arch_set_irn_register(env_cg->arch_env, fld, &ia32_vfp_regs[REG_VF0]); + mproj = new_r_Proj(irg, block, fld, mode_M, pn_ia32_vfld_M); + fld = new_r_Proj(irg, block, fld, mode_vfp, pn_ia32_vfld_res); /* create a new barrier */ arity = get_irn_arity(barrier); @@ -3119,10 +3117,7 @@ static ir_node *gen_be_AddSP(ir_node *node) { ir_node *nomem = new_NoMem(); ir_node *new_op; - new_sz = try_create_Immediate(sz, 0); - if(new_sz == NULL) { - new_sz = be_transform_node(sz); - } + new_sz = create_immediate_or_transform(sz, 0); /* ia32 stack grows in reverse direction, make a SubSP */ new_op = new_rd_ia32_SubSP(dbgi, irg, block, noreg, noreg, new_sp, new_sz, @@ -3148,10 +3143,7 @@ static ir_node *gen_be_SubSP(ir_node *node) { ir_node *nomem = new_NoMem(); ir_node *new_op; - new_sz = try_create_Immediate(sz, 0); - if(new_sz == NULL) { - new_sz = be_transform_node(sz); - } + new_sz = create_immediate_or_transform(sz, 0); /* ia32 stack grows in reverse direction, make an AddSP */ new_op = new_rd_ia32_AddSP(dbgi, irg, block, noreg, noreg, new_sp, new_sz, 
nomem); @@ -3170,10 +3162,21 @@ static ir_node *gen_Unknown(ir_node *node) { ir_mode *mode = get_irn_mode(node); if (mode_is_float(mode)) { +#if 0 + /* Unknown nodes are buggy in x87 sim, use zero for now... */ if (USE_SSE2(env_cg)) - return ia32_new_Unknown_xmm(env_cg); else return ia32_new_Unknown_vfp(env_cg); +#else + if (!USE_SSE2(env_cg)) { + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *block = get_irg_start_block(irg); + return new_rd_ia32_vfldz(dbgi, irg, block); + } else { + return ia32_new_Unknown_xmm(env_cg); + } +#endif } else if (mode_needs_gp_reg(mode)) { return ia32_new_Unknown_gp(env_cg); } else { @@ -3218,6 +3221,27 @@ static ir_node *gen_Phi(ir_node *node) { return phi; } +/** + * Transform IJmp + */ +static ir_node *gen_IJmp(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *new_op = be_transform_node(get_IJmp_target(node)); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_node *nomem = new_NoMem(); + ir_node *new_node; + + new_node = new_rd_ia32_IJmp(dbgi, irg, block, noreg, noreg, new_op, nomem); + set_ia32_am_support(new_node, ia32_am_Source, ia32_am_unary); + + SET_IA32_ORIG_NODE(new_node, ia32_get_old_node_name(env_cg, node)); + + return new_node; +} + + /********************************************************************** * _ _ _ * | | | | | | @@ -3239,7 +3263,8 @@ typedef ir_node *construct_store_func(dbg_info *db, ir_graph *irg, ir_node *bloc /** * Transforms a lowered Load into a "real" one. */ -static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func, char fp_unit) { +static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *ptr = get_irn_n(node, 0); ir_node *new_ptr = be_transform_node(ptr); @@ -3251,16 +3276,6 @@ static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func, char f ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_node *new_op; - /* - Could be that we have SSE2 unit, but due to 64Bit Div/Conv - lowering we have x87 nodes, so we need to enforce simulation. - */ - if (mode_is_float(mode)) { - FP_USED(env_cg); - if (fp_unit == fp_x87) - FORCE_x87(env_cg); - } - new_op = func(dbgi, irg, block, new_ptr, noreg, new_mem); set_ia32_op_type(new_op, ia32_AddrModeS); @@ -3270,7 +3285,7 @@ static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func, char f set_ia32_am_sc(new_op, get_ia32_am_sc(node)); if (is_ia32_am_sc_sign(node)) set_ia32_am_sc_sign(new_op); - set_ia32_ls_mode(new_op, get_ia32_ls_mode(node)); + set_ia32_ls_mode(new_op, mode); if (is_ia32_use_frame(node)) { set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); set_ia32_use_frame(new_op); @@ -3282,9 +3297,10 @@ static ir_node *gen_lowered_Load(ir_node *node, construct_load_func func, char f } /** -* Transforms a lowered Store into a "real" one. -*/ -static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func, char fp_unit) { + * Transforms a lowered Store into a "real" one. 
+ */ +static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func) +{ ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *ptr = get_irn_n(node, 0); ir_node *new_ptr = be_transform_node(ptr); @@ -3300,16 +3316,6 @@ static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func, char long am_offs; ia32_am_flavour_t am_flav = ia32_B; - /* - Could be that we have SSE2 unit, but due to 64Bit Div/Conv - lowering we have x87 nodes, so we need to enforce simulation. - */ - if (mode_is_float(mode)) { - FP_USED(env_cg); - if (fp_unit == fp_x87) - FORCE_x87(env_cg); - } - new_op = func(dbgi, irg, block, new_ptr, noreg, new_val, new_mem); if ((am_offs = get_ia32_am_offs_int(node)) != 0) { @@ -3337,9 +3343,6 @@ static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func, char */ #define GEN_LOWERED_OP(op) \ static ir_node *gen_ia32_l_##op(ir_node *node) { \ - ir_mode *mode = get_irn_mode(node); \ - if (mode_is_float(mode)) \ - FP_USED(env_cg); \ return gen_binop(node, get_binop_left(node), \ get_binop_right(node), new_rd_ia32_##op,0); \ } @@ -3347,7 +3350,6 @@ static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func, char #define GEN_LOWERED_x87_OP(op) \ static ir_node *gen_ia32_l_##op(ir_node *node) { \ ir_node *new_op; \ - FORCE_x87(env_cg); \ new_op = gen_binop_x87_float(node, get_binop_left(node), \ get_binop_right(node), new_rd_ia32_##op); \ return new_op; \ @@ -3358,20 +3360,20 @@ static ir_node *gen_lowered_Store(ir_node *node, construct_store_func func, char return gen_unop(node, get_unop_op(node), new_rd_ia32_##op); \ } -#define GEN_LOWERED_SHIFT_OP(op) \ - static ir_node *gen_ia32_l_##op(ir_node *node) {\ - return gen_shift_binop(node, get_binop_left(node), \ - get_binop_right(node), new_rd_ia32_##op); \ +#define GEN_LOWERED_SHIFT_OP(l_op, op) \ + static ir_node *gen_ia32_##l_op(ir_node *node) { \ + return gen_shift_binop(node, get_irn_n(node, 0), \ + get_irn_n(node, 1), new_rd_ia32_##op); \ } -#define GEN_LOWERED_LOAD(op, fp_unit) \ - static ir_node *gen_ia32_l_##op(ir_node *node) {\ - return gen_lowered_Load(node, new_rd_ia32_##op, fp_unit); \ +#define GEN_LOWERED_LOAD(op) \ + static ir_node *gen_ia32_l_##op(ir_node *node) { \ + return gen_lowered_Load(node, new_rd_ia32_##op); \ } -#define GEN_LOWERED_STORE(op, fp_unit) \ - static ir_node *gen_ia32_l_##op(ir_node *node) {\ - return gen_lowered_Store(node, new_rd_ia32_##op, fp_unit); \ +#define GEN_LOWERED_STORE(op) \ + static ir_node *gen_ia32_l_##op(ir_node *node) { \ + return gen_lowered_Store(node, new_rd_ia32_##op); \ } GEN_LOWERED_OP(Adc) @@ -3386,13 +3388,58 @@ GEN_LOWERED_x87_OP(vfsub) GEN_LOWERED_UNOP(Neg) -GEN_LOWERED_LOAD(vfild, fp_x87) -GEN_LOWERED_LOAD(Load, fp_none) -/*GEN_LOWERED_STORE(vfist, fp_x87) - *TODO +GEN_LOWERED_LOAD(vfild) +GEN_LOWERED_LOAD(Load) +GEN_LOWERED_STORE(Store) + +/** + * Transforms a l_vfist into a "real" vfist node. 
+ * + * @param env The transformation environment + * @return the created ia32 vfist node */ -GEN_LOWERED_STORE(Store, fp_none) +static ir_node *gen_ia32_l_vfist(ir_node *node) { + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *ptr = get_irn_n(node, 0); + ir_node *new_ptr = be_transform_node(ptr); + ir_node *val = get_irn_n(node, 1); + ir_node *new_val = be_transform_node(val); + ir_node *mem = get_irn_n(node, 2); + ir_node *new_mem = be_transform_node(mem); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_node *noreg = ia32_new_NoReg_gp(env_cg); + ir_mode *mode = get_ia32_ls_mode(node); + ir_node *trunc_mode = ia32_new_Fpu_truncate(env_cg); + ir_node *new_op; + long am_offs; + ia32_am_flavour_t am_flav = ia32_B; + + new_op = new_rd_ia32_vfist(dbgi, irg, block, new_ptr, noreg, new_val, + trunc_mode, new_mem); + + if ((am_offs = get_ia32_am_offs_int(node)) != 0) { + am_flav |= ia32_O; + add_ia32_am_offs_int(new_op, am_offs); + } + + set_ia32_op_type(new_op, ia32_AddrModeD); + set_ia32_am_flavour(new_op, am_flav); + set_ia32_ls_mode(new_op, mode); + set_ia32_frame_ent(new_op, get_ia32_frame_ent(node)); + set_ia32_use_frame(new_op); + + SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, node)); + + return new_op; +} +/** + * Transforms a l_vfdiv into a "real" vfdiv node. + * + * @param env The transformation environment + * @return the created ia32 vfdiv node + */ static ir_node *gen_ia32_l_vfdiv(ir_node *node) { ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *left = get_binop_left(node); @@ -3402,8 +3449,7 @@ static ir_node *gen_ia32_l_vfdiv(ir_node *node) { ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *fpcw = be_abi_get_ignore_irn(env_cg->birg->abi, - &ia32_fp_cw_regs[REG_FPCW]); + ir_node *fpcw = get_fpcw(); ir_node *vfdiv; vfdiv = new_rd_ia32_vfdiv(dbgi, irg, block, noreg, noreg, new_left, @@ -3413,8 +3459,6 @@ static ir_node *gen_ia32_l_vfdiv(ir_node *node) { SET_IA32_ORIG_NODE(vfdiv, ia32_get_old_node_name(env_cg, node)); - FORCE_x87(env_cg); - return vfdiv; } @@ -3433,7 +3477,6 @@ static ir_node *gen_ia32_l_Mul(ir_node *node) { ir_node *noreg = ia32_new_NoReg_gp(env_cg); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *in[2]; /* l_Mul is already a mode_T node, so we create the Mul in the normal way */ /* and then skip the result Proj, because all needed Projs are already there. */ @@ -3442,19 +3485,15 @@ static ir_node *gen_ia32_l_Mul(ir_node *node) { clear_ia32_commutative(muls); set_ia32_am_support(muls, ia32_am_Source, ia32_am_binary); - /* check if EAX and EDX proj exist, add missing one */ - in[0] = new_rd_Proj(dbgi, irg, block, muls, mode_Iu, pn_EAX); - in[1] = new_rd_Proj(dbgi, irg, block, muls, mode_Iu, pn_EDX); - be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 2, in); - SET_IA32_ORIG_NODE(muls, ia32_get_old_node_name(env_cg, node)); return muls; } -GEN_LOWERED_SHIFT_OP(Shl) -GEN_LOWERED_SHIFT_OP(Shr) -GEN_LOWERED_SHIFT_OP(Sar) +GEN_LOWERED_SHIFT_OP(l_ShlDep, Shl) +GEN_LOWERED_SHIFT_OP(l_ShrDep, Shr) +GEN_LOWERED_SHIFT_OP(l_Sar, Sar) +GEN_LOWERED_SHIFT_OP(l_SarDep, Sar) /** * Transforms a l_ShlD/l_ShrD into a ShlD/ShrD. 
Those nodes have 3 data inputs: @@ -3469,8 +3508,8 @@ static ir_node *gen_lowered_64bit_shifts(ir_node *node, ir_node *op1, ir_node *block = be_transform_node(get_nodes_block(node)); ir_node *new_op1 = be_transform_node(op1); ir_node *new_op2 = be_transform_node(op2); - ir_node *new_count = be_transform_node(count); ir_node *new_op = NULL; + ir_node *new_count = be_transform_node(count); ir_graph *irg = current_ir_graph; dbg_info *dbgi = get_irn_dbg_info(node); ir_node *noreg = ia32_new_NoReg_gp(env_cg); @@ -3674,12 +3713,16 @@ static ir_node *gen_Proj_be_AddSP(ir_node *node) { dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); - if (proj == pn_be_AddSP_res) { - ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_AddSP_stack); + if (proj == pn_be_AddSP_sp) { + ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + pn_ia32_SubSP_stack); arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_ESP]); return res; + } else if(proj == pn_be_AddSP_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + pn_ia32_SubSP_addr); } else if (proj == pn_be_AddSP_M) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_AddSP_M); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_SubSP_M); } assert(0); @@ -3697,12 +3740,13 @@ static ir_node *gen_Proj_be_SubSP(ir_node *node) { dbg_info *dbgi = get_irn_dbg_info(node); long proj = get_Proj_proj(node); - if (proj == pn_be_SubSP_res) { - ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, pn_ia32_SubSP_stack); + if (proj == pn_be_SubSP_sp) { + ir_node *res = new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, + pn_ia32_AddSP_stack); arch_set_irn_register(env_cg->arch_env, res, &ia32_gp_regs[REG_ESP]); return res; } else if (proj == pn_be_SubSP_M) { - return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_SubSP_M); + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_AddSP_M); } assert(0); @@ -3727,6 +3771,13 @@ static ir_node *gen_Proj_Load(ir_node *node) { } else if (proj == pn_Load_M) { return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, pn_ia32_Load_M); } + } else if(is_ia32_Conv_I2I(new_pred)) { + set_irn_mode(new_pred, mode_T); + if (proj == pn_Load_res) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_Iu, 0); + } else if (proj == pn_Load_M) { + return new_rd_Proj(dbgi, irg, block, new_pred, mode_M, 1); + } } else if (is_ia32_xLoad(new_pred)) { if (proj == pn_Load_res) { return new_rd_Proj(dbgi, irg, block, new_pred, mode_xmm, pn_ia32_xLoad_res); @@ -3903,13 +3954,15 @@ static ir_node *gen_Proj_tls(ir_node *node) { * Transform the Projs from a be_Call. 
*/ static ir_node *gen_Proj_be_Call(ir_node *node) { - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *call = get_Proj_pred(node); - ir_node *new_call = be_transform_node(call); - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - long proj = get_Proj_proj(node); - ir_mode *mode = get_irn_mode(node); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *call = get_Proj_pred(node); + ir_node *new_call = be_transform_node(call); + ir_graph *irg = current_ir_graph; + dbg_info *dbgi = get_irn_dbg_info(node); + ir_type *method_type = be_Call_get_type(call); + int n_res = get_method_n_ress(method_type); + long proj = get_Proj_proj(node); + ir_mode *mode = get_irn_mode(node); ir_node *sse_load; const arch_register_class_t *cls; @@ -3931,28 +3984,34 @@ static ir_node *gen_Proj_be_Call(ir_node *node) { } if (call_res_pred == NULL || be_is_Call(call_res_pred)) { - return new_rd_Proj(dbgi, irg, block, new_call, mode_M, pn_be_Call_M_regular); + return new_rd_Proj(dbgi, irg, block, new_call, mode_M, + pn_be_Call_M_regular); } else { assert(is_ia32_xLoad(call_res_pred)); - return new_rd_Proj(dbgi, irg, block, call_res_pred, mode_M, pn_ia32_xLoad_M); + return new_rd_Proj(dbgi, irg, block, call_res_pred, mode_M, + pn_ia32_xLoad_M); } } - if (proj == pn_be_Call_first_res && mode_is_float(mode) && USE_SSE2(env_cg)) { + if (USE_SSE2(env_cg) && proj >= pn_be_Call_first_res + && proj < (pn_be_Call_first_res + n_res) && mode_is_float(mode) + && USE_SSE2(env_cg)) { ir_node *fstp; ir_node *frame = get_irg_frame(irg); ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *p; + //ir_node *p; ir_node *call_mem = be_get_Proj_for_pn(call, pn_be_Call_M_regular); - ir_node *keepin[1]; - const arch_register_class_t *cls; + ir_node *call_res; - /* in case there is no memory output: create one to serialize the copy FPU -> SSE */ - call_mem = new_rd_Proj(dbgi, irg, block, new_call, mode_M, pn_be_Call_M_regular); + /* in case there is no memory output: create one to serialize the copy + FPU -> SSE */ + call_mem = new_rd_Proj(dbgi, irg, block, new_call, mode_M, + pn_be_Call_M_regular); + call_res = new_rd_Proj(dbgi, irg, block, new_call, mode, + pn_be_Call_first_res); /* store st(0) onto stack */ - fstp = new_rd_ia32_GetST0(dbgi, irg, block, frame, noreg, call_mem); - - set_ia32_ls_mode(fstp, mode); + fstp = new_rd_ia32_vfst(dbgi, irg, block, frame, noreg, call_res, + call_mem, mode); set_ia32_op_type(fstp, ia32_AddrModeD); set_ia32_use_frame(fstp); set_ia32_am_flavour(fstp, ia32_am_B); @@ -3964,8 +4023,10 @@ static ir_node *gen_Proj_be_Call(ir_node *node) { set_ia32_use_frame(sse_load); set_ia32_am_flavour(sse_load, ia32_am_B); - sse_load = new_rd_Proj(dbgi, irg, block, sse_load, mode_xmm, pn_ia32_xLoad_res); + sse_load = new_rd_Proj(dbgi, irg, block, sse_load, mode_xmm, + pn_ia32_xLoad_res); +#if 0 /* now: create new Keep whith all former ins and one additional in - the result Proj */ /* get a Proj representing a caller save register */ @@ -3975,18 +4036,14 @@ static ir_node *gen_Proj_be_Call(ir_node *node) { /* user of the the proj is the Keep */ p = get_edge_src_irn(get_irn_out_edge_first(p)); assert(be_is_Keep(p) && "Keep expected."); - - /* keep the result */ - cls = arch_get_irn_reg_class(env_cg->arch_env, sse_load, -1); - keepin[0] = sse_load; - be_new_Keep(cls, irg, block, 1, keepin); +#endif return sse_load; } /* transform call modes */ if (mode_is_data(mode)) { - cls = arch_get_irn_reg_class(env_cg->arch_env, node, -1); + cls = 
arch_get_irn_reg_class(env_cg->arch_env, node, -1); mode = cls->mode; } @@ -4001,64 +4058,25 @@ static ir_node *gen_Proj_Cmp(ir_node *node) /* normally Cmps are processed when looking at Cond nodes, but this case * can happen in complicated Psi conditions */ - ir_graph *irg = current_ir_graph; - dbg_info *dbgi = get_irn_dbg_info(node); - ir_node *block = be_transform_node(get_nodes_block(node)); - ir_node *cmp = get_Proj_pred(node); - long pnc = get_Proj_proj(node); - ir_node *cmp_left = get_Cmp_left(cmp); - ir_node *cmp_right = get_Cmp_right(cmp); - ir_node *new_cmp_left; - ir_node *new_cmp_right; - ir_node *noreg = ia32_new_NoReg_gp(env_cg); - ir_node *nomem = new_rd_NoMem(irg); - ir_mode *cmp_mode = get_irn_mode(cmp_left); - ir_node *new_op; + ir_node *cmp = get_Proj_pred(node); + long pnc = get_Proj_proj(node); + ir_node *cmp_left = get_Cmp_left(cmp); + ir_node *cmp_right = get_Cmp_right(cmp); + ir_mode *cmp_mode = get_irn_mode(cmp_left); + dbg_info *dbgi = get_irn_dbg_info(cmp); + ir_node *block = be_transform_node(get_nodes_block(node)); + ir_node *res; assert(!mode_is_float(cmp_mode)); - /* (a != b) -> (a ^ b) */ - if(pnc == pn_Cmp_Lg) { - if(is_Const_0(cmp_left)) { - new_op = be_transform_node(cmp_right); - } else if(is_Const_0(cmp_right)) { - new_op = be_transform_node(cmp_left); - } else { - new_op = gen_binop(cmp, cmp_left, cmp_right, new_rd_ia32_Xor, 1); - } - - return new_op; - } - /* TODO: - * (a == b) -> !(a ^ b) - * (a < 0) -> (a & 0x80000000) oder a >> 31 - * (a >= 0) -> (a >> 31) ^ 1 - */ - if(!mode_is_signed(cmp_mode)) { pnc |= ia32_pn_Cmp_Unsigned; } - new_cmp_right = try_create_Immediate(cmp_right, 0); - if(new_cmp_right == NULL) { - new_cmp_right = try_create_Immediate(cmp_left, 0); - if(new_cmp_right != NULL) { - pnc = get_inversed_pnc(pnc); - new_cmp_left = be_transform_node(cmp_right); - } - } else { - new_cmp_left = be_transform_node(cmp_left); - } - if(new_cmp_right == NULL) { - new_cmp_left = be_transform_node(cmp_left); - new_cmp_right = be_transform_node(cmp_right); - } - - new_op = new_rd_ia32_CmpSet(dbgi, irg, block, noreg, noreg, new_cmp_left, - new_cmp_right, nomem, pnc); - SET_IA32_ORIG_NODE(new_op, ia32_get_old_node_name(env_cg, cmp)); + res = create_set(pnc, cmp_left, cmp_right, dbgi, block); + SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(env_cg, cmp)); - return new_op; + return res; } /** @@ -4070,14 +4088,14 @@ static ir_node *gen_Proj(ir_node *node) { ir_node *pred = get_Proj_pred(node); long proj = get_Proj_proj(node); - if (is_Store(pred) || be_is_FrameStore(pred)) { + if (is_Store(pred)) { if (proj == pn_Store_M) { return be_transform_node(pred); } else { assert(0); return new_r_Bad(irg); } - } else if (is_Load(pred) || be_is_FrameLoad(pred)) { + } else if (is_Load(pred)) { return gen_Proj_Load(node); } else if (is_Div(pred) || is_Mod(pred) || is_DivMod(pred)) { return gen_Proj_DivMod(node); @@ -4108,7 +4126,11 @@ static ir_node *gen_Proj(ir_node *node) { if (node == be_get_old_anchor(anchor_tls)) { return gen_Proj_tls(node); } +#ifdef FIRM_EXT_GRS + } else if(!is_ia32_irn(pred)) { // Quick hack for SIMD optimization +#else } else { +#endif ir_node *new_pred = be_transform_node(pred); ir_node *block = be_transform_node(get_nodes_block(node)); ir_mode *mode = get_irn_mode(node); @@ -4128,8 +4150,9 @@ static ir_node *gen_Proj(ir_node *node) { /** * Enters all transform functions into the generic pointer */ -static void register_transformers(void) { - ir_op *op_Max, *op_Min, *op_Mulh; +static void register_transformers(void) +{ + ir_op *op_Mulh; /* 
first clear the generic function pointer for all ops */
 	clear_irp_opcodes_generic_func();
@@ -4166,11 +4189,11 @@ static void register_transformers(void) {
 	GEN(ASM);
 	GEN(CopyB);
-	//GEN(Mux);
 	BAD(Mux);
 	GEN(Psi);
 	GEN(Proj);
 	GEN(Phi);
+	GEN(IJmp);
 
 	/* transform ops from intrinsic lowering */
 	GEN(ia32_l_Add);
 	GEN(ia32_l_Adc);
@@ -4181,9 +4204,10 @@ static void register_transformers(void) {
 	GEN(ia32_l_Mul);
 	GEN(ia32_l_Xor);
 	GEN(ia32_l_IMul);
-	GEN(ia32_l_Shl);
-	GEN(ia32_l_Shr);
+	GEN(ia32_l_ShlDep);
+	GEN(ia32_l_ShrDep);
 	GEN(ia32_l_Sar);
+	GEN(ia32_l_SarDep);
 	GEN(ia32_l_ShlD);
 	GEN(ia32_l_ShrD);
 	GEN(ia32_l_vfdiv);
@@ -4192,7 +4216,7 @@ static void register_transformers(void) {
 	GEN(ia32_l_vfsub);
 	GEN(ia32_l_vfild);
 	GEN(ia32_l_Load);
-	/* GEN(ia32_l_vfist); TODO */
+	GEN(ia32_l_vfist);
 	GEN(ia32_l_Store);
 	GEN(ia32_l_X87toSSE);
 	GEN(ia32_l_SSEtoX87);
@@ -4219,9 +4243,6 @@ static void register_transformers(void) {
 	GEN(be_FrameAddr);
 	//GEN(be_Call);
 	GEN(be_Return);
-	GEN(be_FrameLoad);
-	GEN(be_FrameStore);
-	GEN(be_StackParam);
 	GEN(be_AddSP);
 	GEN(be_SubSP);
 	GEN(be_Copy);
@@ -4229,12 +4250,6 @@ static void register_transformers(void) {
 	/* set the register for all Unknown nodes */
 	GEN(Unknown);
 
-	op_Max = get_op_Max();
-	if (op_Max)
-		GEN(Max);
-	op_Min = get_op_Min();
-	if (op_Min)
-		GEN(Min);
 	op_Mulh = get_op_Mulh();
 	if (op_Mulh)
 		GEN(Mulh);
@@ -4255,13 +4270,92 @@ static void ia32_pretransform_node(void *arch_cg) {
 	cg->noreg_gp = be_pre_transform_node(cg->noreg_gp);
 	cg->noreg_vfp = be_pre_transform_node(cg->noreg_vfp);
 	cg->noreg_xmm = be_pre_transform_node(cg->noreg_xmm);
+	get_fpcw();
+}
+
+/**
+ * Walker, checks if all ia32 nodes producing more than one result have
+ * their Projs, otherwise creates new Projs and keeps them using a be_Keep node.
+ */
+static
+void add_missing_keep_walker(ir_node *node, void *data)
+{
+	int n_outs, i;
+	unsigned found_projs = 0;
+	const ir_edge_t *edge;
+	ir_mode *mode = get_irn_mode(node);
+	ir_node *last_keep;
+	(void) data;
+	if(mode != mode_T)
+		return;
+	if(!is_ia32_irn(node))
+		return;
+
+	n_outs = get_ia32_n_res(node);
+	if(n_outs <= 0)
+		return;
+	if(is_ia32_SwitchJmp(node))
+		return;
+
+	assert(n_outs < (int) sizeof(unsigned) * 8);
+	foreach_out_edge(node, edge) {
+		ir_node *proj = get_edge_src_irn(edge);
+		int pn = get_Proj_proj(proj);
+
+		assert(pn < n_outs);
+		found_projs |= 1 << pn;
+	}
+
+
+	/* are keeps missing? */
+	last_keep = NULL;
+	for(i = 0; i < n_outs; ++i) {
+		ir_node *block;
+		ir_node *in[1];
+		const arch_register_req_t *req;
+		const arch_register_class_t *class;
+
+		if(found_projs & (1 << i)) {
+			continue;
+		}
+
+		req = get_ia32_out_req(node, i);
+		class = req->cls;
+		if(class == NULL) {
+			continue;
+		}
+
+		block = get_nodes_block(node);
+		in[0] = new_r_Proj(current_ir_graph, block, node,
+		                   arch_register_class_mode(class), i);
+		if(last_keep != NULL) {
+			be_Keep_add_node(last_keep, class, in[0]);
+		} else {
+			last_keep = be_new_Keep(class, current_ir_graph, block, 1, in);
+		}
+	}
+}
+
+/**
+ * Adds missing keeps to nodes. Adds missing Proj nodes for unused outputs
+ * and keeps them.
+ */
+static
+void add_missing_keeps(ia32_code_gen_t *cg)
+{
+	ir_graph *irg = be_get_birg_irg(cg->birg);
+	irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL);
 }
 
 /* do the transformation */
 void ia32_transform_graph(ia32_code_gen_t *cg) {
 	register_transformers();
 	env_cg = cg;
+	initial_fpcw = NULL;
 	be_transform_graph(cg->birg, ia32_pretransform_node, cg);
+	edges_verify(cg->irg);
+	add_missing_keeps(cg);
+	edges_verify(cg->irg);
 }
 
 void ia32_init_transform(void)