X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;f=ir%2Fbe%2Fia32%2Fia32_optimize.c;h=d844e9e1e144dcb6e733d10376af16b93074062c;hb=6b1c4998aa018f971620bb1eb72898ee7b905371;hp=e6b24f56d96f0dad1d0e90e7663112e5d005a672;hpb=fcb579b8959da1d7563b1a7b9f008a423ffdf75a;p=libfirm diff --git a/ir/be/ia32/ia32_optimize.c b/ir/be/ia32/ia32_optimize.c index e6b24f56d..d844e9e1e 100644 --- a/ir/be/ia32/ia32_optimize.c +++ b/ir/be/ia32/ia32_optimize.c @@ -23,9 +23,7 @@ * @author Matthias Braun, Christian Wuerdig * @version $Id$ */ -#ifdef HAVE_CONFIG_H #include "config.h" -#endif #include "irnode.h" #include "irprog_t.h" @@ -43,8 +41,8 @@ #include "../be_t.h" #include "../beabi.h" -#include "../benode_t.h" -#include "../besched_t.h" +#include "../benode.h" +#include "../besched.h" #include "../bepeephole.h" #include "ia32_new_nodes.h" @@ -141,40 +139,6 @@ check_shift_amount: produces_flag_zero : produces_no_flag; } -/** - * If the given node has not mode_T, creates a mode_T version (with a result Proj). - * - * @param node the node to change - * - * @return the new mode_T node (if the mode was changed) or node itself - */ -static ir_node *turn_into_mode_t(ir_node *node) -{ - ir_node *block; - ir_node *res_proj; - ir_node *new_node; - const arch_register_t *reg; - - if(get_irn_mode(node) == mode_T) - return node; - - assert(get_irn_mode(node) == mode_Iu); - - new_node = exact_copy(node); - set_irn_mode(new_node, mode_T); - - block = get_nodes_block(new_node); - res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu, - pn_ia32_res); - - reg = arch_get_irn_register(node); - arch_set_irn_register(res_proj, reg); - - sched_add_before(node, new_node); - be_peephole_exchange(node, res_proj); - return new_node; -} - /** * Replace Cmp(x, 0) by a Test(x, x) */ @@ -183,7 +147,6 @@ static void peephole_ia32_Cmp(ir_node *const node) ir_node *right; ia32_immediate_attr_t const *imm; dbg_info *dbgi; - ir_graph *irg; ir_node *block; ir_node *noreg; ir_node *nomem; @@ -208,26 +171,25 @@ static void peephole_ia32_Cmp(ir_node *const node) return; dbgi = get_irn_dbg_info(node); - irg = current_ir_graph; block = get_nodes_block(node); noreg = ia32_new_NoReg_gp(cg); - nomem = get_irg_no_mem(irg); + nomem = get_irg_no_mem(current_ir_graph); op = get_irn_n(node, n_ia32_Cmp_left); attr = get_irn_generic_attr(node); ins_permuted = attr->data.ins_permuted; cmp_unsigned = attr->data.cmp_unsigned; if (is_ia32_Cmp(node)) { - test = new_rd_ia32_Test(dbgi, irg, block, noreg, noreg, nomem, + test = new_bd_ia32_Test(dbgi, block, noreg, noreg, nomem, op, op, ins_permuted, cmp_unsigned); } else { - test = new_rd_ia32_Test8Bit(dbgi, irg, block, noreg, noreg, nomem, + test = new_bd_ia32_Test8Bit(dbgi, block, noreg, noreg, nomem, op, op, ins_permuted, cmp_unsigned); } set_ia32_ls_mode(test, get_ia32_ls_mode(node)); - reg = arch_get_irn_register(node); - arch_set_irn_register(test, reg); + reg = arch_irn_get_register(node, pn_ia32_Cmp_eflags); + arch_irn_set_register(test, pn_ia32_Test_eflags, reg); foreach_out_edge_safe(node, edge, tmp) { ir_node *const user = get_edge_src_irn(edge); @@ -243,95 +205,148 @@ static void peephole_ia32_Cmp(ir_node *const node) /** * Peephole optimization for Test instructions. - * We can remove the Test, if a zero flags was produced which is still - * live. 
+ * - Remove the Test, if an appropriate flag was produced which is still live
+ * - Change a Test(x, c) to 8Bit, if 0 <= c < 256 (3 byte shorter opcode)
  */
 static void peephole_ia32_Test(ir_node *node)
 {
-	ir_node *left = get_irn_n(node, n_ia32_Test_left);
-	ir_node *right = get_irn_n(node, n_ia32_Test_right);
-	ir_node *flags_proj;
-	ir_node *block;
-	ir_mode *flags_mode;
-	int pn = pn_ia32_res;
-	ir_node *schedpoint;
-	const ir_edge_t *edge;
+	ir_node *left = get_irn_n(node, n_ia32_Test_left);
+	ir_node *right = get_irn_n(node, n_ia32_Test_right);
 
 	assert(n_ia32_Test_left == n_ia32_Test8Bit_left
 			&& n_ia32_Test_right == n_ia32_Test8Bit_right);
 
-	/* we need a test for 0 */
-	if(left != right)
-		return;
-
-	block = get_nodes_block(node);
-	if(get_nodes_block(left) != block)
-		return;
-
-	if(is_Proj(left)) {
-		pn = get_Proj_proj(left);
-		left = get_Proj_pred(left);
-	}
-
-	/* happens rarely, but if it does the code will panic */
-	if (is_ia32_Unknown_GP(left))
-		return;
+	if (left == right) { /* we need a test for 0 */
+		ir_node *block = get_nodes_block(node);
+		int pn = pn_ia32_res;
+		ir_node *flags_proj;
+		ir_mode *flags_mode;
+		ir_node *schedpoint;
+		const ir_edge_t *edge;
 
-	/* walk schedule up and abort when we find left or some other node destroys
-	   the flags */
-	schedpoint = node;
-	for (;;) {
-		schedpoint = sched_prev(schedpoint);
-		if (schedpoint == left)
-			break;
-		if (arch_irn_is(schedpoint, modify_flags))
+		if (get_nodes_block(left) != block)
 			return;
-		if (schedpoint == block)
-			panic("couldn't find left");
-	}
 
-	/* make sure only Lg/Eq tests are used */
-	foreach_out_edge(node, edge) {
-		ir_node *user = get_edge_src_irn(edge);
-		int pnc = get_ia32_condcode(user);
+		if (is_Proj(left)) {
+			pn = get_Proj_proj(left);
+			left = get_Proj_pred(left);
+		}
 
-		if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
+		/* happens rarely, but if it does the code will panic */
+		if (is_ia32_Unknown_GP(left))
 			return;
+
+		/* walk schedule up and abort when we find left or some other node destroys
+		   the flags */
+		schedpoint = node;
+		for (;;) {
+			schedpoint = sched_prev(schedpoint);
+			if (schedpoint == left)
+				break;
+			if (arch_irn_is(schedpoint, modify_flags))
+				return;
+			if (schedpoint == block)
+				panic("couldn't find left");
 		}
-	}
 
-	switch (produces_test_flag(left, pn)) {
-	case produces_flag_zero:
-		break;
+		/* make sure only Lg/Eq tests are used */
+		foreach_out_edge(node, edge) {
+			ir_node *user = get_edge_src_irn(edge);
+			int pnc = get_ia32_condcode(user);
 
-	case produces_flag_carry:
-		foreach_out_edge(node, edge) {
-			ir_node *user = get_edge_src_irn(edge);
-			int pnc = get_ia32_condcode(user);
+			if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
+				return;
+			}
+		}
 
-			switch (pnc) {
-			case pn_Cmp_Eq: pnc = pn_Cmp_Ge | ia32_pn_Cmp_unsigned; break;
-			case pn_Cmp_Lg: pnc = pn_Cmp_Lt | ia32_pn_Cmp_unsigned; break;
-			default: panic("unexpected pn");
+		switch (produces_test_flag(left, pn)) {
+		case produces_flag_zero:
+			break;
+
+		case produces_flag_carry:
+			foreach_out_edge(node, edge) {
+				ir_node *user = get_edge_src_irn(edge);
+				int pnc = get_ia32_condcode(user);
+
+				switch (pnc) {
+				case pn_Cmp_Eq: pnc = pn_Cmp_Ge | ia32_pn_Cmp_unsigned; break;
+				case pn_Cmp_Lg: pnc = pn_Cmp_Lt | ia32_pn_Cmp_unsigned; break;
+				default: panic("unexpected pn");
+				}
+				set_ia32_condcode(user, pnc);
 			}
-			set_ia32_condcode(user, pnc);
+			break;
+
+		default:
+			return;
+		}
+
+		if (get_irn_mode(left) != mode_T) {
+			set_irn_mode(left, mode_T);
+
+			/* If there are other users, reroute them to result proj */
+			if (get_irn_n_edges(left) != 2) {
+				
ir_node *res = new_r_Proj(block, left, mode_Iu, pn_ia32_res); + + edges_reroute(left, res, current_ir_graph); + /* Reattach the result proj to left */ + set_Proj_pred(res, left); } - break; + } - default: - return; - } + flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode; + flags_proj = new_r_Proj(block, left, flags_mode, pn_ia32_flags); + arch_set_irn_register(flags_proj, &ia32_flags_regs[REG_EFLAGS]); + + assert(get_irn_mode(node) != mode_T); + + be_peephole_exchange(node, flags_proj); + } else if (is_ia32_Immediate(right)) { + ia32_immediate_attr_t const *const imm = get_ia32_immediate_attr_const(right); + unsigned offset; - left = turn_into_mode_t(left); + /* A test with a symconst is rather strange, but better safe than sorry */ + if (imm->symconst != NULL) + return; - flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode; - flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode, - pn_ia32_flags); - arch_set_irn_register(flags_proj, &ia32_flags_regs[REG_EFLAGS]); + offset = imm->offset; + if (get_ia32_op_type(node) == ia32_AddrModeS) { + ia32_attr_t *const attr = get_irn_generic_attr(node); + + if ((offset & 0xFFFFFF00) == 0) { + /* attr->am_offs += 0; */ + } else if ((offset & 0xFFFF00FF) == 0) { + ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 8); + set_irn_n(node, n_ia32_Test_right, imm); + attr->am_offs += 1; + } else if ((offset & 0xFF00FFFF) == 0) { + ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 16); + set_irn_n(node, n_ia32_Test_right, imm); + attr->am_offs += 2; + } else if ((offset & 0x00FFFFFF) == 0) { + ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 24); + set_irn_n(node, n_ia32_Test_right, imm); + attr->am_offs += 3; + } else { + return; + } + } else if (offset < 256) { + arch_register_t const* const reg = arch_get_irn_register(left); - assert(get_irn_mode(node) != mode_T); + if (reg != &ia32_gp_regs[REG_EAX] && + reg != &ia32_gp_regs[REG_EBX] && + reg != &ia32_gp_regs[REG_ECX] && + reg != &ia32_gp_regs[REG_EDX]) { + return; + } + } else { + return; + } - be_peephole_exchange(node, flags_proj); + /* Technically we should build a Test8Bit because of the register + * constraints, but nobody changes registers at this point anymore. 
*/ + set_ia32_ls_mode(node, mode_Bu); + } } /** @@ -354,7 +369,7 @@ static void peephole_ia32_Return(ir_node *node) { /* the return node itself, ignore */ continue; case iro_Start: - case beo_RegParams: + case beo_Start: case beo_Barrier: /* ignore no code generated */ continue; @@ -477,20 +492,20 @@ static void peephole_IncSP_Store_to_push(ir_node *irn) mem = get_irn_n(store, n_ia32_mem); spreg = arch_get_irn_register(curr_sp); - push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp); + push = new_bd_ia32_Push(get_irn_dbg_info(store), block, noreg, noreg, mem, val, curr_sp); copy_mark(store, push); if (first_push == NULL) first_push = push; - sched_add_after(curr_sp, push); + sched_add_after(skip_Proj(curr_sp), push); /* create stackpointer Proj */ - curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack); + curr_sp = new_r_Proj(block, push, spmode, pn_ia32_Push_stack); arch_set_irn_register(curr_sp, spreg); /* create memory Proj */ - mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M); + mem_proj = new_r_Proj(block, push, mode_M, pn_ia32_Push_M); /* use the memproj now */ be_peephole_exchange(store, mem_proj); @@ -517,10 +532,12 @@ static void peephole_store_incsp(ir_node *store) dbg_info *dbgi; ir_node *node; ir_node *block; - ir_node *noref; + ir_node *noreg; ir_node *mem; ir_node *push; ir_node *val; + ir_node *base; + ir_node *index; ir_node *am_base = get_irn_n(store, n_ia32_Store_base); if (!be_is_IncSP(am_base) || get_nodes_block(am_base) != get_nodes_block(store)) @@ -569,7 +586,7 @@ static void peephole_store_incsp(ir_node *store) || get_ia32_op_type(node) == ia32_AddrModeD)) { int node_offset = get_ia32_am_offs_int(node); ir_mode *node_ls_mode = get_ia32_ls_mode(node); - int node_size = get_mode_size_bytes(node); + int node_size = get_mode_size_bytes(node_ls_mode); /* overlapping with our position? 
abort */ if (node_offset < my_offset + my_store_size && node_offset + node_size >= my_offset) @@ -587,9 +604,9 @@ static void peephole_store_incsp(ir_node *store) dbgi = get_irn_dbg_info(store); block = get_nodes_block(store); noreg = ia32_new_NoReg_gp(cg); - val = get_ia32_ + val = get_irn_n(store, n_ia32_Store_val); - push = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, mem, + push = new_bd_ia32_Push(dbgi, block, noreg, noreg, mem, create_push(dbgi, current_ir_graph, block, am_base, store); } @@ -598,7 +615,7 @@ static void peephole_store_incsp(ir_node *store) /** * Return true if a mode can be stored in the GP register set */ -static INLINE int mode_needs_gp_reg(ir_mode *mode) { +static inline int mode_needs_gp_reg(ir_mode *mode) { if (mode == mode_fpcw) return 0; if (get_mode_size_bits(mode) > 32) @@ -699,7 +716,7 @@ static void peephole_Load_IncSP_to_pop(ir_node *irn) if (loads[loadslot] != NULL) break; - dreg = arch_get_irn_register(node); + dreg = arch_irn_get_register(node, pn_ia32_Load_res); if (regmask & (1 << dreg->index)) { /* this register is already used */ break; @@ -729,7 +746,7 @@ static void peephole_Load_IncSP_to_pop(ir_node *irn) block = get_nodes_block(irn); irg = cg->irg; if (inc_ofs > 0) { - pred_sp = be_new_IncSP(esp, irg, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn)); + pred_sp = be_new_IncSP(esp, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn)); sched_add_before(irn, pred_sp); } @@ -741,15 +758,15 @@ static void peephole_Load_IncSP_to_pop(ir_node *irn) const arch_register_t *reg; mem = get_irn_n(load, n_ia32_mem); - reg = arch_get_irn_register(load); + reg = arch_irn_get_register(load, pn_ia32_Load_res); - pop = new_rd_ia32_Pop(get_irn_dbg_info(load), irg, block, mem, pred_sp); - arch_set_irn_register(pop, reg); + pop = new_bd_ia32_Pop(get_irn_dbg_info(load), block, mem, pred_sp); + arch_irn_set_register(pop, pn_ia32_Load_res, reg); copy_mark(load, pop); /* create stackpointer Proj */ - pred_sp = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack); + pred_sp = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_stack); arch_set_irn_register(pred_sp, esp); sched_add_before(irn, pop); @@ -794,7 +811,6 @@ static const arch_register_t *get_free_gp_reg(void) * Creates a Pop instruction before the given schedule point. 
* * @param dbgi debug info - * @param irg the graph * @param block the block * @param stack the previous stack value * @param schedpoint the new node is added before this node @@ -802,7 +818,7 @@ static const arch_register_t *get_free_gp_reg(void) * * @return the new stack value */ -static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block, +static ir_node *create_pop(dbg_info *dbgi, ir_node *block, ir_node *stack, ir_node *schedpoint, const arch_register_t *reg) { @@ -812,17 +828,17 @@ static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block, ir_node *val; ir_node *in[1]; - pop = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack); + pop = new_bd_ia32_Pop(dbgi, block, new_NoMem(), stack); - stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack); + stack = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_stack); arch_set_irn_register(stack, esp); - val = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res); + val = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_res); arch_set_irn_register(val, reg); sched_add_before(schedpoint, pop); in[0] = val; - keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in); + keep = be_new_Keep(block, 1, in); sched_add_before(schedpoint, keep); return stack; @@ -832,7 +848,6 @@ static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block, * Creates a Push instruction before the given schedule point. * * @param dbgi debug info - * @param irg the graph * @param block the block * @param stack the previous stack value * @param schedpoint the new node is added before this node @@ -840,18 +855,18 @@ static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block, * * @return the new stack value */ -static ir_node *create_push(dbg_info *dbgi, ir_graph *irg, ir_node *block, +static ir_node *create_push(dbg_info *dbgi, ir_node *block, ir_node *stack, ir_node *schedpoint) { const arch_register_t *esp = &ia32_gp_regs[REG_ESP]; ir_node *val = ia32_new_Unknown_gp(cg); ir_node *noreg = ia32_new_NoReg_gp(cg); - ir_node *nomem = get_irg_no_mem(irg); - ir_node *push = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack); + ir_node *nomem = new_NoMem(); + ir_node *push = new_bd_ia32_Push(dbgi, block, noreg, noreg, nomem, val, stack); sched_add_before(schedpoint, push); - stack = new_r_Proj(irg, block, push, mode_Iu, pn_ia32_Push_stack); + stack = new_r_Proj(block, push, mode_Iu, pn_ia32_Push_stack); arch_set_irn_register(stack, esp); return stack; @@ -864,7 +879,6 @@ static void peephole_be_IncSP(ir_node *node) { const arch_register_t *esp = &ia32_gp_regs[REG_ESP]; const arch_register_t *reg; - ir_graph *irg = current_ir_graph; dbg_info *dbgi; ir_node *block; ir_node *stack; @@ -900,19 +914,19 @@ static void peephole_be_IncSP(ir_node *node) block = get_nodes_block(node); stack = be_get_IncSP_pred(node); - stack = create_pop(dbgi, irg, block, stack, node, reg); + stack = create_pop(dbgi, block, stack, node, reg); if (offset == -8) { - stack = create_pop(dbgi, irg, block, stack, node, reg); + stack = create_pop(dbgi, block, stack, node, reg); } } else { dbgi = get_irn_dbg_info(node); block = get_nodes_block(node); stack = be_get_IncSP_pred(node); - stack = create_push(dbgi, irg, block, stack, node); + stack = create_push(dbgi, block, stack, node); if (offset == +8) { - stack = create_push(dbgi, irg, block, stack, node); + stack = create_push(dbgi, block, stack, node); } } @@ -926,12 +940,9 @@ static void peephole_ia32_Const(ir_node *node) { const ia32_immediate_attr_t *attr = 
get_ia32_immediate_attr_const(node); const arch_register_t *reg; - ir_graph *irg = current_ir_graph; ir_node *block; dbg_info *dbgi; - ir_node *produceval; ir_node *xor; - ir_node *noreg; /* try to transform a mov 0, reg to xor reg reg */ if (attr->offset != 0 || attr->symconst != NULL) @@ -946,24 +957,18 @@ static void peephole_ia32_Const(ir_node *node) assert(be_peephole_get_reg_value(reg) == NULL); /* create xor(produceval, produceval) */ - block = get_nodes_block(node); - dbgi = get_irn_dbg_info(node); - produceval = new_rd_ia32_ProduceVal(dbgi, irg, block); - arch_set_irn_register(produceval, reg); - - noreg = ia32_new_NoReg_gp(cg); - xor = new_rd_ia32_Xor(dbgi, irg, block, noreg, noreg, new_NoMem(), - produceval, produceval); + block = get_nodes_block(node); + dbgi = get_irn_dbg_info(node); + xor = new_bd_ia32_Xor0(dbgi, block); arch_set_irn_register(xor, reg); - sched_add_before(node, produceval); sched_add_before(node, xor); copy_mark(node, xor); be_peephole_exchange(node, xor); } -static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node) +static inline int is_noreg(ia32_code_gen_t *cg, const ir_node *node) { return node == cg->noreg_gp; } @@ -972,8 +977,8 @@ static ir_node *create_immediate_from_int(int val) { ir_graph *irg = current_ir_graph; ir_node *start_block = get_irg_start_block(irg); - ir_node *immediate = new_rd_ia32_Immediate(NULL, irg, start_block, NULL, - 0, val); + ir_node *immediate + = new_bd_ia32_Immediate(NULL, start_block, NULL, 0, 0, val); arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]); return immediate; @@ -981,14 +986,16 @@ static ir_node *create_immediate_from_int(int val) static ir_node *create_immediate_from_am(const ir_node *node) { - ir_graph *irg = get_irn_irg(node); ir_node *block = get_nodes_block(node); int offset = get_ia32_am_offs_int(node); int sc_sign = is_ia32_am_sc_sign(node); + const ia32_attr_t *attr = get_ia32_attr_const(node); + int sc_no_pic_adjust = attr->data.am_sc_no_pic_adjust; ir_entity *entity = get_ia32_am_sc(node); ir_node *res; - res = new_rd_ia32_Immediate(NULL, irg, block, entity, sc_sign, offset); + res = new_bd_ia32_Immediate(NULL, block, entity, sc_sign, sc_no_pic_adjust, + offset); arch_set_irn_register(res, &ia32_gp_regs[REG_GP_NOREG]); return res; } @@ -1014,7 +1021,6 @@ static int is_am_minus_one(const ir_node *node) */ static void peephole_ia32_Lea(ir_node *node) { - ir_graph *irg = current_ir_graph; ir_node *base; ir_node *index; const arch_register_t *base_reg; @@ -1032,7 +1038,7 @@ static void peephole_ia32_Lea(ir_node *node) assert(is_ia32_Lea(node)); - /* we can only do this if are allowed to globber the flags */ + /* we can only do this if it is allowed to clobber the flags */ if(be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL) return; @@ -1123,14 +1129,14 @@ make_add_immediate: if(is_am_one(node)) { dbgi = get_irn_dbg_info(node); block = get_nodes_block(node); - res = new_rd_ia32_Inc(dbgi, irg, block, op1); + res = new_bd_ia32_Inc(dbgi, block, op1); arch_set_irn_register(res, out_reg); goto exchange; } if(is_am_minus_one(node)) { dbgi = get_irn_dbg_info(node); block = get_nodes_block(node); - res = new_rd_ia32_Dec(dbgi, irg, block, op1); + res = new_bd_ia32_Dec(dbgi, block, op1); arch_set_irn_register(res, out_reg); goto exchange; } @@ -1142,7 +1148,7 @@ make_add: block = get_nodes_block(node); noreg = ia32_new_NoReg_gp(cg); nomem = new_NoMem(); - res = new_rd_ia32_Add(dbgi, irg, block, noreg, noreg, nomem, op1, op2); + res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, 
op1, op2); arch_set_irn_register(res, out_reg); set_ia32_commutative(res); goto exchange; @@ -1152,12 +1158,12 @@ make_shl: block = get_nodes_block(node); noreg = ia32_new_NoReg_gp(cg); nomem = new_NoMem(); - res = new_rd_ia32_Shl(dbgi, irg, block, op1, op2); + res = new_bd_ia32_Shl(dbgi, block, op1, op2); arch_set_irn_register(res, out_reg); goto exchange; exchange: - SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node)); + SET_IA32_ORIG_NODE(res, node); /* add new ADD/SHL to schedule */ DBG_OPT_LEA2ADD(node, res); @@ -1194,14 +1200,42 @@ static void peephole_ia32_Imul_split(ir_node *imul) /** * Replace xorps r,r and xorpd r,r by pxor r,r */ -static void peephole_ia32_xZero(ir_node *xor) { +static void peephole_ia32_xZero(ir_node *xor) +{ set_irn_op(xor, op_ia32_xPzero); } +/** + * Replace 16bit sign extension from ax to eax by shorter cwtl + */ +static void peephole_ia32_Conv_I2I(ir_node *node) +{ + const arch_register_t *eax = &ia32_gp_regs[REG_EAX]; + ir_mode *smaller_mode = get_ia32_ls_mode(node); + ir_node *val = get_irn_n(node, n_ia32_Conv_I2I_val); + dbg_info *dbgi; + ir_node *block; + ir_node *cwtl; + + if (get_mode_size_bits(smaller_mode) != 16 || + !mode_is_signed(smaller_mode) || + eax != arch_get_irn_register(val) || + eax != arch_irn_get_register(node, pn_ia32_Conv_I2I_res)) + return; + + dbgi = get_irn_dbg_info(node); + block = get_nodes_block(node); + cwtl = new_bd_ia32_Cwtl(dbgi, block, val); + arch_set_irn_register(cwtl, eax); + sched_add_before(node, cwtl); + be_peephole_exchange(node, cwtl); +} + /** * Register a peephole optimisation function. */ -static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) { +static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) +{ assert(op->ops.generic == NULL); op->ops.generic = (op_func)func; } @@ -1225,6 +1259,8 @@ void ia32_peephole_optimization(ia32_code_gen_t *new_cg) register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split); if (ia32_cg_config.use_pxor) register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero); + if (ia32_cg_config.use_short_sex_eax) + register_peephole_optimisation(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I); be_peephole_opt(cg->birg); } @@ -1234,7 +1270,7 @@ void ia32_peephole_optimization(ia32_code_gen_t *new_cg) * all it's Projs are removed as well. * @param irn The irn to be removed from schedule */ -static INLINE void try_kill(ir_node *node) +static inline void try_kill(ir_node *node) { if(get_irn_mode(node) == mode_T) { const ir_edge_t *edge, *next; @@ -1408,6 +1444,9 @@ static void optimize_conv_conv(ir_node *node) } } + /* Some user (like Phis) won't be happy if we change the mode. */ + set_irn_mode(result_conv, get_irn_mode(node)); + /* kill the conv */ exchange(node, result_conv);
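
Note on the new peephole_ia32_Conv_I2I handler added above: the patch itself only states that a 16-bit sign extension from ax to eax is replaced by the shorter cwtl. As a rough illustration of what that means at the instruction level (the byte counts below are the usual x86 encodings, not something stated in the patch):

    movswl %ax, %eax    # 16-bit sign extension into eax, 3 bytes (0f bf c0)
    cwtl                # same effect when the value is already in ax, 1 byte (98)

This is also why the handler returns early unless both the Conv operand and its result are allocated to eax: cwtl only works on the ax/eax pair.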