X-Git-Url: http://nsz.repo.hu/git/?a=blobdiff_plain;ds=sidebyside;f=ir%2Fbe%2Fia32%2Fia32_optimize.c;h=05e1470d730ca7fdd1d08dcf8635af867d99ec93;hb=5bb8cd35a8074112fa2da8b8f68d7c77918119e5;hp=d844e9e1e144dcb6e733d10376af16b93074062c;hpb=44dd4365d606bc60bbbf97200b89c903d788ce45;p=libfirm

diff --git a/ir/be/ia32/ia32_optimize.c b/ir/be/ia32/ia32_optimize.c
index d844e9e1e..05e1470d7 100644
--- a/ir/be/ia32/ia32_optimize.c
+++ b/ir/be/ia32/ia32_optimize.c
@@ -232,12 +232,8 @@ static void peephole_ia32_Test(ir_node *node)
 		left = get_Proj_pred(left);
 	}
 
-	/* happens rarely, but if it does code will panic' */
-	if (is_ia32_Unknown_GP(left))
-		return;
-
-	/* walk schedule up and abort when we find left or some other node destroys
-	   the flags */
+	/* walk schedule up and abort when we find left or some other node
+	 * destroys the flags */
 	schedpoint = node;
 	for (;;) {
 		schedpoint = sched_prev(schedpoint);
@@ -254,7 +250,7 @@ static void peephole_ia32_Test(ir_node *node)
 			ir_node *user = get_edge_src_irn(edge);
 			int pnc = get_ia32_condcode(user);
 
-			if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
+			if (pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
 				return;
 			}
 		}
@@ -269,8 +265,8 @@ static void peephole_ia32_Test(ir_node *node)
 			int pnc = get_ia32_condcode(user);
 
 			switch (pnc) {
-			case pn_Cmp_Eq: pnc = pn_Cmp_Ge | ia32_pn_Cmp_unsigned; break;
-			case pn_Cmp_Lg: pnc = pn_Cmp_Lt | ia32_pn_Cmp_unsigned; break;
+			case pn_Cmp_Eq: pnc = ia32_pn_Cmp_not_carry; break;
+			case pn_Cmp_Lg: pnc = ia32_pn_Cmp_carry; break;
 			default: panic("unexpected pn");
 			}
 			set_ia32_condcode(user, pnc);
@@ -286,7 +282,7 @@ static void peephole_ia32_Test(ir_node *node)
 
 	/* If there are other users, reroute them to result proj */
 	if (get_irn_n_edges(left) != 2) {
-		ir_node *res = new_r_Proj(block, left, mode_Iu, pn_ia32_res);
+		ir_node *res = new_r_Proj(left, mode_Iu, pn_ia32_res);
 		edges_reroute(left, res, current_ir_graph);
 
 		/* Reattach the result proj to left */
@@ -295,7 +291,7 @@ static void peephole_ia32_Test(ir_node *node)
 	}
 
 	flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
-	flags_proj = new_r_Proj(block, left, flags_mode, pn_ia32_flags);
+	flags_proj = new_r_Proj(left, flags_mode, pn_ia32_flags);
 	arch_set_irn_register(flags_proj, &ia32_flags_regs[REG_EFLAGS]);
 
 	assert(get_irn_mode(node) != mode_T);
@@ -354,7 +350,8 @@ static void peephole_ia32_Test(ir_node *node)
  * conditional jump or directly preceded by other jump instruction.
  * Can be avoided by placing a Rep prefix before the return.
  */
-static void peephole_ia32_Return(ir_node *node) {
+static void peephole_ia32_Return(ir_node *node)
+{
 	ir_node *block, *irn;
 
 	if (!ia32_cg_config.use_pad_return)
@@ -501,11 +498,11 @@ static void peephole_IncSP_Store_to_push(ir_node *irn)
 	sched_add_after(skip_Proj(curr_sp), push);
 
 	/* create stackpointer Proj */
-	curr_sp = new_r_Proj(block, push, spmode, pn_ia32_Push_stack);
+	curr_sp = new_r_Proj(push, spmode, pn_ia32_Push_stack);
 	arch_set_irn_register(curr_sp, spreg);
 
 	/* create memory Proj */
-	mem_proj = new_r_Proj(block, push, mode_M, pn_ia32_Push_M);
+	mem_proj = new_r_Proj(push, mode_M, pn_ia32_Push_M);
 
 	/* use the memproj now */
 	be_peephole_exchange(store, mem_proj);
@@ -527,6 +524,34 @@ static void peephole_IncSP_Store_to_push(ir_node *irn)
 }
 
 #if 0
+/**
+ * Creates a Push instruction before the given schedule point.
+ *
+ * @param dbgi        debug info
+ * @param block       the block
+ * @param stack       the previous stack value
+ * @param schedpoint  the new node is added before this node
+ * @param reg         the register to pop
+ *
+ * @return the new stack value
+ */
+static ir_node *create_push(dbg_info *dbgi, ir_node *block,
+                            ir_node *stack, ir_node *schedpoint)
+{
+	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+
+	ir_node *val   = ia32_new_NoReg_gp(cg);
+	ir_node *noreg = ia32_new_NoReg_gp(cg);
+	ir_node *nomem = new_NoMem();
+	ir_node *push  = new_bd_ia32_Push(dbgi, block, noreg, noreg, nomem, val, stack);
+	sched_add_before(schedpoint, push);
+
+	stack = new_r_Proj(push, mode_Iu, pn_ia32_Push_stack);
+	arch_set_irn_register(stack, esp);
+
+	return stack;
+}
+
 static void peephole_store_incsp(ir_node *store)
 {
 	dbg_info *dbgi;
@@ -615,7 +640,8 @@ static void peephole_store_incsp(ir_node *store)
 /**
  * Return true if a mode can be stored in the GP register set
  */
-static inline int mode_needs_gp_reg(ir_mode *mode) {
+static inline int mode_needs_gp_reg(ir_mode *mode)
+{
 	if (mode == mode_fpcw)
 		return 0;
 	if (get_mode_size_bits(mode) > 32)
@@ -766,7 +792,7 @@ static void peephole_Load_IncSP_to_pop(ir_node *irn)
 	copy_mark(load, pop);
 
 	/* create stackpointer Proj */
-	pred_sp = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_stack);
+	pred_sp = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_stack);
 	arch_set_irn_register(pred_sp, esp);
 
 	sched_add_before(irn, pop);
@@ -795,12 +821,12 @@ static const arch_register_t *get_free_gp_reg(void)
 {
 	int i;
 
-	for(i = 0; i < N_ia32_gp_REGS; ++i) {
+	for (i = 0; i < N_ia32_gp_REGS; ++i) {
 		const arch_register_t *reg = &ia32_gp_regs[i];
-		if(arch_register_type_is(reg, ignore))
+		if (arch_register_type_is(reg, ignore))
 			continue;
 
-		if(be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
+		if (be_peephole_get_value(CLASS_ia32_gp, i) == NULL)
 			return &ia32_gp_regs[i];
 	}
 
@@ -830,9 +856,9 @@ static ir_node *create_pop(dbg_info *dbgi, ir_node *block,
 
 	pop = new_bd_ia32_Pop(dbgi, block, new_NoMem(), stack);
 
-	stack = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_stack);
+	stack = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_stack);
 	arch_set_irn_register(stack, esp);
-	val   = new_r_Proj(block, pop, mode_Iu, pn_ia32_Pop_res);
+	val   = new_r_Proj(pop, mode_Iu, pn_ia32_Pop_res);
 	arch_set_irn_register(val, reg);
 
 	sched_add_before(schedpoint, pop);
@@ -844,34 +870,6 @@ static ir_node *create_pop(dbg_info *dbgi, ir_node *block,
 	return stack;
 }
 
-/**
- * Creates a Push instruction before the given schedule point.
- *
- * @param dbgi        debug info
- * @param block       the block
- * @param stack       the previous stack value
- * @param schedpoint  the new node is added before this node
- * @param reg         the register to pop
- *
- * @return the new stack value
- */
-static ir_node *create_push(dbg_info *dbgi, ir_node *block,
-                            ir_node *stack, ir_node *schedpoint)
-{
-	const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
-
-	ir_node *val   = ia32_new_Unknown_gp(cg);
-	ir_node *noreg = ia32_new_NoReg_gp(cg);
-	ir_node *nomem = new_NoMem();
-	ir_node *push  = new_bd_ia32_Push(dbgi, block, noreg, noreg, nomem, val, stack);
-	sched_add_before(schedpoint, push);
-
-	stack = new_r_Proj(block, push, mode_Iu, pn_ia32_Push_stack);
-	arch_set_irn_register(stack, esp);
-
-	return stack;
-}
-
 /**
  * Optimize an IncSp by replacing it with Push/Pop.
  */
@@ -923,10 +921,14 @@ static void peephole_be_IncSP(ir_node *node)
 	dbgi  = get_irn_dbg_info(node);
 	block = get_nodes_block(node);
 	stack = be_get_IncSP_pred(node);
-	stack = create_push(dbgi, block, stack, node);
+	stack = new_bd_ia32_PushEax(dbgi, block, stack);
+	arch_set_irn_register(stack, esp);
+	sched_add_before(node, stack);
 
 	if (offset == +8) {
-		stack = create_push(dbgi, block, stack, node);
+		stack = new_bd_ia32_PushEax(dbgi, block, stack);
+		arch_set_irn_register(stack, esp);
+		sched_add_before(node, stack);
 	}
 }
 
@@ -973,7 +975,7 @@ static inline int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
 	return node == cg->noreg_gp;
 }
 
-static ir_node *create_immediate_from_int(int val)
+ir_node *ia32_immediate_from_long(long val)
 {
 	ir_graph *irg = current_ir_graph;
 	ir_node *start_block = get_irg_start_block(irg);
@@ -1039,26 +1041,26 @@ static void peephole_ia32_Lea(ir_node *node)
 	assert(is_ia32_Lea(node));
 
 	/* we can only do this if it is allowed to clobber the flags */
-	if(be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
+	if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
 		return;
 
 	base  = get_irn_n(node, n_ia32_Lea_base);
 	index = get_irn_n(node, n_ia32_Lea_index);
 
-	if(is_noreg(cg, base)) {
+	if (is_noreg(cg, base)) {
 		base     = NULL;
 		base_reg = NULL;
 	} else {
 		base_reg = arch_get_irn_register(base);
 	}
-	if(is_noreg(cg, index)) {
+	if (is_noreg(cg, index)) {
 		index     = NULL;
 		index_reg = NULL;
 	} else {
 		index_reg = arch_get_irn_register(index);
 	}
 
-	if(base == NULL && index == NULL) {
+	if (base == NULL && index == NULL) {
 		/* we shouldn't construct these in the first place... */
 #ifdef DEBUG_libfirm
 		ir_fprintf(stderr, "Optimisation warning: found immediate only lea\n");
@@ -1071,7 +1073,7 @@ static void peephole_ia32_Lea(ir_node *node)
 	assert(!is_ia32_need_stackent(node) || get_ia32_frame_ent(node) != NULL);
 	/* check if we have immediates values (frame entities should already be
 	 * expressed in the offsets) */
-	if(get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
+	if (get_ia32_am_offs_int(node) != 0 || get_ia32_am_sc(node) != NULL) {
 		has_immediates = 1;
 	} else {
 		has_immediates = 0;
@@ -1079,10 +1081,10 @@ static void peephole_ia32_Lea(ir_node *node)
 
 	/* we can transform leas where the out register is the same as either the
 	 * base or index register back to an Add or Shl */
-	if(out_reg == base_reg) {
-		if(index == NULL) {
+	if (out_reg == base_reg) {
+		if (index == NULL) {
 #ifdef DEBUG_libfirm
-			if(!has_immediates) {
+			if (!has_immediates) {
 				ir_fprintf(stderr, "Optimisation warning: found lea which is "
 				           "just a copy\n");
 			}
@@ -1090,29 +1092,29 @@ static void peephole_ia32_Lea(ir_node *node)
 			op1 = base;
 			goto make_add_immediate;
 		}
-		if(scale == 0 && !has_immediates) {
+		if (scale == 0 && !has_immediates) {
 			op1 = base;
 			op2 = index;
 			goto make_add;
 		}
 		/* can't create an add */
 		return;
-	} else if(out_reg == index_reg) {
-		if(base == NULL) {
-			if(has_immediates && scale == 0) {
+	} else if (out_reg == index_reg) {
+		if (base == NULL) {
+			if (has_immediates && scale == 0) {
 				op1 = index;
 				goto make_add_immediate;
-			} else if(!has_immediates && scale > 0) {
+			} else if (!has_immediates && scale > 0) {
 				op1 = index;
-				op2 = create_immediate_from_int(scale);
+				op2 = ia32_immediate_from_long(scale);
 				goto make_shl;
-			} else if(!has_immediates) {
+			} else if (!has_immediates) {
 #ifdef DEBUG_libfirm
 				ir_fprintf(stderr, "Optimisation warning: found lea which is "
 				           "just a copy\n");
 #endif
 			}
-		} else if(scale == 0 && !has_immediates) {
+		} else if (scale == 0 && !has_immediates) {
 			op1 = index;
 			op2 = base;
 			goto make_add;
@@ -1125,15 +1127,15 @@ static void peephole_ia32_Lea(ir_node *node)
 	}
 
 make_add_immediate:
-	if(ia32_cg_config.use_incdec) {
-		if(is_am_one(node)) {
+	if (ia32_cg_config.use_incdec) {
+		if (is_am_one(node)) {
 			dbgi  = get_irn_dbg_info(node);
 			block = get_nodes_block(node);
 			res   = new_bd_ia32_Inc(dbgi, block, op1);
 			arch_set_irn_register(res, out_reg);
 			goto exchange;
 		}
-		if(is_am_minus_one(node)) {
+		if (is_am_minus_one(node)) {
 			dbgi  = get_irn_dbg_info(node);
 			block = get_nodes_block(node);
 			res   = new_bd_ia32_Dec(dbgi, block, op1);
@@ -1272,7 +1274,7 @@ void ia32_peephole_optimization(ia32_code_gen_t *new_cg)
  */
 static inline void try_kill(ir_node *node)
 {
-	if(get_irn_mode(node) == mode_T) {
+	if (get_irn_mode(node) == mode_T) {
 		const ir_edge_t *edge, *next;
 		foreach_out_edge_safe(node, edge, next) {
 			ir_node *proj = get_edge_src_irn(edge);
@@ -1280,7 +1282,7 @@ static inline void try_kill(ir_node *node)
 		}
 	}
 
-	if(get_irn_n_edges(node) != 0)
+	if (get_irn_n_edges(node) != 0)
 		return;
 
 	if (sched_is_scheduled(node)) {
@@ -1297,32 +1299,32 @@ static void optimize_conv_store(ir_node *node)
 	ir_mode *conv_mode;
 	ir_mode *store_mode;
 
-	if(!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
+	if (!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
 		return;
 
 	assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
 	pred_proj = get_irn_n(node, n_ia32_Store_val);
-	if(is_Proj(pred_proj)) {
+	if (is_Proj(pred_proj)) {
 		pred = get_Proj_pred(pred_proj);
 	} else {
 		pred = pred_proj;
 	}
-	if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
+	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
 		return;
-	if(get_ia32_op_type(pred) != ia32_Normal)
+	if (get_ia32_op_type(pred) != ia32_Normal)
 		return;
 
 	/* the store only stores the lower bits, so we only need the conv
 	 * it it shrinks the mode */
 	conv_mode  = get_ia32_ls_mode(pred);
 	store_mode = get_ia32_ls_mode(node);
-	if(get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
+	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
 		return;
 
 	set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
-	if(get_irn_n_edges(pred_proj) == 0) {
+	if (get_irn_n_edges(pred_proj) == 0) {
 		kill_node(pred_proj);
-		if(pred != pred_proj)
+		if (pred != pred_proj)
 			kill_node(pred);
 	}
 }
@@ -1338,25 +1340,25 @@ static void optimize_load_conv(ir_node *node)
 
 	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
 	pred = get_irn_n(node, n_ia32_Conv_I2I_val);
-	if(!is_Proj(pred))
+	if (!is_Proj(pred))
 		return;
 
 	predpred = get_Proj_pred(pred);
-	if(!is_ia32_Load(predpred))
+	if (!is_ia32_Load(predpred))
 		return;
 
 	/* the load is sign extending the upper bits, so we only need the conv
 	 * if it shrinks the mode */
 	load_mode = get_ia32_ls_mode(predpred);
 	conv_mode = get_ia32_ls_mode(node);
-	if(get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
+	if (get_mode_size_bits(conv_mode) < get_mode_size_bits(load_mode))
 		return;
 
-	if(get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
+	if (get_mode_sign(conv_mode) != get_mode_sign(load_mode)) {
 		/* change the load if it has only 1 user */
-		if(get_irn_n_edges(pred) == 1) {
+		if (get_irn_n_edges(pred) == 1) {
 			ir_mode *newmode;
-			if(get_mode_sign(conv_mode)) {
+			if (get_mode_sign(conv_mode)) {
 				newmode = find_signed_mode(load_mode);
 			} else {
 				newmode = find_unsigned_mode(load_mode);
@@ -1385,12 +1387,12 @@ static void optimize_conv_conv(ir_node *node)
 
 	assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
 	pred_proj = get_irn_n(node, n_ia32_Conv_I2I_val);
-	if(is_Proj(pred_proj))
+	if (is_Proj(pred_proj))
 		pred = get_Proj_pred(pred_proj);
 	else
 		pred = pred_proj;
 
-	if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
+	if (!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
 		return;
 
 	/* we know that after a conv, the upper bits are sign extended
@@ -1400,13 +1402,13 @@ static void optimize_conv_conv(ir_node *node)
 	pred_mode      = get_ia32_ls_mode(pred);
 	pred_mode_bits = get_mode_size_bits(pred_mode);
 
-	if(conv_mode_bits == pred_mode_bits
+	if (conv_mode_bits == pred_mode_bits
 			&& get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
 		result_conv = pred_proj;
-	} else if(conv_mode_bits <= pred_mode_bits) {
+	} else if (conv_mode_bits <= pred_mode_bits) {
 		/* if 2nd conv is smaller then first conv, then we can always take the
 		 * 2nd conv */
-		if(get_irn_n_edges(pred_proj) == 1) {
+		if (get_irn_n_edges(pred_proj) == 1) {
 			result_conv = pred_proj;
 			set_ia32_ls_mode(pred, conv_mode);
 
@@ -1417,7 +1419,7 @@ static void optimize_conv_conv(ir_node *node)
 		}
 	} else {
 		/* we don't want to end up with 2 loads, so we better do nothing */
-		if(get_irn_mode(pred) == mode_T) {
+		if (get_irn_mode(pred) == mode_T) {
 			return;
 		}
 
@@ -1432,11 +1434,11 @@ static void optimize_conv_conv(ir_node *node)
 		}
 	} else {
 		/* if both convs have the same sign, then we can take the smaller one */
-		if(get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
+		if (get_mode_sign(conv_mode) == get_mode_sign(pred_mode)) {
 			result_conv = pred_proj;
 		} else {
 			/* no optimisation possible if smaller conv is sign-extend */
-			if(mode_is_signed(pred_mode)) {
+			if (mode_is_signed(pred_mode)) {
 				return;
 			}
 			/* we can take the smaller conv if it is unsigned */
@@ -1450,9 +1452,9 @@ static void optimize_conv_conv(ir_node *node)
 	/* kill the conv */
 	exchange(node, result_conv);
 
-	if(get_irn_n_edges(pred_proj) == 0) {
+	if (get_irn_n_edges(pred_proj) == 0) {
 		kill_node(pred_proj);
-		if(pred != pred_proj)
+		if (pred != pred_proj)
 			kill_node(pred);
 	}
 	optimize_conv_conv(result_conv);