/*
- * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
+ * Copyright (C) 1995-2008 University of Karlsruhe. All rights reserved.
*
* This file is part of libFirm.
*
#include "height.h"
#include "irbitset.h"
#include "irprintf.h"
+#include "error.h"
#include "../be_t.h"
#include "../beabi.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
+#include "ia32_architecture.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static const arch_env_t *arch_env;
static ia32_code_gen_t *cg;
-typedef int is_op_func_t(const ir_node *n);
-typedef ir_node *load_func_t(dbg_info *db, ir_graph *irg, ir_node *block, ir_node *base, ir_node *index, ir_node *mem);
+static void peephole_IncSP_IncSP(ir_node *node);
-/**
- * checks if a node represents the NOREG value
- */
-static INLINE int be_is_NoReg(ia32_code_gen_t *cg, const ir_node *irn) {
- return irn == cg->noreg_gp || irn == cg->noreg_xmm || irn == cg->noreg_vfp;
+#if 0
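+/**
+ * Transforms a Store whose address is the top of an IncSP'ed stack
+ * area into a Push (currently disabled).
+ */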
+static void peephole_ia32_Store_IncSP_to_push(ir_node *node)
+{
+ ir_node *base = get_irn_n(node, n_ia32_Store_base);
+ ir_node *index = get_irn_n(node, n_ia32_Store_index);
+ ir_node *mem = get_irn_n(node, n_ia32_Store_mem);
+ ir_node *incsp = base;
+ ir_node *val;
+ ir_node *noreg;
+ ir_graph *irg;
+ ir_node *block;
+ dbg_info *dbgi;
+ ir_mode *mode;
+ ir_node *push;
+ ir_node *proj;
+ int offset;
+ int node_offset;
+
+ /* nomem indicates the store doesn't alias with anything else */
+ if(!is_NoMem(mem))
+ return;
+
+ /* find an IncSP in front of us; we might have to skip Barriers for this */
+ while(is_Proj(incsp)) {
+ ir_node *proj_pred = get_Proj_pred(incsp);
+ if(!be_is_Barrier(proj_pred))
+ return;
+ incsp = get_irn_n(proj_pred, get_Proj_proj(incsp));
+ }
+ if(!be_is_IncSP(incsp))
+ return;
+
+ peephole_IncSP_IncSP(incsp);
+
+ /* must be in the same block */
+ if(get_nodes_block(incsp) != get_nodes_block(node))
+ return;
+
+ if(!is_ia32_NoReg_GP(index) || get_ia32_am_sc(node) != NULL) {
+ panic("Invalid storeAM found (%+F)", node);
+ }
+
+ /* we should be the store at the end of the stack space */
+ offset = be_get_IncSP_offset(incsp);
+ mode = get_ia32_ls_mode(node);
+ node_offset = get_ia32_am_offs_int(node);
+ if(node_offset != offset - get_mode_size_bytes(mode))
+ return;
+
+ /* we can use a push instead of the store */
+ irg = current_ir_graph;
+ block = get_nodes_block(node);
+ dbgi = get_irn_dbg_info(node);
+ noreg = ia32_new_NoReg_gp(cg);
+ base = be_get_IncSP_pred(incsp);
+ val = get_irn_n(node, n_ia32_Store_val);
+ push = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, mem, val, base);
+
+ proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);
+
+ be_set_IncSP_offset(incsp, offset - get_mode_size_bytes(mode));
+
+ sched_add_before(node, push);
+ sched_remove(node);
+
+ be_peephole_before_exchange(node, proj);
+ exchange(node, proj);
+ be_peephole_after_exchange(proj);
+}
+
+static void peephole_ia32_Store(ir_node *node)
+{
+ peephole_ia32_Store_IncSP_to_push(node);
}
+#endif
+
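+/**
+ * Returns non-zero if the instruction node (or its result Proj pn)
+ * sets the zero flag according to its result.
+ */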
+static int produces_zero_flag(ir_node *node, int pn)
+{
+ ir_node *count;
+ const ia32_immediate_attr_t *imm_attr;
+
+ if(!is_ia32_irn(node))
+ return 0;
+
+ if(pn >= 0) {
+ if(pn != pn_ia32_res)
+ return 0;
+ }
+
+ switch(get_ia32_irn_opcode(node)) {
+ case iro_ia32_Add:
+ case iro_ia32_Adc:
+ case iro_ia32_And:
+ case iro_ia32_Or:
+ case iro_ia32_Xor:
+ case iro_ia32_Sub:
+ case iro_ia32_Sbb:
+ case iro_ia32_Neg:
+ case iro_ia32_Inc:
+ case iro_ia32_Dec:
+ return 1;
+
+ case iro_ia32_ShlD:
+ case iro_ia32_ShrD:
+ case iro_ia32_Shl:
+ case iro_ia32_Shr:
+ case iro_ia32_Sar:
+ assert(n_ia32_ShlD_count == n_ia32_ShrD_count);
+ assert(n_ia32_Shl_count == n_ia32_Shr_count
+ && n_ia32_Shl_count == n_ia32_Sar_count);
+ if(is_ia32_ShlD(node) || is_ia32_ShrD(node)) {
+ count = get_irn_n(node, n_ia32_ShlD_count);
+ } else {
+ count = get_irn_n(node, n_ia32_Shl_count);
+ }
+ /* when shift count is zero the flags are not affected, so we can only
+ * do this for constants != 0 */
+ if(!is_ia32_Immediate(count))
+ return 0;
+
+ imm_attr = get_ia32_immediate_attr_const(count);
+ if(imm_attr->symconst != NULL)
+ return 0;
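+ /* note: the hardware masks the shift count to 5 bits, so a count that
+  * is a multiple of 32 behaves like a count of 0 */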
+ if((imm_attr->offset & 0x1f) == 0)
+ return 0;
+ return 1;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
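+/**
+ * Replaces a mode_Iu node by an equivalent mode_T node with a result
+ * Proj, so that further Projs (e.g. for the flags) can be added.
+ * Returns the new mode_T node.
+ */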
+static ir_node *turn_into_mode_t(ir_node *node)
+{
+ ir_node *block;
+ ir_node *res_proj;
+ ir_node *new_node;
+ const arch_register_t *reg;
-/********************************************************************************************************
- * _____ _ _ ____ _ _ _ _ _
- * | __ \ | | | | / __ \ | | (_) (_) | | (_)
- * | |__) |__ ___ _ __ | |__ ___ | | ___ | | | |_ __ | |_ _ _ __ ___ _ ______ _| |_ _ ___ _ __
- * | ___/ _ \/ _ \ '_ \| '_ \ / _ \| |/ _ \ | | | | '_ \| __| | '_ ` _ \| |_ / _` | __| |/ _ \| '_ \
- * | | | __/ __/ |_) | | | | (_) | | __/ | |__| | |_) | |_| | | | | | | |/ / (_| | |_| | (_) | | | |
- * |_| \___|\___| .__/|_| |_|\___/|_|\___| \____/| .__/ \__|_|_| |_| |_|_/___\__,_|\__|_|\___/|_| |_|
- * | | | |
- * |_| |_|
- ********************************************************************************************************/
+ if(get_irn_mode(node) == mode_T)
+ return node;
+
+ assert(get_irn_mode(node) == mode_Iu);
+
+ new_node = exact_copy(node);
+ set_irn_mode(new_node, mode_T);
+
+ block = get_nodes_block(new_node);
+ res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu,
+ pn_ia32_res);
+
+ reg = arch_get_irn_register(arch_env, node);
+ arch_set_irn_register(arch_env, res_proj, reg);
+
+ be_peephole_before_exchange(node, res_proj);
+ sched_add_before(node, new_node);
+ sched_remove(node);
+ exchange(node, res_proj);
+ be_peephole_after_exchange(res_proj);
+
+ return new_node;
+}
+
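+/**
+ * Tries to replace a Test(x, x) by a flags Proj of the instruction
+ * computing x, if that instruction already sets the zero flag.
+ */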
+static void peephole_ia32_Test(ir_node *node)
+{
+ ir_node *left = get_irn_n(node, n_ia32_Test_left);
+ ir_node *right = get_irn_n(node, n_ia32_Test_right);
+ ir_node *flags_proj;
+ ir_node *block;
+ ir_mode *flags_mode;
+ int pn = -1;
+ ir_node *schedpoint;
+ const ir_edge_t *edge;
+
+ assert(n_ia32_Test_left == n_ia32_Test8Bit_left
+ && n_ia32_Test_right == n_ia32_Test8Bit_right);
+
+ /* we need a test for 0 */
+ if(left != right)
+ return;
+
+ block = get_nodes_block(node);
+ if(get_nodes_block(left) != block)
+ return;
+
+ if(is_Proj(left)) {
+ pn = get_Proj_proj(left);
+ left = get_Proj_pred(left);
+ }
+
+ /* happens rarely, but if it does the code below will panic */
+ if (is_ia32_Unknown_GP(left))
+ return;
+
+ /* walk the schedule upwards and abort when we find left, or when some
+    other node destroys the flags */
+ schedpoint = sched_prev(node);
+ while(schedpoint != left) {
+ if(schedpoint == block)
+ panic("couldn't find left");
+ if(arch_irn_is(arch_env, schedpoint, modify_flags))
+ return;
+ schedpoint = sched_prev(schedpoint);
+ }
+
+ /* make sure only Lg/Eq tests are used */
+ foreach_out_edge(node, edge) {
+ ir_node *user = get_edge_src_irn(edge);
+ int pnc = get_ia32_condcode(user);
+
+ if(pnc != pn_Cmp_Eq && pnc != pn_Cmp_Lg) {
+ return;
+ }
+ }
+
+ if(!produces_zero_flag(left, pn))
+ return;
+
+ left = turn_into_mode_t(left);
+
+ flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
+ flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode,
+ pn_ia32_flags);
+ arch_set_irn_register(arch_env, flags_proj, &ia32_flags_regs[REG_EFLAGS]);
+
+ assert(get_irn_mode(node) != mode_T);
+
+ be_peephole_before_exchange(node, flags_proj);
+ exchange(node, flags_proj);
+ sched_remove(node);
+ be_peephole_after_exchange(flags_proj);
+}
/**
- * NOTE: THESE PEEPHOLE OPTIMIZATIONS MUST BE CALLED AFTER SCHEDULING AND REGISTER ALLOCATION.
+ * AMD Athlon works faster when RET is not the destination of a
+ * conditional jump or directly preceded by another jump instruction.
+ * This can be avoided by placing a Rep prefix before the return.
*/
+static void peephole_ia32_Return(ir_node *node) {
+ ir_node *block, *irn;
+
+ if (!ia32_cg_config.use_pad_return)
+ return;
-// only optimize up to 48 stores behind IncSPs
+ block = get_nodes_block(node);
+
+ if (get_Block_n_cfgpreds(block) == 1) {
+ ir_node *pred = get_Block_cfgpred(block, 0);
+
+ if (is_Jmp(pred)) {
+ /* The block of the return has only one predecessor,
+ which jumps directly to this block.
+ This jump will be encoded as a fall through, so we
+ ignore it here.
+ However, the predecessor might be empty, so it must be
+ ensured that empty blocks have been removed ... */
+ return;
+ }
+ }
+
+ /* check if this return is the first real instruction in its block */
+ sched_foreach_reverse_from(node, irn) {
+ switch (get_irn_opcode(irn)) {
+ case beo_Return:
+ /* the return node itself, ignore */
+ continue;
+ case beo_Barrier:
+ /* ignore the barrier, no code generated */
+ continue;
+ case beo_IncSP:
+ /* arg, IncSP 0 nodes might occur, ignore these */
+ if (be_get_IncSP_offset(irn) == 0)
+ continue;
+ return;
+ case iro_Phi:
+ continue;
+ default:
+ return;
+ }
+ }
+ /* yep, return is the first real instruction in this block */
+#if 0
+ {
+ /* add a Rep prefix to the return */
+ ir_node *rep = new_rd_ia32_RepPrefix(get_irn_dbg_info(node), current_ir_graph, block);
+ keep_alive(rep);
+ sched_add_before(node, rep);
+ }
+#else
+ /* ensure that the 3-byte return is generated */
+ be_Return_set_emit_pop(node, 1);
+#endif
+}
+
+/* only optimize up to 48 stores behind IncSPs */
#define MAXPUSH_OPTIMIZE 48
/**
- * Tries to create pushs from IncSP,Store combinations
+ * Tries to create Pushes from IncSP, Store combinations.
+ * The Stores are replaced by Pushes, the IncSP is modified
+ * (possibly into IncSP 0, but not removed).
*/
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
assert(be_is_IncSP(irn));
offset = be_get_IncSP_offset(irn);
- if(offset < 4)
+ if (offset < 4)
return;
/*
mem = get_irn_n(store, n_ia32_mem);
spreg = arch_get_irn_register(cg->arch_env, curr_sp);
- push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, curr_sp, val);
-
- set_ia32_am_support(push, ia32_am_Source, ia32_am_unary);
+ push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);
sched_add_before(irn, push);
be_set_IncSP_offset(irn, offset);
be_set_IncSP_pred(irn, curr_sp);
- be_peephole_node_replaced(irn, irn);
}
/**
be_set_IncSP_offset(node, offs);
predpred = be_get_IncSP_pred(pred);
- be_peephole_node_replaced(pred, predpred);
+ be_peephole_before_exchange(pred, predpred);
/* rewire dependency edges */
edges_reroute_kind(pred, predpred, EDGE_KIND_DEP, current_ir_graph);
be_set_IncSP_pred(node, predpred);
sched_remove(pred);
-
be_kill_node(pred);
+
+ be_peephole_after_exchange(predpred);
}
+/**
+ * Find a free GP register if possible, else return NULL.
+ */
static const arch_register_t *get_free_gp_reg(void)
{
int i;
return NULL;
}
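+/**
+ * Creates a Pop instruction before the given schedule point, popping
+ * the value into free_reg, and returns the new stack value Proj.
+ */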
+static ir_node *create_pop(dbg_info *dbgi, ir_graph *irg, ir_node *block,
+ ir_node *stack, ir_node *schedpoint,
+ const arch_register_t *free_reg)
+{
+ const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
+ ir_node *pop;
+ ir_node *keep;
+ ir_node *val;
+ ir_node *in[1];
+
+ pop = new_rd_ia32_Pop(dbgi, irg, block, new_NoMem(), stack);
+
+ stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
+ arch_set_irn_register(arch_env, stack, esp);
+ val = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
+ arch_set_irn_register(arch_env, val, free_reg);
+
+ sched_add_before(schedpoint, pop);
+
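+ /* the popped value has no real user, so keep it alive with a Keep */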
+ in[0] = val;
+ keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
+ sched_add_before(schedpoint, keep);
+
+ return stack;
+}
+
static void peephole_be_IncSP(ir_node *node)
{
const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
const arch_register_t *reg;
- ir_graph *irg;
+ ir_graph *irg = current_ir_graph;
dbg_info *dbgi;
ir_node *block;
- ir_node *keep;
- ir_node *val;
- ir_node *pop;
- ir_node *noreg;
ir_node *stack;
int offset;
/* transform IncSP->Store combinations to Push where possible */
peephole_IncSP_Store_to_push(node);
- /* replace IncSP -4 by Pop freereg when possible */
- offset = be_get_IncSP_offset(node);
- if(offset != -4)
+ if (arch_get_irn_register(arch_env, node) != esp)
return;
- if(arch_get_irn_register(arch_env, node) != esp)
- return;
-
- reg = get_free_gp_reg();
- if(reg == NULL)
+ /* replace IncSP -4/-8 by Pop(s) into a free register when possible */
+ offset = be_get_IncSP_offset(node);
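+ /* only do this when the CPU profits from Push/Pop over add/sub esp */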
+ if ((offset != -8 || ia32_cg_config.use_add_esp_8) &&
+ (offset != -4 || ia32_cg_config.use_add_esp_4) &&
+ (offset != +4 || ia32_cg_config.use_sub_esp_4) &&
+ (offset != +8 || ia32_cg_config.use_sub_esp_8))
return;
- irg = current_ir_graph;
- dbgi = get_irn_dbg_info(node);
- block = get_nodes_block(node);
- noreg = ia32_new_NoReg_gp(cg);
- stack = be_get_IncSP_pred(node);
- pop = new_rd_ia32_Pop(dbgi, irg, block, noreg, noreg, new_NoMem(), stack);
+ if (offset < 0) {
+ /* we need a free register for pop */
+ reg = get_free_gp_reg();
+ if(reg == NULL)
+ return;
- stack = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_stack);
- arch_set_irn_register(arch_env, stack, esp);
- val = new_r_Proj(irg, block, pop, mode_Iu, pn_ia32_Pop_res);
- arch_set_irn_register(arch_env, val, reg);
+ dbgi = get_irn_dbg_info(node);
+ block = get_nodes_block(node);
+ stack = be_get_IncSP_pred(node);
- sched_add_before(node, pop);
+ stack = create_pop(dbgi, irg, block, stack, node, reg);
- keep = sched_next(node);
- if(!be_is_Keep(keep)) {
- ir_node *in[1];
- in[0] = val;
- keep = be_new_Keep(&ia32_reg_classes[CLASS_ia32_gp], irg, block, 1, in);
- sched_add_before(node, keep);
+ if (offset == -8) {
+ stack = create_pop(dbgi, irg, block, stack, node, reg);
+ }
} else {
- be_Keep_add_node(keep, &ia32_reg_classes[CLASS_ia32_gp], val);
+ /* NIY: create Pushes */
+ return;
}
- be_peephole_node_replaced(node, stack);
-
- exchange(node, stack);
+ be_peephole_before_exchange(node, stack);
sched_remove(node);
+ exchange(node, stack);
+ be_peephole_after_exchange(stack);
}
/**
ir_node *noreg;
/* try to transform a mov 0, reg to xor reg reg */
- if(attr->offset != 0 || attr->symconst != NULL)
+ if (attr->offset != 0 || attr->symconst != NULL)
+ return;
+ if (ia32_cg_config.use_mov_0)
return;
/* xor destroys the flags, so no-one must be using them */
- if(be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
+ if (be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
return;
reg = arch_get_irn_register(arch_env, node);
sched_add_before(node, produceval);
sched_add_before(node, xor);
- be_peephole_node_replaced(node, xor);
+ be_peephole_before_exchange(node, xor);
exchange(node, xor);
sched_remove(node);
+ be_peephole_after_exchange(xor);
}
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
}
make_add_immediate:
- if(cg->isa->opt & IA32_OPT_INCDEC) {
+ if(ia32_cg_config.use_incdec) {
if(is_am_one(node)) {
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
SET_IA32_ORIG_NODE(res, ia32_get_old_node_name(cg, node));
/* add new ADD/SHL to schedule */
- sched_add_before(node, res);
-
DBG_OPT_LEA2ADD(node, res);
- /* remove the old LEA */
- sched_remove(node);
-
/* exchange the Add and the LEA */
- be_peephole_node_replaced(node, res);
+ be_peephole_before_exchange(node, res);
+ sched_add_before(node, res);
+ sched_remove(node);
exchange(node, res);
+ be_peephole_after_exchange(res);
+}
+
+/**
+ * Split an IMul mem, imm into a Load mem and an IMul reg, imm if possible.
+ */
+static void peephole_ia32_Imul_split(ir_node *imul) {
+ const ir_node *right = get_irn_n(imul, n_ia32_IMul_right);
+ const arch_register_t *reg;
+ ir_node *load, *block, *base, *index, *mem, *res, *noreg;
+ dbg_info *dbgi;
+ ir_graph *irg;
+
+ if (! is_ia32_Immediate(right) || get_ia32_op_type(imul) != ia32_AddrModeS) {
+ /* not the IMul mem, imm form: ignore */
+ return;
+ }
+ /* we need a free register */
+ reg = get_free_gp_reg();
+ if (reg == NULL)
+ return;
+
+ /* fine, we can rebuild it */
+ dbgi = get_irn_dbg_info(imul);
+ block = get_nodes_block(imul);
+ irg = current_ir_graph;
+ base = get_irn_n(imul, n_ia32_IMul_base);
+ index = get_irn_n(imul, n_ia32_IMul_index);
+ mem = get_irn_n(imul, n_ia32_IMul_mem);
+ load = new_rd_ia32_Load(dbgi, irg, block, base, index, mem);
+
+ /* copy all attributes */
+ set_irn_pinned(load, get_irn_pinned(imul));
+ set_ia32_op_type(load, ia32_AddrModeS);
+ set_ia32_ls_mode(load, get_ia32_ls_mode(imul));
+
+ set_ia32_am_scale(load, get_ia32_am_scale(imul));
+ set_ia32_am_sc(load, get_ia32_am_sc(imul));
+ set_ia32_am_offs_int(load, get_ia32_am_offs_int(imul));
+ if (is_ia32_am_sc_sign(imul))
+ set_ia32_am_sc_sign(load);
+ if (is_ia32_use_frame(imul))
+ set_ia32_use_frame(load);
+ set_ia32_frame_ent(load, get_ia32_frame_ent(imul));
+
+ sched_add_before(imul, load);
+
+ mem = new_rd_Proj(dbgi, irg, block, load, mode_M, pn_ia32_Load_M);
+ res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
+
+ arch_set_irn_register(arch_env, res, reg);
+ be_peephole_after_exchange(res);
+
+ set_irn_n(imul, n_ia32_IMul_mem, mem);
+ noreg = get_irn_n(imul, n_ia32_IMul_left);
+ set_irn_n(imul, n_ia32_IMul_left, res);
+ set_ia32_op_type(imul, ia32_Normal);
+}
+
+/**
+ * Replace xorps r,r and xorpd r,r by pxor r,r
+ */
+static void peephole_ia32_xZero(ir_node *xor) {
+ set_irn_op(xor, op_ia32_xPzero);
}
/**
*/
static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
assert(op->ops.generic == NULL);
- op->ops.generic = (void*) func;
+ op->ops.generic = (op_func)func;
}
/* Perform peephole-optimizations. */
/* register peephole optimisations */
clear_irp_opcodes_generic_func();
register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
+ /* register_peephole_optimisation(op_ia32_Store, peephole_ia32_Store); */
register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
register_peephole_optimisation(op_ia32_Lea, peephole_ia32_Lea);
+ register_peephole_optimisation(op_ia32_Test, peephole_ia32_Test);
+ register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
+ register_peephole_optimisation(op_be_Return, peephole_ia32_Return);
+ if (! ia32_cg_config.use_imul_mem_imm32)
+ register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
+ if (ia32_cg_config.use_pxor)
+ register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);
be_peephole_opt(cg->birg);
}
static void optimize_conv_store(ir_node *node)
{
ir_node *pred;
+ ir_node *pred_proj;
ir_mode *conv_mode;
ir_mode *store_mode;
if(!is_ia32_Store(node) && !is_ia32_Store8Bit(node))
return;
- pred = get_irn_n(node, 2);
+ assert(n_ia32_Store_val == n_ia32_Store8Bit_val);
+ pred_proj = get_irn_n(node, n_ia32_Store_val);
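+ /* the stored value may be connected via a Proj; look through it to find
+  * the producing node */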
+ if(is_Proj(pred_proj)) {
+ pred = get_Proj_pred(pred_proj);
+ } else {
+ pred = pred_proj;
+ }
if(!is_ia32_Conv_I2I(pred) && !is_ia32_Conv_I2I8Bit(pred))
return;
+ if(get_ia32_op_type(pred) != ia32_Normal)
+ return;
/* the store only stores the lower bits, so we only need the conv
* if it shrinks the mode */
if(get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
return;
- set_irn_n(node, 2, get_irn_n(pred, 2));
- if(get_irn_n_edges(pred) == 0) {
- be_kill_node(pred);
+ set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
+ if(get_irn_n_edges(pred_proj) == 0) {
+ be_kill_node(pred_proj);
+ if(pred != pred_proj)
+ be_kill_node(pred);
}
}
if (!is_ia32_Conv_I2I(node) && !is_ia32_Conv_I2I8Bit(node))
return;
- pred = get_irn_n(node, 2);
+ assert(n_ia32_Conv_I2I_val == n_ia32_Conv_I2I8Bit_val);
+ pred = get_irn_n(node, n_ia32_Conv_I2I_val);
if(!is_Proj(pred))
return;
/* kill the conv */
exchange(node, result_conv);
- if(get_irn_n_edges(pred) == 0) {
- be_kill_node(pred);
+ if(get_irn_n_edges(pred_proj) == 0) {
+ be_kill_node(pred_proj);
+ if(pred != pred_proj)
+ be_kill_node(pred);
}
optimize_conv_conv(result_conv);
}