produces_flag_zero : produces_no_flag;
}
-/**
- * If the given node has not mode_T, creates a mode_T version (with a result Proj).
- *
- * @param node the node to change
- *
- * @return the new mode_T node (if the mode was changed) or node itself
- */
-static ir_node *turn_into_mode_t(ir_node *node)
-{
- ir_node *block;
- ir_node *res_proj;
- ir_node *new_node;
- const arch_register_t *reg;
-
- if(get_irn_mode(node) == mode_T)
- return node;
-
- assert(get_irn_mode(node) == mode_Iu);
-
- new_node = exact_copy(node);
- set_irn_mode(new_node, mode_T);
-
- block = get_nodes_block(new_node);
- res_proj = new_r_Proj(current_ir_graph, block, new_node, mode_Iu,
- pn_ia32_res);
-
- reg = arch_get_irn_register(node);
- arch_set_irn_register(res_proj, reg);
-
- sched_add_before(node, new_node);
- be_peephole_exchange(node, res_proj);
- return new_node;
-}
-
/**
* Replace Cmp(x, 0) by a Test(x, x)
*/
return;
}
- left = turn_into_mode_t(left);
+ if (get_irn_mode(left) != mode_T) {
+ set_irn_mode(left, mode_T);
+
+ /* If there are other users, reroute them to result proj */
+ if (get_irn_n_edges(left) != 2) {
+ ir_node *res = new_r_Proj(current_ir_graph, block, left,
+ mode_Iu, pn_ia32_res);
+
+ edges_reroute(left, res, current_ir_graph);
+ /* Reattach the result proj to left */
+ set_Proj_pred(res, left);
+ }
+ }
flags_mode = ia32_reg_classes[CLASS_ia32_flags].mode;
flags_proj = new_r_Proj(current_ir_graph, block, left, flags_mode,
if ((offset & 0xFFFFFF00) == 0) {
/* attr->am_offs += 0; */
} else if ((offset & 0xFFFF00FF) == 0) {
- ir_node *imm = create_Immediate(NULL, 0, offset >> 8);
+ ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 8);
set_irn_n(node, n_ia32_Test_right, imm);
attr->am_offs += 1;
} else if ((offset & 0xFF00FFFF) == 0) {
- ir_node *imm = create_Immediate(NULL, 0, offset >> 16);
+ ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 16);
set_irn_n(node, n_ia32_Test_right, imm);
attr->am_offs += 2;
} else if ((offset & 0x00FFFFFF) == 0) {
- ir_node *imm = create_Immediate(NULL, 0, offset >> 24);
+ ir_node *imm = ia32_create_Immediate(NULL, 0, offset >> 24);
set_irn_n(node, n_ia32_Test_right, imm);
attr->am_offs += 3;
} else {
const arch_register_t *reg;
ir_node *block;
dbg_info *dbgi;
- ir_node *produceval;
ir_node *xor;
- ir_node *noreg;
/* try to transform a mov 0, reg to xor reg reg */
if (attr->offset != 0 || attr->symconst != NULL)
assert(be_peephole_get_reg_value(reg) == NULL);
/* create xor(produceval, produceval) */
- block = get_nodes_block(node);
- dbgi = get_irn_dbg_info(node);
- produceval = new_bd_ia32_ProduceVal(dbgi, block);
- arch_set_irn_register(produceval, reg);
-
- noreg = ia32_new_NoReg_gp(cg);
- xor = new_bd_ia32_Xor(dbgi, block, noreg, noreg, new_NoMem(), produceval,
- produceval);
+ block = get_nodes_block(node);
+ dbgi = get_irn_dbg_info(node);
+ xor = new_bd_ia32_Xor0(dbgi, block);
arch_set_irn_register(xor, reg);
- sched_add_before(node, produceval);
sched_add_before(node, xor);
copy_mark(node, xor);
{
ir_graph *irg = current_ir_graph;
ir_node *start_block = get_irg_start_block(irg);
- ir_node *immediate = new_bd_ia32_Immediate(NULL, start_block, NULL, 0,
- val);
+ ir_node *immediate
+ = new_bd_ia32_Immediate(NULL, start_block, NULL, 0, 0, val);
arch_set_irn_register(immediate, &ia32_gp_regs[REG_GP_NOREG]);
return immediate;
ir_node *block = get_nodes_block(node);
int offset = get_ia32_am_offs_int(node);
int sc_sign = is_ia32_am_sc_sign(node);
+ const ia32_attr_t *attr = get_ia32_attr_const(node);
+ int sc_no_pic_adjust = attr->data.am_sc_no_pic_adjust;
ir_entity *entity = get_ia32_am_sc(node);
ir_node *res;
- res = new_bd_ia32_Immediate(NULL, block, entity, sc_sign, offset);
+ res = new_bd_ia32_Immediate(NULL, block, entity, sc_sign, sc_no_pic_adjust,
+ offset);
arch_set_irn_register(res, &ia32_gp_regs[REG_GP_NOREG]);
return res;
}
assert(is_ia32_Lea(node));
- /* we can only do this if are allowed to globber the flags */
+ /* we can only do this if it is allowed to clobber the flags */
if(be_peephole_get_value(CLASS_ia32_flags, REG_EFLAGS) != NULL)
return;
/**
* Replace xorps r,r and xorpd r,r by pxor r,r
*/
-static void peephole_ia32_xZero(ir_node *xor) {
+static void peephole_ia32_xZero(ir_node *xor)
+{
+	/* Only the opcode is swapped in place; NOTE(review): this assumes
+	 * pxor takes the same operands/attributes as xorps/xorpd here —
+	 * confirm against the node spec. */
set_irn_op(xor, op_ia32_xPzero);
}
+/**
+ * Replace a 16 bit sign extension by the shorter cwtl instruction.
+ * Only applies when both the input value and the result live in %eax,
+ * since cwtl has %ax/%eax as fixed implicit operands.
+ *
+ * @param node  the Conv_I2I node to inspect
+ */
+static void peephole_ia32_Conv_I2I(ir_node *node)
+{
+ const arch_register_t *eax = &ia32_gp_regs[REG_EAX];
+ ir_mode *smaller_mode = get_ia32_ls_mode(node);
+ ir_node *val = get_irn_n(node, n_ia32_Conv_I2I_val);
+ dbg_info *dbgi;
+ ir_node *block;
+ ir_node *cwtl;
+
+ /* bail out unless this is a signed 16->32 bit extension with both
+ * the source and the destination assigned to %eax */
+ if (get_mode_size_bits(smaller_mode) != 16 ||
+ !mode_is_signed(smaller_mode) ||
+ eax != arch_get_irn_register(val) ||
+ eax != arch_irn_get_register(node, pn_ia32_Conv_I2I_res))
+ return;
+
+ /* build the replacement, schedule it right before the Conv and let
+ * the peephole framework exchange the nodes */
+ dbgi = get_irn_dbg_info(node);
+ block = get_nodes_block(node);
+ cwtl = new_bd_ia32_Cwtl(dbgi, block, val);
+ arch_set_irn_register(cwtl, eax);
+ sched_add_before(node, cwtl);
+ be_peephole_exchange(node, cwtl);
+}
+
/**
 * Register a peephole optimisation function.
 */
-static void register_peephole_optimisation(ir_op *op, peephole_opt_func func) {
+static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
+{
+	/* each opcode may carry at most one handler in ops.generic;
+	 * the assert guards against double registration */
assert(op->ops.generic == NULL);
op->ops.generic = (op_func)func;
}
register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
if (ia32_cg_config.use_pxor)
register_peephole_optimisation(op_ia32_xZero, peephole_ia32_xZero);
+ if (ia32_cg_config.use_short_sex_eax)
+ register_peephole_optimisation(op_ia32_Conv_I2I, peephole_ia32_Conv_I2I);
be_peephole_opt(cg->birg);
}