#include "ia32_optimize.h"
#include "bearch_ia32_t.h"
#include "gen_ia32_regalloc_if.h"
+#include "ia32_common_transform.h"
#include "ia32_transform.h"
#include "ia32_dbg_stat.h"
#include "ia32_util.h"
reg = arch_get_irn_register(arch_env, node);
arch_set_irn_register(arch_env, res_proj, reg);
- be_peephole_before_exchange(node, res_proj);
sched_add_before(node, new_node);
- sched_remove(node);
- exchange(node, res_proj);
- be_peephole_after_exchange(res_proj);
-
+ be_peephole_exchange(node, res_proj);
return new_node;
}
+/**
+ * Replace Cmp(x, 0) by a Test(x, x)
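+ * (both set the condition codes identically for a compare against 0,
+ * but the Test needs no immediate and therefore encodes shorter,
+ * e.g. cmpl $0, %eax  ->  testl %eax, %eax)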
+ */
+static void peephole_ia32_Cmp(ir_node *const node)
+{
+ ir_node *right;
+ ia32_immediate_attr_t const *imm;
+ dbg_info *dbgi;
+ ir_graph *irg;
+ ir_node *block;
+ ir_node *noreg;
+ ir_node *nomem;
+ ir_node *op;
+ ia32_attr_t const *attr;
+ int ins_permuted;
+ int cmp_unsigned;
+ ir_node *test;
+ arch_register_t const *reg;
+ ir_edge_t const *edge;
+ ir_edge_t const *tmp;
+
+ if (get_ia32_op_type(node) != ia32_Normal)
+ return;
+
+ right = get_irn_n(node, n_ia32_Cmp_right);
+ if (!is_ia32_Immediate(right))
+ return;
+
+ imm = get_ia32_immediate_attr_const(right);
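+	/* the immediate must be a plain 0: no symconst and no offset */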
+ if (imm->symconst != NULL || imm->offset != 0)
+ return;
+
+ dbgi = get_irn_dbg_info(node);
+ irg = current_ir_graph;
+ block = get_nodes_block(node);
+ noreg = ia32_new_NoReg_gp(cg);
+ nomem = get_irg_no_mem(irg);
+ op = get_irn_n(node, n_ia32_Cmp_left);
+ attr = get_irn_generic_attr(node);
+ ins_permuted = attr->data.ins_permuted;
+ cmp_unsigned = attr->data.cmp_unsigned;
+
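+	/* a Cmp8Bit gets the corresponding 8 bit Test variant */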
+ if (is_ia32_Cmp(node)) {
+ test = new_rd_ia32_Test(dbgi, irg, block, noreg, noreg, nomem,
+ op, op, ins_permuted, cmp_unsigned);
+ } else {
+ test = new_rd_ia32_Test8Bit(dbgi, irg, block, noreg, noreg, nomem,
+ op, op, ins_permuted, cmp_unsigned);
+ }
+ set_ia32_ls_mode(test, get_ia32_ls_mode(node));
+
+ reg = arch_get_irn_register(arch_env, node);
+ arch_set_irn_register(arch_env, test, reg);
+
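+	/* let all flag users (Projs) refer to the new Test */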
+ foreach_out_edge_safe(node, edge, tmp) {
+ ir_node *const user = get_edge_src_irn(edge);
+
+ if (is_Proj(user))
+ exchange(user, test);
+ }
+
+ sched_add_before(node, test);
+ be_peephole_exchange(node, test);
+}
+
/**
* Peephole optimization for Test instructions.
 * We can remove the Test, if a Zero flag was produced which is still
assert(get_irn_mode(node) != mode_T);
- be_peephole_before_exchange(node, flags_proj);
- exchange(node, flags_proj);
- sched_remove(node);
- be_peephole_after_exchange(flags_proj);
+ be_peephole_exchange(node, flags_proj);
}
/**
block = get_nodes_block(node);
- if (get_Block_n_cfgpreds(block) == 1) {
- ir_node *pred = get_Block_cfgpred(block, 0);
-
- if (is_Jmp(pred)) {
- /* The block of the return has only one predecessor,
- which jumps directly to this block.
- This jump will be encoded as a fall through, so we
- ignore it here.
- However, the predecessor might be empty, so it must be
- ensured that empty blocks are gone away ... */
- return;
- }
- }
-
/* check if this return is the first on the block */
sched_foreach_reverse_from(node, irn) {
switch (get_irn_opcode(irn)) {
return;
}
}
- /* yep, return is the first real instruction in this block */
-#if 0
- {
- /* add an rep prefix to the return */
- ir_node *rep = new_rd_ia32_RepPrefix(get_irn_dbg_info(node), current_ir_graph, block);
- keep_alive(rep);
- sched_add_before(node, rep);
- }
-#else
+
	/* ensure that the 3-byte return is generated */
be_Return_set_emit_pop(node, 1);
-#endif
}
/* only optimize up to 48 stores behind IncSPs */
*/
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
- int i, maxslot, inc_ofs;
- ir_node *node;
- ir_node *stores[MAXPUSH_OPTIMIZE];
- ir_node *block;
- ir_graph *irg;
- ir_node *curr_sp;
- ir_mode *spmode;
+ int i;
+ int maxslot;
+ int inc_ofs;
+ ir_node *node;
+ ir_node *stores[MAXPUSH_OPTIMIZE];
+ ir_node *block;
+ ir_graph *irg;
+ ir_node *curr_sp;
+ ir_mode *spmode;
+ ir_node *first_push = NULL;
+ ir_edge_t const *edge;
+ ir_edge_t const *next;
memset(stores, 0, sizeof(stores));
/* unfortunately we can't support the full AMs possible for push at the
* moment. TODO: fix this */
- if (get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
+ if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
break;
offset = get_ia32_am_offs_int(node);
/* we should NEVER access uninitialized stack BELOW the current SP */
assert(offset >= 0);
- offset = inc_ofs - 4 - offset;
-
/* storing at half-slots is bad */
if ((offset & 3) != 0)
break;
- if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
+ if (inc_ofs - 4 < offset || offset >= MAXPUSH_OPTIMIZE * 4)
continue;
storeslot = offset >> 2;
maxslot = storeslot;
}
- curr_sp = be_get_IncSP_pred(irn);
+ curr_sp = irn;
+
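+	/* determine the contiguous run of filled slots starting at slot 0;
+	 * only this prefix of Stores can be turned into Pushs */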
+ for (i = -1; i < maxslot; ++i) {
+ if (stores[i + 1] == NULL)
+ break;
+ }
/* walk through the Stores and create Pushs for them */
block = get_nodes_block(irn);
spmode = get_irn_mode(irn);
irg = cg->irg;
- for (i = 0; i <= maxslot; ++i) {
+ for (; i >= 0; --i) {
const arch_register_t *spreg;
ir_node *push;
ir_node *val, *mem, *mem_proj;
ir_node *store = stores[i];
ir_node *noreg = ia32_new_NoReg_gp(cg);
- if (store == NULL)
- break;
-
val = get_irn_n(store, n_ia32_unary_op);
mem = get_irn_n(store, n_ia32_mem);
spreg = arch_get_irn_register(cg->arch_env, curr_sp);
push = new_rd_ia32_Push(get_irn_dbg_info(store), irg, block, noreg, noreg, mem, val, curr_sp);
- sched_add_before(irn, push);
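+	/* remember the very first Push; it keeps the IncSP as its stack
+	 * input and must not be rewired below */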
+ if (first_push == NULL)
+ first_push = push;
+
+ sched_add_after(curr_sp, push);
/* create stackpointer Proj */
curr_sp = new_r_Proj(irg, block, push, spmode, pn_ia32_Push_stack);
mem_proj = new_r_Proj(irg, block, push, mode_M, pn_ia32_Push_M);
/* use the memproj now */
- exchange(store, mem_proj);
-
- /* we can remove the Store now */
- sched_remove(store);
+ be_peephole_exchange(store, mem_proj);
inc_ofs -= 4;
}
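+	/* let all other users of the old stack value use the stack Proj of
+	 * the last Push */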
+ foreach_out_edge_safe(irn, edge, next) {
+ ir_node *const src = get_edge_src_irn(edge);
+ int const pos = get_edge_src_pos(edge);
+
+ if (src == first_push)
+ continue;
+
+ set_irn_n(src, pos, curr_sp);
+ }
+
be_set_IncSP_offset(irn, inc_ofs);
- be_set_IncSP_pred(irn, curr_sp);
+}
+
+/**
+ * Return true if a mode can be stored in the GP register set
+ */
+static INLINE int mode_needs_gp_reg(ir_mode *mode)
+{
+ if (mode == mode_fpcw)
+ return 0;
+ if (get_mode_size_bits(mode) > 32)
+ return 0;
+ return mode_is_int(mode) || mode_is_reference(mode) || mode == mode_b;
}
/**
static void peephole_Load_IncSP_to_pop(ir_node *irn)
{
const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
- int i, maxslot, inc_ofs;
+ int i, maxslot, inc_ofs, ofs;
ir_node *node, *pred_sp, *block;
ir_node *loads[MAXPUSH_OPTIMIZE];
ir_graph *irg;
unsigned regmask = 0;
+ unsigned copymask = ~0;
memset(loads, 0, sizeof(loads));
assert(be_is_IncSP(irn));
maxslot = -1;
pred_sp = be_get_IncSP_pred(irn);
for (node = sched_prev(irn); !sched_is_end(node); node = sched_prev(node)) {
- ir_node *mem;
int offset;
int loadslot;
- const arch_register_t *dreg;
+ const arch_register_t *sreg, *dreg;
/* it has to be a Load */
if (!is_ia32_Load(node)) {
if (be_is_Copy(node)) {
- if (get_irn_mode(node) != mode_Iu) {
+ if (!mode_needs_gp_reg(get_irn_mode(node))) {
/* not a GP copy, ignore */
continue;
}
dreg = arch_get_irn_register(arch_env, node);
- if (regmask & (1 << dreg->index)) {
+ sreg = arch_get_irn_register(arch_env, be_get_Copy_op(node));
+ if (regmask & copymask & (1 << sreg->index)) {
+ break;
+ }
+ if (regmask & copymask & (1 << dreg->index)) {
break;
}
- /* we CAN skip Copies if the destination is not in our regmask, ie
- none of our future Pop will overwrite it */
- regmask |= (1 << dreg->index);
+	/* we CAN skip Copies if neither the destination nor the source
+	 * is in our regmask, i.e. none of our future Pops will overwrite it */
+ regmask |= (1 << dreg->index) | (1 << sreg->index);
+ copymask &= ~((1 << dreg->index) | (1 << sreg->index));
continue;
}
break;
}
/* we can handle only GP loads */
- if (get_ia32_ls_mode(node) != mode_Iu)
+ if (!mode_needs_gp_reg(get_ia32_ls_mode(node)))
continue;
/* it has to use our predecessor sp value */
- if (get_irn_n(node, n_ia32_base) != pred_sp)
- continue;
- /* Load has to be attached to Spill-Mem */
- mem = skip_Proj(get_irn_n(node, n_ia32_mem));
- if (!is_Phi(mem) && !is_ia32_Store(mem) && !is_ia32_Push(mem))
- continue;
+ if (get_irn_n(node, n_ia32_base) != pred_sp) {
+ /* it would be ok if this load does not use a Pop result,
+ * but we do not check this */
+ break;
+ }
/* should have NO index */
- if (get_ia32_am_scale(node) > 0 || !is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
+ if (!is_ia32_NoReg_GP(get_irn_n(node, n_ia32_index)))
break;
offset = get_ia32_am_offs_int(node);
if (offset < 0 || offset >= MAXPUSH_OPTIMIZE * 4)
continue;
+		/* ignore those outside the possible window */
+ if (offset > inc_ofs - 4)
+ continue;
loadslot = offset >> 2;
/* loading from the same slot twice is bad (and shouldn't happen...) */
if (maxslot < 0)
return;
- /* walk through the Loads and create Pops for them */
+	/* find the lower end of the contiguous run of Loads ending at maxslot */
for (i = maxslot; i >= 0; --i) {
ir_node *load = loads[i];
if (load == NULL)
break;
- inc_ofs -= 4;
}
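+	/* Pops will cover slots i+1 .. maxslot; the (i+1)*4 bytes below them
+	 * still need an explicit IncSP, the rest remains on the original one */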
+ ofs = inc_ofs - (maxslot + 1) * 4;
+ inc_ofs = (i+1) * 4;
+
/* create a new IncSP if needed */
block = get_nodes_block(irn);
irg = cg->irg;
- if (inc_ofs != 0) {
- assert(inc_ofs > 0);
+ if (inc_ofs > 0) {
pred_sp = be_new_IncSP(esp, irg, block, pred_sp, -inc_ofs, be_get_IncSP_align(irn));
sched_add_before(irn, pred_sp);
}
+ /* walk through the Loads and create Pops for them */
for (++i; i <= maxslot; ++i) {
ir_node *load = loads[i];
ir_node *mem, *pop;
set_Proj_pred(proj, pop);
}
-
/* we can remove the Load now */
sched_remove(load);
kill_node(load);
-
}
- be_set_IncSP_offset(irn, 0);
- be_set_IncSP_pred(irn, pred_sp);
+ be_set_IncSP_offset(irn, -ofs);
+ be_set_IncSP_pred(irn, pred_sp);
}
* @return the new stack value
*/
static ir_node *create_push(dbg_info *dbgi, ir_graph *irg, ir_node *block,
- ir_node *stack, ir_node *schedpoint,
- const arch_register_t *reg)
+ ir_node *stack, ir_node *schedpoint)
{
const arch_register_t *esp = &ia32_gp_regs[REG_ESP];
- ir_node *noreg, *nomem, *push, *val;
- val = new_rd_ia32_ProduceVal(NULL, irg, block);
- arch_set_irn_register(arch_env, val, reg);
- sched_add_before(schedpoint, val);
-
- noreg = ia32_new_NoReg_gp(cg);
- nomem = get_irg_no_mem(irg);
- push = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack);
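+	/* push an Unknown value: only the stack effect of the Push matters,
+	 * the pushed contents are irrelevant */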
+ ir_node *val = ia32_new_Unknown_gp(cg);
+ ir_node *noreg = ia32_new_NoReg_gp(cg);
+ ir_node *nomem = get_irg_no_mem(irg);
+ ir_node *push = new_rd_ia32_Push(dbgi, irg, block, noreg, noreg, nomem, val, stack);
sched_add_before(schedpoint, push);
stack = new_r_Proj(irg, block, push, mode_Iu, pn_ia32_Push_stack);
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
stack = be_get_IncSP_pred(node);
- reg = &ia32_gp_regs[REG_EAX];
-
- stack = create_push(dbgi, irg, block, stack, node, reg);
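+	/* one Push per 4 bytes of stack decrement */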
+ stack = create_push(dbgi, irg, block, stack, node);
if (offset == +8) {
- stack = create_push(dbgi, irg, block, stack, node, reg);
+ stack = create_push(dbgi, irg, block, stack, node);
}
}
- be_peephole_before_exchange(node, stack);
- sched_remove(node);
- exchange(node, stack);
- be_peephole_after_exchange(stack);
+ be_peephole_exchange(node, stack);
}
/**
sched_add_before(node, produceval);
sched_add_before(node, xor);
- be_peephole_before_exchange(node, xor);
- exchange(node, xor);
- sched_remove(node);
- be_peephole_after_exchange(xor);
+ be_peephole_exchange(node, xor);
}
static INLINE int is_noreg(ia32_code_gen_t *cg, const ir_node *node)
DBG_OPT_LEA2ADD(node, res);
/* exchange the Add and the LEA */
- be_peephole_before_exchange(node, res);
sched_add_before(node, res);
- sched_remove(node);
- exchange(node, res);
- be_peephole_after_exchange(res);
+ be_peephole_exchange(node, res);
}
/**
res = new_rd_Proj(dbgi, irg, block, load, mode_Iu, pn_ia32_Load_res);
arch_set_irn_register(arch_env, res, reg);
- be_peephole_after_exchange(res);
+ be_peephole_new_node(res);
set_irn_n(imul, n_ia32_IMul_mem, mem);
noreg = get_irn_n(imul, n_ia32_IMul_left);
/* register peephole optimisations */
clear_irp_opcodes_generic_func();
- register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
- register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
- register_peephole_optimisation(op_ia32_Lea, peephole_ia32_Lea);
- register_peephole_optimisation(op_ia32_Test, peephole_ia32_Test);
+ register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
+ register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
+ register_peephole_optimisation(op_ia32_Lea, peephole_ia32_Lea);
+ register_peephole_optimisation(op_ia32_Cmp, peephole_ia32_Cmp);
+ register_peephole_optimisation(op_ia32_Cmp8Bit, peephole_ia32_Cmp);
+ register_peephole_optimisation(op_ia32_Test, peephole_ia32_Test);
register_peephole_optimisation(op_ia32_Test8Bit, peephole_ia32_Test);
- register_peephole_optimisation(op_be_Return, peephole_ia32_Return);
+ register_peephole_optimisation(op_be_Return, peephole_ia32_Return);
if (! ia32_cg_config.use_imul_mem_imm32)
register_peephole_optimisation(op_ia32_IMul, peephole_ia32_Imul_split);
if (ia32_cg_config.use_pxor)
sched_remove(node);
}
- be_kill_node(node);
+ kill_node(node);
}
static void optimize_conv_store(ir_node *node)
set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
if(get_irn_n_edges(pred_proj) == 0) {
- be_kill_node(pred_proj);
+ kill_node(pred_proj);
if(pred != pred_proj)
- be_kill_node(pred);
+ kill_node(pred);
}
}
exchange(node, result_conv);
if(get_irn_n_edges(pred_proj) == 0) {
- be_kill_node(pred_proj);
+ kill_node(pred_proj);
if(pred != pred_proj)
- be_kill_node(pred);
+ kill_node(pred);
}
optimize_conv_conv(result_conv);
}