int ins_permuted;
ir_node *test;
arch_register_t const *reg;
- ir_edge_t const *edge;
- ir_edge_t const *tmp;
if (get_ia32_op_type(node) != ia32_Normal)
return;
reg = arch_get_irn_register_out(node, pn_ia32_Cmp_eflags);
arch_set_irn_register_out(test, pn_ia32_Test_eflags, reg);
- foreach_out_edge_safe(node, edge, tmp) {
+ foreach_out_edge_safe(node, edge) {
ir_node *const user = get_edge_src_irn(edge);
if (is_Proj(user))
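/* With the two-argument form, foreach_out_edge_safe() declares the edge
 * iterator itself, which is why the explicit ir_edge_t locals above are
 * dropped.  A minimal sketch of the new idiom:
 *
 *   foreach_out_edge_safe(node, edge) {
 *       ir_node *const user = get_edge_src_irn(edge);
 *       ...
 *   }
 */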
ir_mode *flags_mode;
ir_mode *op_mode;
ir_node *schedpoint;
- const ir_edge_t *edge;
produces_flag_t produced;
if (get_nodes_block(left) != block)
/* If there are other users, reroute them to the result Proj */
if (get_irn_n_edges(op) != 2) {
ir_node *res = new_r_Proj(op, mode_Iu, pn_ia32_res);
-
- edges_reroute(op, res);
- /* Reattach the result proj to left */
- set_Proj_pred(res, op);
+ edges_reroute_except(op, res, res);
}
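/* edges_reroute_except(op, res, res) moves every user of op over to res
 * while skipping res itself, folding the removed two-step sequence
 *
 *   edges_reroute(op, res);   (moved ALL edges, including the one from res)
 *   set_Proj_pred(res, op);   (so the Proj had to be reattached afterwards)
 *
 * into a single call. */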
} else {
if (get_irn_n_edges(left) == 2)
*/
static void peephole_ia32_Return(ir_node *node)
{
- ir_node *irn;
-
if (!ia32_cg_config.use_pad_return)
return;
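/* ia32_cg_config.use_pad_return gates this peephole; the flag name
 * suggests the transformation inserts padding in front of a Return,
 * e.g. a NOP, rather than changing the Return itself. */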
*/
static void peephole_IncSP_Store_to_push(ir_node *irn)
{
- int i;
- int maxslot;
- int inc_ofs;
- ir_node *node;
- ir_node *stores[MAXPUSH_OPTIMIZE];
- ir_node *block;
- ir_graph *irg;
- ir_node *curr_sp;
- ir_mode *spmode;
- ir_node *first_push = NULL;
- ir_edge_t const *edge;
- ir_edge_t const *next;
+ int i;
+ int maxslot;
+ int inc_ofs;
+ ir_node *node;
+ ir_node *stores[MAXPUSH_OPTIMIZE];
+ ir_node *block;
+ ir_graph *irg;
+ ir_node *curr_sp;
+ ir_mode *spmode;
+ ir_node *first_push = NULL;
memset(stores, 0, sizeof(stores));
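/* stores[] maps each 4-byte stack slot below the IncSP to the Store
 * writing it; the memset marks all slots empty.  A sketch of the intended
 * rewrite, assuming two adjacent slots are found:
 *
 *   IncSP 8; Store [sp+4] = a; Store [sp+0] = b
 *     =>  Push a; Push b   (each converted slot shrinks inc_ofs by 4)
 */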
mem_proj = new_r_Proj(push, mode_M, pn_ia32_Push_M);
/* rewire Store Projs */
- foreach_out_edge_safe(store, edge, next) {
+ foreach_out_edge_safe(store, edge) {
ir_node *proj = get_edge_src_irn(edge);
if (!is_Proj(proj))
continue;
inc_ofs -= 4;
}
- foreach_out_edge_safe(irn, edge, next) {
+ foreach_out_edge_safe(irn, edge) {
ir_node *const src = get_edge_src_irn(edge);
int const pos = get_edge_src_pos(edge);
static void peephole_store_incsp(ir_node *store)
{
dbg_info *dbgi;
- ir_node *node;
ir_node *block;
ir_node *noreg;
ir_node *mem;
for (++i; i <= maxslot; ++i) {
ir_node *load = loads[i];
ir_node *mem, *pop;
- const ir_edge_t *edge, *tmp;
const arch_register_t *reg;
mem = get_irn_n(load, n_ia32_mem);
sched_add_before(irn, pop);
/* rewire the Load's Projs to the new Pop */
- foreach_out_edge_safe(load, edge, tmp) {
+ foreach_out_edge_safe(load, edge) {
ir_node *proj = get_edge_src_irn(edge);
set_Proj_pred(proj, pop);
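/* Swapping only the Proj predecessors assumes ia32 Load and Pop expose
 * matching Proj numbers for their result and memory outputs; the Proj
 * numbers themselves stay untouched. */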
*/
static void peephole_ia32_Lea(ir_node *node)
{
- ir_graph *irg;
ir_node *base;
ir_node *index;
const arch_register_t *base_reg;
dbg_info *dbgi;
ir_node *block;
ir_node *res;
- ir_node *noreg;
- ir_node *nomem;
assert(is_ia32_Lea(node));
make_add:
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
- irg = get_irn_irg(node);
- noreg = ia32_new_NoReg_gp(irg);
- nomem = get_irg_no_mem(irg);
+ ir_graph *irg = get_irn_irg(node);
+ ir_node *noreg = ia32_new_NoReg_gp(irg);
+ ir_node *nomem = get_irg_no_mem(irg);
res = new_bd_ia32_Add(dbgi, block, noreg, noreg, nomem, op1, op2);
arch_set_irn_register(res, out_reg);
set_ia32_commutative(res);
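/* Illustrative effect: an Lea acting as a plain two-operand add (no
 * displacement, no scale), e.g. lea eax, [eax+ebx], becomes the shorter
 * add eax, ebx.  Marking the Add commutative lets later phases swap
 * op1/op2 when that is profitable. */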
make_shl:
dbgi = get_irn_dbg_info(node);
block = get_nodes_block(node);
- irg = get_irn_irg(node);
- noreg = ia32_new_NoReg_gp(irg);
- nomem = get_irg_no_mem(irg);
res = new_bd_ia32_Shl(dbgi, block, op1, op2);
arch_set_irn_register(res, out_reg);
goto exchange;
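/* Analogous sketch for the scale-only case: an Lea that merely scales
 * one operand by a power of two, e.g. lea eax, [eax*4], becomes
 * shl eax, 2; op2 presumably carries the log2 of the scale. */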
*/
/* pass 1 */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_peephole_optimisation(op_ia32_Cmp, peephole_ia32_Cmp);
register_peephole_optimisation(op_ia32_Cmp8Bit, peephole_ia32_Cmp);
register_peephole_optimisation(op_ia32_Lea, peephole_ia32_Lea);
be_peephole_opt(irg);
/* pass 2 */
- clear_irp_opcodes_generic_func();
+ ir_clear_opcodes_generic_func();
register_peephole_optimisation(op_ia32_Const, peephole_ia32_Const);
register_peephole_optimisation(op_be_IncSP, peephole_be_IncSP);
register_peephole_optimisation(op_ia32_Test, peephole_ia32_Test);
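/* The registration helper presumably parks each callback in the opcode's
 * generic function slot, roughly:
 *
 *   static void register_peephole_optimisation(ir_op *op, peephole_opt_func func)
 *   {
 *       assert(op->ops.generic == NULL);
 *       op->ops.generic = (op_func) func;
 *   }
 *
 * which is why every pass first clears the table with
 * ir_clear_opcodes_generic_func(). */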
static inline void try_kill(ir_node *node)
{
if (get_irn_mode(node) == mode_T) {
- const ir_edge_t *edge, *next;
- foreach_out_edge_safe(node, edge, next) {
+ foreach_out_edge_safe(node, edge) {
ir_node *proj = get_edge_src_irn(edge);
try_kill(proj);
}
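/* A mode_T node only becomes dead once all of its Projs are gone, so the
 * Projs are killed first; the _safe iterator tolerates edges disappearing
 * while the loop runs. */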
if (get_mode_size_bits(conv_mode) < get_mode_size_bits(store_mode))
return;
+ ir_fprintf(stderr, "Optimisation warning: unoptimized ia32 Store(Conv) (%+F, %+F)\n", node, pred);
set_irn_n(node, n_ia32_Store_val, get_irn_n(pred, n_ia32_Conv_I2I_val));
if (get_irn_n_edges(pred_proj) == 0) {
kill_node(pred_proj);
}
/* kill the conv */
+ ir_fprintf(stderr, "Optimisation warning: unoptimized ia32 Conv(Load) (%+F, %+F)\n", node, predpred);
exchange(node, pred);
}
}
}
+ ir_fprintf(stderr, "Optimisation warning: unoptimized ia32 Conv(Conv) (%+F, %+F)\n", node, pred);
/* Some user (like Phis) won't be happy if we change the mode. */
set_irn_mode(result_conv, get_irn_mode(node));