assert(n < n_in);
in[n++] = get_irn_n(irn, i);
}
- keep = be_new_Keep(get_nodes_block(irn), n_in, in);
+ keep = be_new_Barrier(get_nodes_block(irn), n_in, in);
+ keep_alive(keep);
sched_add_before(irn, keep);
}
static void kill_barriers(ir_graph *irg)
{
ir_node *end_blk = get_irg_end_block(irg);
- ir_node *start_blk;
+ ir_node *start_blk = get_irg_start_block(irg);
int i;
/* skip the barrier on all return blocks */
ir_node *be_ret = get_Block_cfgpred(end_blk, i);
ir_node *ret_blk = get_nodes_block(be_ret);
+ if (ret_blk == start_blk)
+ continue;
+
skip_barrier(ret_blk, irg);
}
return pred;
}
-void be_peephole_opt(be_irg_t *birg)
+void be_peephole_opt(ir_graph *irg)
{
- ir_graph *irg = be_get_birg_irg(birg);
- unsigned n_classes;
- unsigned i;
+ unsigned n_classes;
+ unsigned i;
/* barrier nodes are used for register allocation. They hinder
* peephole optimizations, so remove them here. */
/* we sometimes find BadE nodes in float apps like optest_float.c or
* kahansum.c for example... */
- be_liveness_invalidate(birg->lv);
- be_liveness_assure_sets(be_assure_liveness(birg));
+ be_liveness_invalidate(be_get_irg_liveness(irg));
+ be_liveness_assure_sets(be_assure_liveness(irg));
- arch_env = be_get_birg_arch_env(birg);
- lv = be_get_birg_liveness(birg);
+ arch_env = be_get_irg_arch_env(irg);
+ lv = be_get_irg_liveness(irg);
n_classes = arch_env_get_n_reg_class(arch_env);
register_values = XMALLOCN(ir_node**, n_classes);