/**
- * must be called from peephole optimisations before a node will be killed
- * and its users will be redirected to new_node.
- * so bepeephole can update it's internal state.
+ * Must be called from peephole optimisations before a node is killed and
+ * its users are redirected to new_node, so bepeephole can update its
+ * internal state.
*
- * Note: killing a node and rewiring os only allowed if new_node produces
+ * Note: killing a node and rewiring is only allowed if new_node produces
* the same registers as old_node.
*/
static void be_peephole_before_exchange(const ir_node *old_node,
}
}
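For illustration, a minimal sketch of how a peephole optimisation inside this file would be expected to use the hook before replacing a node. The wrapper name replace_in_peephole is made up; exchange() and sched_remove() are the usual libFirm graph-modification and backend-scheduling helpers already used elsewhere in this diff, and the file's normal includes are assumed.

    /* Hedged sketch: notify bepeephole, drop the old node from the schedule,
     * then redirect its users to the replacement. As noted above, this is
     * only valid when new_node produces the same registers as old_node. */
    static void replace_in_peephole(ir_node *old_node, ir_node *new_node)
    {
        be_peephole_before_exchange(old_node, new_node);
        sched_remove(old_node);          /* take old_node out of the block schedule */
        exchange(old_node, new_node);    /* rewire all users to new_node */
    }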
-static void kill_node_and_preds(ir_node *node)
-{
- ir_graph *irg = get_irn_irg(node);
- int arity, i;
-
- arity = get_irn_arity(node);
- for (i = 0; i < arity; ++i) {
- ir_node *pred = get_irn_n(node, i);
-
- set_irn_n(node, i, new_r_Bad(irg));
- if (get_irn_n_edges(pred) != 0)
- continue;
-
- kill_node_and_preds(pred);
- }
-
- if (!is_Proj(node))
- sched_remove(node);
- kill_node(node);
-}
-
-static void keep_alive_barrier_operand(ir_node *block, const ir_node* barrier, int pos)
-{
- ir_node *operand = get_irn_n(barrier, pos);
- ir_node *keep = sched_next(skip_Proj(operand));
-
- /* There already is a keep in the schedule. */
- if (be_is_Keep(keep)) {
- const arch_register_class_t *cls = arch_get_irn_reg_class(barrier, pos);
-
- be_Keep_add_node(keep, cls, operand);
- }
- else {
- ir_node *in[1] = {operand};
-
- keep = be_new_Keep(block, 1, in);
- sched_add_after(skip_Proj(operand), keep);
- }
-}
-
-/**
- * Walk through the block schedule and skip all barrier nodes.
- */
-static void skip_barrier(ir_node *block, ir_graph *irg)
-{
- ir_node *irn;
-
- sched_foreach_reverse(block, irn) {
- int arity;
- unsigned *used;
- size_t n_used;
- const ir_edge_t *edge, *next;
-
- if (!be_is_Barrier(irn))
- continue;
-
- /* track which outputs are actually used, as we have to create
- * keep nodes for unused outputs */
- arity = get_irn_arity(irn);
- rbitset_alloca(used, arity);
-
- foreach_out_edge_safe(irn, edge, next) {
- ir_node *proj = get_edge_src_irn(edge);
- int pn;
- ir_node *pred;
-
- if (is_Anchor(proj))
- continue;
-
- pn = (int) get_Proj_proj(proj);
- pred = get_irn_n(irn, pn);
-
- rbitset_set(used, pn);
-
- /* We may need to reschedule be_Keeps to keep live-ranges short. */
- if (get_irn_n_edges(proj) == 1) {
- const ir_edge_t *proj_edge = get_irn_out_edge_first(proj);
- ir_node *proj_succ = get_edge_src_irn(proj_edge);
-
- if (be_is_Keep(proj_succ)) {
- int succ_arity = get_irn_arity(proj_succ);
-
- keep_alive_barrier_operand(block, irn, pn);
-
- /* Disconnect old be_Keep. */
- if (succ_arity > 1) {
- int edge_pos = get_edge_src_pos(proj_edge);
- int new_arity = succ_arity - 1;
- int pos;
- int new_pos = 0;
- ir_node **ins;
-
- NEW_ARR_A(ir_node *, ins, succ_arity);
- for (pos = 0; pos < succ_arity; ++pos) {
- if (pos != edge_pos)
- ins[new_pos++] = get_irn_n(proj_succ, pos);
- }
-
- set_irn_in(proj_succ, new_arity, ins);
- }
- else {
- sched_remove(proj_succ);
- kill_node(proj_succ);
- }
- }
- }
-
- edges_reroute_kind(proj, pred, EDGE_KIND_NORMAL, irg);
- edges_reroute_kind(proj, pred, EDGE_KIND_DEP, irg);
- }
-
- /* the barrier also had the effect of a Keep for unused inputs.
- * we now have to create an explicit Keep for them */
- n_used = rbitset_popcount(used, arity);
- if (n_used < (size_t) arity) {
- int i;
-
- for (i = 0; i < arity; ++i) {
- if (rbitset_is_set(used, i))
- continue;
-
- keep_alive_barrier_operand(block, irn, i);
- }
- }
-
- kill_node_and_preds(irn);
- break;
- }
-}
-
-/**
- * Kill the Barrier nodes for better peephole optimization.
- */
-static void kill_barriers(ir_graph *irg)
-{
- ir_node *end_blk = get_irg_end_block(irg);
- ir_node *start_blk = get_irg_start_block(irg);
- int i;
-
- /* skip the barrier on all return blocks */
- for (i = get_Block_n_cfgpreds(end_blk) - 1; i >= 0; --i) {
- ir_node *be_ret = get_Block_cfgpred(end_blk, i);
- ir_node *ret_blk = get_nodes_block(be_ret);
-
- if (ret_blk == start_blk)
- continue;
-
- skip_barrier(ret_blk, irg);
- }
-
- /* skip the barrier on the start block */
- start_blk = get_irg_start_block(irg);
- skip_barrier(start_blk, irg);
-}
-
/**
- * Check whether the node has only one user. Explicitly ignore the anchor.
+ * Check whether the node has only one user. Explicitly ignore the anchor
+ * and keep-alive (End) edges.
*/
-static int has_only_one_user(ir_node *node)
+bool be_has_only_one_user(ir_node *node)
{
int n = get_irn_n_edges(node);
+ int n_users;
const ir_edge_t *edge;
if (n <= 1)
return 1;
- if (n > 2)
- return 0;
-
+ n_users = 0;
foreach_out_edge(node, edge) {
ir_node *src = get_edge_src_irn(edge);
- if (is_Anchor(src))
- return 1;
+ /* ignore anchor and keep-alive edges */
+ if (is_Anchor(src) || is_End(src))
+ continue;
+ n_users++;
}
- return 0;
+ return n_users == 1;
}
/*
if (!be_is_IncSP(pred))
return node;
- if (!has_only_one_user(pred))
+ if (!be_has_only_one_user(pred))
return node;
pred_offs = be_get_IncSP_offset(pred);
curr_offs = be_get_IncSP_offset(node);
-
- if (pred_offs == BE_STACK_FRAME_SIZE_EXPAND) {
- if (curr_offs != BE_STACK_FRAME_SIZE_SHRINK) {
- return node;
- }
- offs = 0;
- } else if (pred_offs == BE_STACK_FRAME_SIZE_SHRINK) {
- if (curr_offs != BE_STACK_FRAME_SIZE_EXPAND) {
- return node;
- }
- offs = 0;
- } else if (curr_offs == BE_STACK_FRAME_SIZE_EXPAND ||
- curr_offs == BE_STACK_FRAME_SIZE_SHRINK) {
- return node;
- } else {
- offs = curr_offs + pred_offs;
- }
+ offs = curr_offs + pred_offs;
/* add node offset to pred and remove our IncSP */
be_set_IncSP_offset(pred, offs);
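With the BE_STACK_FRAME_SIZE_EXPAND/SHRINK special cases removed, two adjacent stack adjustments simply sum their offsets. A minimal sketch of the resulting merge, assuming the be_IncSP accessors used above and a be_peephole_exchange() helper that performs the notify-and-exchange step; the function name merge_adjacent_incsp is illustrative, not the code in this file.

    static ir_node *merge_adjacent_incsp(ir_node *node)
    {
        ir_node *pred = be_get_IncSP_pred(node);

        /* only merge when the predecessor is an IncSP that nothing else uses */
        if (!be_is_IncSP(pred) || !be_has_only_one_user(pred))
            return node;

        /* fold both adjustments into the predecessor ... */
        be_set_IncSP_offset(pred, be_get_IncSP_offset(pred) + be_get_IncSP_offset(node));
        /* ... and let pred take over all users of node */
        be_peephole_exchange(node, pred);
        return pred;
    }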
unsigned n_classes;
unsigned i;
- /* barrier nodes are used for register allocations. They hinders
- * peephole optimizations, so remove them here. */
- kill_barriers(irg);
-
/* we sometimes find BadE nodes in float apps like optest_float.c or
* kahansum.c for example... */
be_liveness_invalidate(be_get_irg_liveness(irg));
xfree(register_values);
}
-BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole);
+BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole)
void be_init_peephole(void)
{
FIRM_DBG_REGISTER(dbg, "firm.be.peephole");