cls_idx = arch_register_class_index(cls);
//assert(register_values[cls_idx][reg_idx] != NULL);
- DBG((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
+ DB((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
register_values[cls_idx][reg_idx] = NULL;
}
reg_idx = arch_register_get_index(reg);
cls_idx = arch_register_class_index(cls);
- DBG((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
+ DB((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
register_values[cls_idx][reg_idx] = node;
}
unsigned reg_idx;
unsigned cls_idx;
- DBG((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));
+ DB((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));
if (current_node == old_node) {
/* next node to be processed will be killed. Its scheduling predecessor
}
assert(lv->nodes && "live sets must be computed");
- DBG((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
+ DB((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
be_lv_foreach(lv, block, be_lv_state_end, l) {
ir_node *node = be_lv_get_irn(lv, block, l);
set_reg_value(node);
}
- DBG((dbg, LEVEL_1, "\nstart processing\n"));
+ DB((dbg, LEVEL_1, "\nstart processing\n"));
/* walk the block from last insn to the first */
current_node = sched_last(block);
if (peephole_node == NULL)
continue;
+ DB((dbg, LEVEL_2, "optimize %+F\n", current_node));
peephole_node(current_node);
assert(!is_Bad(current_node));
}
/**
* Walk through the block schedule and skip all barrier nodes.
*/
-static void skip_barrier(ir_node *ret_blk, ir_graph *irg)
+static void skip_barrier(ir_node *block, ir_graph *irg)
{
ir_node *irn;
- sched_foreach_reverse(ret_blk, irn) {
+ sched_foreach_reverse(block, irn) {
+ int arity;
+ unsigned *used;
+ unsigned n_used;
const ir_edge_t *edge, *next;
if (!be_is_Barrier(irn))
continue;
+ /* track which outputs are actually used, as we have to create
+ * keep nodes for unused outputs */
+ arity = get_irn_arity(irn);
+ rbitset_alloca(used, arity);
+
foreach_out_edge_safe(irn, edge, next) {
ir_node *proj = get_edge_src_irn(edge);
int pn;
pn = (int) get_Proj_proj(proj);
pred = get_irn_n(irn, pn);
+ rbitset_set(used, pn);
+
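+ /* redirect all users of the Proj to the barrier's operand */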
edges_reroute_kind(proj, pred, EDGE_KIND_NORMAL, irg);
edges_reroute_kind(proj, pred, EDGE_KIND_DEP, irg);
}
+ /* the barrier also had the effect of a Keep for unused inputs.
+ * we now have to create an explicit Keep for them */
+ n_used = rbitset_popcount(used, arity);
+ if (n_used < (unsigned) arity) {
+ int n_in = arity - (int) n_used;
+ ir_node **in = ALLOCAN(ir_node*, n_in);
+ int i = 0;
+ int n = 0;
+ ir_node *keep;
+
+ for (i = 0; i < arity; ++i) {
+ if (rbitset_is_set(used, i))
+ continue;
+ assert(n < n_in);
+ in[n++] = get_irn_n(irn, i);
+ }
+ keep = be_new_Barrier(get_nodes_block(irn), n_in, in);
+ keep_alive(keep);
+ sched_add_before(irn, keep);
+ }
+
kill_node_and_preds(irn);
break;
}
static void kill_barriers(ir_graph *irg)
{
ir_node *end_blk = get_irg_end_block(irg);
- ir_node *start_blk;
+ ir_node *start_blk = get_irg_start_block(irg);
int i;
/* skip the barrier on all return blocks */
ir_node *be_ret = get_Block_cfgpred(end_blk, i);
ir_node *ret_blk = get_nodes_block(be_ret);
+ if (ret_blk == start_blk)
+ continue;
+
skip_barrier(ret_blk, irg);
}
return pred;
}
-void be_peephole_opt(be_irg_t *birg)
+void be_peephole_opt(ir_graph *irg)
{
- ir_graph *irg = be_get_birg_irg(birg);
- unsigned n_classes;
- unsigned i;
+ unsigned n_classes;
+ unsigned i;
/* barrier nodes are used for register allocation. They hinder
 * peephole optimizations, so remove them here. */
/* we sometimes find BadE nodes in float apps such as optest_float.c
 * or kahansum.c ... */
- be_liveness_invalidate(birg->lv);
- be_liveness_assure_sets(be_assure_liveness(birg));
+ be_liveness_invalidate(be_get_irg_liveness(irg));
+ be_liveness_assure_sets(be_assure_liveness(irg));
- arch_env = be_get_birg_arch_env(birg);
- lv = be_get_birg_liveness(birg);
+ arch_env = be_get_irg_arch_env(irg);
+ lv = be_get_irg_liveness(irg);
n_classes = arch_env_get_n_reg_class(arch_env);
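+ /* one node slot per register, for every register class */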
- register_values = ALLOCAN(ir_node**, n_classes);
+ register_values = XMALLOCN(ir_node**, n_classes);
for (i = 0; i < n_classes; ++i) {
const arch_register_class_t *cls = arch_env_get_reg_class(arch_env, i);
unsigned n_regs = arch_register_class_n_regs(cls);
- register_values[i] = ALLOCAN(ir_node*, n_regs);
+ register_values[i] = XMALLOCN(ir_node*, n_regs);
}
irg_block_walk_graph(irg, process_block, NULL, NULL);
+
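+ /* free the per-class register value arrays */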
+ for (i = 0; i < n_classes; ++i) {
+ xfree(register_values[i]);
+ }
+ xfree(register_values);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole);