DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
+/* see comment in compute_liveness() */
+#define LV_COMPUTE_SORTED
#define LV_STD_SIZE 64
#define LV_USE_BINARY_SEARCH
#undef LV_INTESIVE_CHECKS
/*
 * Phase data initializer: allocate one liveness-info array on the phase
 * obstack. Entry 0 acts as a head record whose n_size stores the payload
 * capacity (LV_STD_SIZE - 1 slots); the whole array is zeroed.
 */
static void *lv_phase_data_init(ir_phase *phase, ir_node *irn, void *old)
{
struct _be_lv_info_t *info = phase_alloc(phase, LV_STD_SIZE * sizeof(info[0]));
+ /* irn/old are unused; casts keep the phase-callback signature warning-free. */
+ (void) irn;
+ (void) old;
+
memset(info, 0, LV_STD_SIZE * sizeof(info[0]));
info[0].u.head.n_size = LV_STD_SIZE - 1;
return info;
}
+/* Graph-walker callback: append every liveness-relevant node to the
+ * obstack passed in data (used by compute_liveness() to gather all
+ * candidate nodes before processing them). */
+static void collect_nodes(ir_node *irn, void *data)
+{
+ struct obstack *obst = data;
+ if (is_liveness_node(irn))
+ obstack_ptr_grow(obst, irn);
+}
+
+/* qsort() comparator: order nodes ascending by their irn index.
+ * NOTE(review): assumes get_irn_idx() yields non-negative values, so the
+ * subtraction cannot overflow — verify against the irn index invariants. */
+static int node_idx_cmp(const void *a, const void *b)
+{
+ const ir_node *p = *(ir_node **) a;
+ const ir_node *q = *(ir_node **) b;
+ int ia = get_irn_idx(p);
+ int ib = get_irn_idx(q);
+ return ia - ib;
+}
+
/*
 * (Re)compute the liveness information for lv->irg: collect all
 * liveness-relevant nodes into a temporary obstack, optionally sort them
 * by index, then run liveness_for_node() over each of them. Also
 * registers the node-info dump hook for this lv.
 */
static void compute_liveness(be_lv_t *lv)
{
+ struct obstack obst;
struct _lv_walker_t w;
+ ir_node **nodes;
+ int i, n;
+
+ obstack_init(&obst);
+ irg_walk_graph(lv->irg, collect_nodes, NULL, &obst);
+ n = obstack_object_size(&obst) / sizeof(nodes[0]);
+ nodes = obstack_finish(&obst);
+
+ /*
+ * Inserting the variables sorted by their ID is probably
+ * more efficient, since the binary sorted set insertion
+ * will not need to move around the data.
+ * However, whether sorting the variables a priori pays off
+ * still needs to be checked, hence the define.
+ */
+#ifdef LV_COMPUTE_SORTED
+ qsort(nodes, n, sizeof(nodes[0]), node_idx_cmp);
+#endif
+
w.lv = lv;
- w.data = bitset_malloc(get_irg_last_idx(lv->irg));
- irg_walk_graph(lv->irg, liveness_for_node, NULL, &w);
- bitset_free(w.data);
+ /* The scratch bitset lives on the same obstack as the node array, so
+ * the single obstack_free() below releases both. */
+ w.data = bitset_obstack_alloc(&obst, get_irg_last_idx(lv->irg));
+
+ for (i = 0; i < n; ++i)
+ liveness_for_node(nodes[i], &w);
+
+ obstack_free(&obst, NULL);
+ register_hook(hook_node_info, &lv->hook_info);
+}
+
+/* Lazily build the full liveness sets: allocate the nodes bitset, set up
+ * the phase and run compute_liveness() — a no-op if the sets already
+ * exist (lv->nodes non-NULL serves as the "computed" flag). */
+void be_liveness_assure_sets(be_lv_t *lv)
+{
+ if (!lv->nodes) {
+ lv->nodes = bitset_malloc(2 * get_irg_last_idx(lv->irg));
+ phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
+ compute_liveness(lv);
+ }
+}
+
+/* Ensure liveness queries can be answered: without USE_LIVE_CHK the full
+ * sets must be computed; with it the lazy checker is used instead, so
+ * nothing needs to be done here. */
+void be_liveness_assure_chk(be_lv_t *lv)
+{
+#ifndef USE_LIVE_CHK
+ be_liveness_assure_sets(lv);
+#else
+ (void) lv;
+#endif
+}
+
+/* Throw away the computed liveness sets (if any): unregister the dump
+ * hook, free the phase data and the nodes bitset. Resetting lv->nodes to
+ * NULL marks the sets as "not computed". Safe on NULL lv. */
+void be_liveness_invalidate(be_lv_t *lv)
+{
+ if (lv && lv->nodes) {
+ unregister_hook(hook_node_info, &lv->hook_info);
+ phase_free(&lv->ph);
+ bitset_free(lv->nodes);
+ lv->nodes = NULL;
+ }
}
/* Compute the inter block liveness for a graph. */
memset(lv, 0, sizeof(lv[0]));
lv->irg = irg;
- lv->nodes = bitset_malloc(2 * get_irg_last_idx(irg));
+#ifdef USE_LIVE_CHK
+ lv->lvc = lv_chk_new(irg);
+#endif
lv->hook_info.context = lv;
lv->hook_info.hook._hook_node_info = lv_dump_block;
- register_hook(hook_node_info, &lv->hook_info);
- phase_init(&lv->ph, "liveness", irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
- compute_liveness(lv);
return lv;
}
/* Release all liveness information and the lv object itself. */
void be_liveness_free(be_lv_t *lv)
{
- unregister_hook(hook_node_info, &lv->hook_info);
- phase_free(&lv->ph);
- bitset_free(lv->nodes);
+ be_liveness_invalidate(lv);
free(lv);
}
/* Remove a single irn from the liveness information; a no-op when the
 * sets have not been computed (lv->nodes == NULL). */
void be_liveness_remove(be_lv_t *lv, ir_node *irn)
{
- unsigned idx = get_irn_idx(irn);
- struct _lv_walker_t w;
+ if (lv->nodes) {
+ unsigned idx = get_irn_idx(irn);
+ struct _lv_walker_t w;
- /*
- * Removes a single irn from the liveness information.
- * Since an irn can only be live at blocks dominated by the block of its
- * definition, we only have to process that dominance subtree.
- */
- w.lv = lv;
- w.data = irn;
- dom_tree_walk(get_nodes_block(irn), lv_remove_irn_walker, NULL, &w);
- if(idx < bitset_size(lv->nodes))
- bitset_clear(lv->nodes, idx);
+ /*
+ * Removes a single irn from the liveness information.
+ * Since an irn can only be live at blocks dominated by the block of its
+ * definition, we only have to process that dominance subtree.
+ */
+ w.lv = lv;
+ w.data = irn;
+ dom_tree_walk(get_nodes_block(irn), lv_remove_irn_walker, NULL, &w);
+ if(idx < bitset_size(lv->nodes))
+ bitset_clear(lv->nodes, idx);
+ }
}
/* (Re)compute the liveness contribution of a single irn; a no-op when
 * the sets have not been computed (lv->nodes == NULL). Uses a temporary
 * visited bitset for the walk, freed before returning. */
void be_liveness_introduce(be_lv_t *lv, ir_node *irn)
{
- struct _lv_walker_t w;
- w.lv = lv;
- w.data = bitset_malloc(get_irg_last_idx(lv->irg));
- liveness_for_node(irn, &w);
- bitset_free(w.data);
+ if (lv->nodes) {
+ struct _lv_walker_t w;
+ w.lv = lv;
+ w.data = bitset_malloc(get_irg_last_idx(lv->irg));
+ liveness_for_node(irn, &w);
+ bitset_free(w.data);
+ }
}
void be_liveness_update(be_lv_t *lv, ir_node *irn)
be_liveness_introduce(lv, irn);
}
-static void lv_add_missing_walker(ir_node *irn, void *data)
-{
- struct _lv_walker_t *w = data;
- if(!is_Block(irn) && !bitset_contains_irn(w->lv->nodes, irn)) {
- liveness_for_node(irn, w);
- }
-}
-
-void be_liveness_add_missing(be_lv_t *lv)
-{
- struct _lv_walker_t w;
- w.lv = lv;
- w.data = bitset_malloc(get_irg_last_idx(lv->irg));
- irg_walk_graph(lv->irg, lv_add_missing_walker, NULL, &w);
- bitset_free(w.data);
-}
-
static void lv_check_walker(ir_node *bl, void *data)
{
struct _lv_walker_t *w = data;
return !problem_found;
}
-pset *be_liveness_transfer(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *irn, pset *live)
-{
- int i, n;
-
- /* You should better break out of your loop when hitting the first phi function. */
- assert(!is_Phi(irn) && "liveness_transfer produces invalid results for phi nodes");
-
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
- ir_node *del = pset_remove_ptr(live, irn);
- (void) del;
- assert(irn == del);
- }
-
- for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
- ir_node *op = get_irn_n(irn, i);
-
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
- pset_insert_ptr(live, op);
- }
-
- return live;
-}
-
-void be_liveness_transfer_ir_nodeset(const arch_env_t *arch_env,
- const arch_register_class_t *cls,
- ir_node *node, ir_nodeset_t *nodeset)
+void be_liveness_transfer(const arch_env_t *arch_env,
+ const arch_register_class_t *cls,
+ ir_node *node, ir_nodeset_t *nodeset)
{
int i, arity;
* function. */
assert(!is_Phi(node) && "liveness_transfer produces invalid results for phi nodes");
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
+#ifndef SCHEDULE_PROJS
+ if (get_irn_mode(node) == mode_T) {
+ const ir_edge_t *edge;
+
+ foreach_out_edge(node, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, proj)) {
+ ir_nodeset_remove(nodeset, proj);
+ }
+ }
+ }
+#endif
+
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, node)) {
ir_nodeset_remove(nodeset, node);
}
arity = get_irn_arity(node);
- for(i = 0; i < arity; ++i) {
+ for (i = 0; i < arity; ++i) {
ir_node *op = get_irn_n(node, i);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
+ if (arch_irn_consider_in_reg_alloc(arch_env, cls, op))
ir_nodeset_insert(nodeset, op);
}
}
-pset *be_liveness_end_of_block(const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *bl, pset *live)
-{
- int i;
- be_lv_foreach(lv, bl, be_lv_state_end, i) {
- ir_node *irn = be_lv_get_irn(lv, bl, i);
- if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
- pset_insert_ptr(live, irn);
- }
-
- return live;
-}
-
-void be_liveness_end_of_block_ir_nodeset(const be_lv_t *lv,
- const arch_env_t *arch_env,
- const arch_register_class_t *cls,
- const ir_node *block,
- ir_nodeset_t *live)
+void be_liveness_end_of_block(const be_lv_t *lv, const arch_env_t *arch_env,
+ const arch_register_class_t *cls,
+ const ir_node *block, ir_nodeset_t *live)
{
int i;
+ assert(lv->nodes && "live sets must be computed");
be_lv_foreach(lv, block, be_lv_state_end, i) {
ir_node *node = be_lv_get_irn(lv, block, i);
if(!arch_irn_consider_in_reg_alloc(arch_env, cls, node))
-pset *be_liveness_nodes_live_at(const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
+void be_liveness_nodes_live_at(const be_lv_t *lv, const arch_env_t *arch_env,
+ const arch_register_class_t *cls,
+ const ir_node *pos, ir_nodeset_t *live)
{
const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
ir_node *irn;
* exit immediately, so that this node is still live
*/
if(irn == pos)
- return live;
+ return;
be_liveness_transfer(arch_env, cls, irn, live);
}
-
- return live;
}
-pset *be_liveness_nodes_live_at_input(const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
+/* Fill `live` with the nodes live right before `pos`: start from the
+ * live-end set of pos' block and apply the backward transfer function
+ * over the schedule up to and including pos itself. Requires the full
+ * liveness sets to be computed (see the assert). */
+void be_liveness_nodes_live_at_input(const be_lv_t *lv,
+ const arch_env_t *arch_env,
+ const arch_register_class_t *cls,
+ const ir_node *pos, ir_nodeset_t *live)
{
const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
ir_node *irn;
+ assert(lv->nodes && "live sets must be computed");
be_liveness_end_of_block(lv, arch_env, cls, bl, live);
sched_foreach_reverse(bl, irn) {
be_liveness_transfer(arch_env, cls, irn, live);
if(irn == pos)
- return live;
+ return;
}
-
- return live;
}
static void collect_node(ir_node *irn, void *data)
obstack_ptr_grow(obst, irn);
}
-void be_live_chk_compare(be_irg_t *birg)
+void be_live_chk_compare(be_lv_t *lv, lv_chk_t *lvc)
{
- ir_graph *irg = be_get_birg_irg(birg);
+ ir_graph *irg = lv->irg;
struct obstack obst;
ir_node **nodes;
ir_node **blocks;
- be_lv_t *lv;
- lv_chk_t *lvc;
int i, j;
- be_assure_liveness(birg);
- be_assure_liveness_chk(birg);
- lv = be_get_birg_liveness(birg);
- lvc = be_get_birg_liveness_chk(birg);
-
obstack_init(&obst);
irg_block_walk_graph(irg, collect_node, NULL, &obst);