/**
 * Irn walker: computes the liveness sets induced by a single value.
 *
 * Scans all uses of @p irn. For a use in a block other than the
 * definition block, the value is marked live-in at the use block and
 * live at the end of all control-flow predecessors (via
 * live_end_at_block). A Phi use is handled specially: the value only
 * reaches the Phi through the corresponding predecessor block, so it is
 * marked at the end of that block alone.
 *
 * @param irn   The value to compute liveness for.
 * @param data  A struct _lv_walker_t; its lv member receives the results,
 *              its data member is a scratch bitset marking the blocks
 *              already visited for this value.
 */
static void liveness_for_node(ir_node *irn, void *data)
{
	struct _lv_walker_t *walker = data;
	be_lv_t *lv = walker->lv;
	bitset_t *visited = walker->data;
	const ir_edge_t *edge;
	ir_node *def_block;

	/* Don't compute liveness information for non-data nodes. */
	if(!is_liveness_node(irn))
		return;

	/* The visited set is per-value scratch state; reset it for this node. */
	bitset_clear_all(visited);
	def_block = get_nodes_block(irn);

	/* Go over all uses of the value */
	foreach_out_edge(irn, edge) {
		ir_node *use = edge->src;
		ir_node *use_block;

		DBG((dbg, LEVEL_4, "%+F: use at %+F, pos %d in %+F\n", irn, use, edge->pos, get_block(use)));
		assert(get_irn_n(use, edge->pos) == irn);

		/*
		 * If the usage is no data node, skip this use, since it does not
		 * affect the liveness of the node.
		 */
		if(!is_liveness_node(use))
			continue;

		/* Get the block where the usage is in. */
		use_block = get_nodes_block(use);

		/*
		 * If the use is a phi function, determine the corresponding block
		 * through which the value reaches the phi function and mark the
		 * value as live out of that block.
		 */
		if(is_Phi(use)) {
			ir_node *pred_block = get_Block_cfgpred_block(use_block, edge->pos);
			live_end_at_block(lv, irn, pred_block, visited, 0);
		}

		/*
		 * Else, the value is live in at this block. Mark it and call live
		 * out on the predecessors.
		 */
		else if(def_block != use_block) {
			int i, n;

			mark_live_in(lv, use_block, irn);

			/* The value must arrive over every CFG predecessor. */
			for(i = 0, n = get_Block_n_cfgpreds(use_block); i < n; ++i) {
				ir_node *pred_block = get_Block_cfgpred_block(use_block, i);
				live_end_at_block(lv, irn, pred_block, visited, 1);
			}
		}
	}
}
+
+static void lv_remove_irn_walker(ir_node *bl, void *data)
+{
+ lv_remove_walker_t *w = data;
+ be_lv_remove(w->lv, bl, w->irn);
+}
+
/**
 * Renders the three liveness state bits as a fixed three-character
 * string: position 0 = 'i' (live in), 1 = 'e' (live end),
 * 2 = 'o' (live out); '-' marks an unset bit.
 *
 * @param flags  Liveness flag word; only the low three bits are used.
 * @return       Pointer to a static read-only string — do not free.
 */
static const char *lv_flags_to_str(unsigned flags)
{
	static const char *const names[] = {
		[0] = "---",
		[1] = "i--",
		[2] = "-e-",
		[3] = "ie-",
		[4] = "--o",
		[5] = "i-o",
		[6] = "-eo",
		[7] = "ieo",
	};

	return names[flags & 7];
}
+
+static void lv_dump_block(void *context, FILE *f, const ir_node *bl)
+{
+ if(is_Block(bl)) {
+ be_lv_t *lv = context;
+ struct _be_lv_info_t *info = phase_get_irn_data(&lv->ph, bl);
+
+ fprintf(f, "liveness:\n");
+ if(info) {
+ unsigned n = info[0].u.head.n_members;
+ unsigned i;
+
+ for(i = 0; i < n; ++i) {
+ struct _be_lv_info_node_t *n = &info[i+1].u.node;
+ ir_fprintf(f, "%s %+F\n", lv_flags_to_str(n->flags), get_idx_irn(lv->irg, n->idx));
+ }
+ }
+ }
+}
+
+static void *lv_phase_data_init(ir_phase *phase, const ir_node *irn, void *old)
+{
+ struct _be_lv_info_t *info = phase_alloc(phase, LV_STD_SIZE * sizeof(info[0]));
+ (void) irn;
+ (void) old;
+
+ memset(info, 0, LV_STD_SIZE * sizeof(info[0]));
+ info[0].u.head.n_size = LV_STD_SIZE - 1;
+ return info;
+}
+
+static void collect_nodes(ir_node *irn, void *data)
+{
+ struct obstack *obst = data;
+ if (is_liveness_node(irn))
+ obstack_ptr_grow(obst, irn);
+}
+
+static int node_idx_cmp(const void *a, const void *b)
+{
+ const ir_node *p = *(ir_node **) a;
+ const ir_node *q = *(ir_node **) b;
+ int ia = get_irn_idx(p);
+ int ib = get_irn_idx(q);
+ return ia - ib;
+}
+
/**
 * (Re)computes the full liveness sets for lv->irg.
 *
 * Collects all liveness-relevant nodes into a temporary array,
 * optionally sorts them by node index (LV_COMPUTE_SORTED), and runs
 * liveness_for_node() on each, sharing a single scratch bitset for the
 * per-node visited-block marks. Also registers the node-info dump hook.
 */
static void compute_liveness(be_lv_t *lv)
{
	struct obstack obst;
	struct _lv_walker_t w;
	ir_node **nodes;
	int i, n;

	stat_ev_tim_push();
	obstack_init(&obst);
	irg_walk_graph(lv->irg, collect_nodes, NULL, &obst);
	/* Derive the element count from the growing object before closing it. */
	n = obstack_object_size(&obst) / sizeof(nodes[0]);
	nodes = obstack_finish(&obst);

	/*
	 * inserting the variables sorted by their ID is probably
	 * more efficient since the binary sorted set insertion
	 * will not need to move around the data.
	 * However, if sorting the variables a priori pays off
	 * needs to be checked, hence the define.
	 */
#ifdef LV_COMPUTE_SORTED
	qsort(nodes, n, sizeof(nodes[0]), node_idx_cmp);
#endif

	w.lv = lv;
	/* Scratch bitset lives on the same obstack; freed together below. */
	w.data = bitset_obstack_alloc(&obst, get_irg_last_idx(lv->irg));

	for (i = 0; i < n; ++i)
		liveness_for_node(nodes[i], &w);

	/* Releases both the node array and the scratch bitset. */
	obstack_free(&obst, NULL);
	register_hook(hook_node_info, &lv->hook_info);
	stat_ev_tim_pop("be_lv_sets_cons");
}
+
+void be_liveness_assure_sets(be_lv_t *lv)
+{
+ if (!lv->nodes) {
+ BE_TIMER_PUSH(t_live);
+
+ lv->nodes = bitset_malloc(2 * get_irg_last_idx(lv->irg));
+ phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
+ compute_liveness(lv);
+ /* be_live_chk_compare(lv, lv->lvc); */
+
+ BE_TIMER_POP(t_live);
+ }
+}
+
/**
 * Makes sure liveness queries can be answered.
 */
void be_liveness_assure_chk(be_lv_t *lv)
{
#ifndef USE_LIVE_CHK
	/* Without the lazy liveness checker compiled in, queries can only be
	 * answered from the full sets, so fall back to computing those. */
	BE_TIMER_PUSH(t_verify);
	be_liveness_assure_sets(lv);
	BE_TIMER_POP(t_verify);
#else
	/* The checker (lv->lvc) is already created in be_liveness(). */
	(void) lv;
#endif
}
+
+void be_liveness_invalidate(be_lv_t *lv)
+{
+ if (lv && lv->nodes) {
+ unregister_hook(hook_node_info, &lv->hook_info);
+ phase_free(&lv->ph);
+ bitset_free(lv->nodes);
+ lv->nodes = NULL;
+ }
+}
+
+/* Compute the inter block liveness for a graph. */
+be_lv_t *be_liveness(const be_irg_t *birg)
+{
+ be_lv_t *lv = xmalloc(sizeof(lv[0]));
+
+ memset(lv, 0, sizeof(lv[0]));
+ lv->irg = be_get_birg_irg(birg);
+ lv->birg = birg;
+#ifdef USE_LIVE_CHK
+ lv->dfs = dfs_new(&absgraph_irg_cfg_succ, lv->irg);
+ lv->lvc = lv_chk_new(lv->irg, lv->dfs);
+#endif
+ lv->hook_info.context = lv;
+ lv->hook_info.hook._hook_node_info = lv_dump_block;
+
+ return lv;
+}
+
/**
 * Throws away the current liveness information and computes it anew.
 * The nodes bitset is replaced with a doubled one (matching the initial
 * allocation policy in be_liveness_assure_sets) when the graph has
 * outgrown its capacity.
 *
 * NOTE(review): dereferences lv->nodes, i.e. assumes
 * be_liveness_assure_sets() already ran — confirm with callers.
 */
void be_liveness_recompute(be_lv_t *lv)
{
	unsigned last_idx;

	BE_TIMER_PUSH(t_live);
	last_idx = get_irg_last_idx(lv->irg);
	if(last_idx >= bitset_size(lv->nodes)) {
		/* Graph grew past capacity: allocate a larger bitset. */
		bitset_free(lv->nodes);
		lv->nodes = bitset_malloc(last_idx * 2);
	} else
		bitset_clear_all(lv->nodes);

	/* Drop all per-block records and rebuild them from scratch. */
	phase_free(&lv->ph);
	phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init, NULL);
	compute_liveness(lv);

	BE_TIMER_POP(t_live);
}
+
+
/**
 * Destroys a liveness object: invalidates the computed sets (if any),
 * releases the lazy-check structures when compiled in, and frees the
 * object itself.
 */
void be_liveness_free(be_lv_t *lv)
{
	be_liveness_invalidate(lv);
#ifdef USE_LIVE_CHK
	lv_chk_free(lv->lvc);
	dfs_free(lv->dfs);
#endif
	xfree(lv);
}
+
+void be_liveness_remove(be_lv_t *lv, const ir_node *irn)
+{
+ if (lv->nodes) {
+ unsigned idx = get_irn_idx(irn);
+ lv_remove_walker_t w;
+
+ /*
+ * Removes a single irn from the liveness information.
+ * Since an irn can only be live at blocks dominated by the block of its
+ * definition, we only have to process that dominance subtree.
+ */
+ w.lv = lv;
+ w.irn = irn;
+ dom_tree_walk(get_nodes_block(irn), lv_remove_irn_walker, NULL, &w);
+ if(idx < bitset_size(lv->nodes))
+ bitset_clear(lv->nodes, idx);
+ }
+}
+
+void be_liveness_introduce(be_lv_t *lv, ir_node *irn)
+{
+ if (lv->nodes) {
+ struct _lv_walker_t w;
+ w.lv = lv;
+ w.data = bitset_malloc(get_irg_last_idx(lv->irg));
+ liveness_for_node(irn, &w);
+ bitset_free(w.data);
+ }
+}
+
/**
 * Updates the liveness information for a single node: stale entries are
 * removed first, then the sets are recomputed — the order matters.
 */
void be_liveness_update(be_lv_t *lv, ir_node *irn)
{
	be_liveness_remove(lv, irn);
	be_liveness_introduce(lv, irn);
}
+
+static void lv_check_walker(ir_node *bl, void *data)
+{
+ struct _lv_walker_t *w = data;
+ be_lv_t *lv = w->lv;
+ be_lv_t *fresh = w->data;
+
+ struct _be_lv_info_t *curr = phase_get_irn_data(&lv->ph, bl);
+ struct _be_lv_info_t *fr = phase_get_irn_data(&fresh->ph, bl);
+
+ if(!fr && curr && curr[0].u.head.n_members > 0) {
+ unsigned i;
+
+ ir_fprintf(stderr, "%+F liveness should be empty but current liveness contains:\n", bl);
+ for(i = 0; i < curr[0].u.head.n_members; ++i) {
+ ir_fprintf(stderr, "\t%+F\n", get_idx_irn(lv->irg, curr[1 + i].u.node.idx));
+ }
+ }
+
+ else if(curr) {
+ unsigned n_curr = curr[0].u.head.n_members;
+ unsigned n_fresh = fr[0].u.head.n_members;
+
+ unsigned i;
+
+ if(n_curr != n_fresh) {
+ ir_fprintf(stderr, "%+F: liveness set sizes differ. curr %d, correct %d\n", bl, n_curr, n_fresh);
+
+ ir_fprintf(stderr, "current:\n");
+ for(i = 0; i < n_curr; ++i) {
+ struct _be_lv_info_node_t *n = &curr[1 + i].u.node;
+ ir_fprintf(stderr, "%+F %u %+F %s\n", bl, i, get_idx_irn(lv->irg, n->idx), lv_flags_to_str(n->flags));
+ }
+
+ ir_fprintf(stderr, "correct:\n");
+ for(i = 0; i < n_fresh; ++i) {
+ struct _be_lv_info_node_t *n = &fr[1 + i].u.node;
+ ir_fprintf(stderr, "%+F %u %+F %s\n", bl, i, get_idx_irn(lv->irg, n->idx), lv_flags_to_str(n->flags));
+ }
+ }
+ }
+}
+
+void be_liveness_check(be_lv_t *lv)
+{
+ struct _lv_walker_t w;
+ be_lv_t *fresh = be_liveness(lv->birg);
+
+ w.lv = lv;
+ w.data = fresh;
+ irg_block_walk_graph(lv->irg, lv_check_walker, NULL, &w);
+ be_liveness_free(fresh);
+}
+
+
+static void lv_dump_block_walker(ir_node *irn, void *data)
+{
+ struct _lv_walker_t *w = data;
+ if(is_Block(irn))
+ lv_dump_block(w->lv, w->data, irn);
+}
+
+
+/* Dump the liveness information for a graph. */
+void be_liveness_dump(const be_lv_t *lv, FILE *f)
+{
+ struct _lv_walker_t w;
+
+ w.lv = (be_lv_t *) lv;
+ w.data = f;
+ irg_block_walk_graph(lv->irg, lv_dump_block_walker, NULL, &w);
+}
+
+/* Dump the liveness information for a graph. */
+void be_liveness_dumpto(const be_lv_t *lv, const char *cls_name)
+{
+ FILE *f;
+ char buf[128];
+ ir_snprintf(buf, sizeof(buf), "%F_%s-live.txt", lv->irg, cls_name);
+ if((f = fopen(buf, "wt")) != NULL) {
+ be_liveness_dump(lv, f);
+ fclose(f);
+ }
+}
+
/**
 * Walker: checks that every operand of a node is defined in a block
 * that dominates the block where the node uses it.
 *
 * For Phi nodes the use effectively happens at the end of the
 * corresponding control-flow predecessor, so that block is taken as the
 * use block. Bad blocks on either side are skipped. The End node is
 * exempt from the check.
 *
 * @param data  Points to an int flag set to 1 on any violation.
 */
static void dom_check(ir_node *irn, void *data)
{
	int *problem_found = data;

	if(!is_Block(irn) && irn != get_irg_end(get_irn_irg(irn))) {
		int i, n;
		ir_node *bl = get_nodes_block(irn);

		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
			ir_node *op = get_irn_n(irn, i);
			ir_node *def_bl = get_nodes_block(op);
			ir_node *use_bl = bl;

			/* A Phi uses its i-th operand at the end of the i-th CFG
			 * predecessor, not in its own block. */
			if(is_Phi(irn))
				use_bl = get_Block_cfgpred_block(bl, i);

			if(get_irn_opcode(use_bl) != iro_Bad
			 && get_irn_opcode(def_bl) != iro_Bad
			 && !block_dominates(def_bl, use_bl)) {
				ir_fprintf(stderr, "Verify warning: %+F in %+F must dominate %+F for user %+F (%s)\n", op, def_bl, use_bl, irn, get_irg_dump_name(get_irn_irg(op)));
				*problem_found = 1;
			}
		}
	}
}
+
+/* Check, if the SSA dominance property is fulfilled. */
+int be_check_dominance(ir_graph *irg)