+ unsigned last_idx = get_irg_last_idx(lv->irg);
+ if(last_idx >= bitset_size(lv->nodes)) {
+ bitset_free(lv->nodes);
+ lv->nodes = bitset_malloc(last_idx * 2);
+ }
+
+ else
+ bitset_clear_all(lv->nodes);
+
+ phase_free(&lv->ph);
+ phase_init(&lv->ph, "liveness", lv->irg, PHASE_DEFAULT_GROWTH, lv_phase_data_init);
+ compute_liveness(lv);
+}
+
+
/**
 * Free all resources held by a liveness analysis object.
 * Members are released before the container itself: the node-info hook is
 * unregistered, the phase data and the nodes bitset are freed, and finally
 * the be_lv_t struct itself. lv must not be used afterwards.
 */
void be_liveness_free(be_lv_t *lv)
{
	unregister_hook(hook_node_info, &lv->hook_info);
	phase_free(&lv->ph);
	bitset_free(lv->nodes);
	free(lv);
}
+
+void be_liveness_remove(be_lv_t *lv, ir_node *irn)
+{
+ unsigned idx = get_irn_idx(irn);
+ struct _lv_walker_t w;
+
+ /*
+ * Removes a single irn from the liveness information.
+ * Since an irn can only be live at blocks dominated by the block of its
+ * definition, we only have to process that dominance subtree.
+ */
+ w.lv = lv;
+ w.data = irn;
+ dom_tree_walk(get_nodes_block(irn), lv_remove_irn_walker, NULL, &w);
+ if(idx <= bitset_size(lv->nodes))
+ bitset_clear(lv->nodes, idx);
+}
+
+void be_liveness_introduce(be_lv_t *lv, ir_node *irn)
+{
+ struct _lv_walker_t w;
+ w.lv = lv;
+ w.data = bitset_malloc(get_irg_last_idx(lv->irg));
+ liveness_for_node(irn, &w);
+ bitset_free(w.data);
+}
+
/**
 * Refresh the liveness information for a node: first drop the stale
 * entries via be_liveness_remove(), then recompute them via
 * be_liveness_introduce().
 */
void be_liveness_update(be_lv_t *lv, ir_node *irn)
{
	be_liveness_remove(lv, irn);
	be_liveness_introduce(lv, irn);
}
+
+static void lv_add_missing_walker(ir_node *irn, void *data)
+{
+ struct _lv_walker_t *w = data;
+ if(!is_Block(irn) && !bitset_contains_irn(w->lv->nodes, irn)) {
+ liveness_for_node(irn, w);
+ }
+}
+
+void be_liveness_add_missing(be_lv_t *lv)
+{
+ struct _lv_walker_t w;
+ w.lv = lv;
+ w.data = bitset_malloc(get_irg_last_idx(lv->irg));
+ irg_walk_graph(lv->irg, lv_add_missing_walker, NULL, &w);
+ bitset_free(w.data);
+}
+
+static void lv_check_walker(ir_node *bl, void *data)
+{
+ struct _lv_walker_t *w = data;
+ be_lv_t *lv = w->lv;
+ be_lv_t *fresh = w->data;
+
+ struct _be_lv_info_t *curr = phase_get_irn_data(&lv->ph, bl);
+ struct _be_lv_info_t *fr = phase_get_irn_data(&fresh->ph, bl);
+
+ if(!fr && curr && curr[0].u.head.n_members > 0) {
+ unsigned i;
+
+ ir_fprintf(stderr, "%+F liveness should be empty but current liveness contains:\n", bl);
+ for(i = 0; i < curr[0].u.head.n_members; ++i) {
+ ir_fprintf(stderr, "\t%+F\n", get_idx_irn(lv->irg, curr[1 + i].u.node.idx));
+ }
+ }
+
+ else if(curr) {
+ unsigned n_curr = curr[0].u.head.n_members;
+ unsigned n_fresh = fr[0].u.head.n_members;
+
+ unsigned i;
+
+ if(n_curr != n_fresh) {
+ ir_fprintf(stderr, "%+F: liveness set sizes differ. curr %d, correct %d\n", bl, n_curr, n_fresh);
+
+ ir_fprintf(stderr, "current:\n");
+ for(i = 0; i < n_curr; ++i) {
+ struct _be_lv_info_node_t *n = &curr[1 + i].u.node;
+ ir_fprintf(stderr, "%+F %u %+F %s\n", bl, i, get_idx_irn(lv->irg, n->idx), lv_flags_to_str(n->flags));
+ }
+
+ ir_fprintf(stderr, "correct:\n");
+ for(i = 0; i < n_fresh; ++i) {
+ struct _be_lv_info_node_t *n = &fr[1 + i].u.node;
+ ir_fprintf(stderr, "%+F %u %+F %s\n", bl, i, get_idx_irn(lv->irg, n->idx), lv_flags_to_str(n->flags));
+ }
+ }
+ }
+}
+
+void be_liveness_check(be_lv_t *lv)
+{
+ struct _lv_walker_t w;
+ be_lv_t *fresh = be_liveness(lv->irg);
+
+ w.lv = lv;
+ w.data = fresh;
+ irg_block_walk_graph(lv->irg, lv_check_walker, NULL, &w);
+ be_liveness_free(fresh);
+}
+
+
+static void lv_dump_block_walker(ir_node *irn, void *data)
+{
+ struct _lv_walker_t *w = data;
+ if(is_Block(irn))
+ lv_dump_block(w->lv, w->data, irn);
+}
+
+
+/* Dump the liveness information for a graph. */
+void be_liveness_dump(const be_lv_t *lv, FILE *f)
+{
+ struct _lv_walker_t w;
+
+ w.lv = (be_lv_t *) lv;
+ w.data = f;
+ irg_block_walk_graph(lv->irg, lv_dump_block_walker, NULL, &w);
+}
+
+/* Dump the liveness information for a graph. */
+void be_liveness_dumpto(const be_lv_t *lv, const char *cls_name)
+{
+ FILE *f;
+ char buf[128];
+ ir_snprintf(buf, sizeof(buf), "%F_%s-live.txt", lv->irg, cls_name);
+ if((f = fopen(buf, "wt")) != NULL) {
+ be_liveness_dump(lv, f);
+ fclose(f);
+ }
+}
+
+/**
+ * Walker: checks the every predecessors of a node dominate
+ * the note.
+ */
+static void dom_check(ir_node *irn, void *data)
+{
+ int *problem_found = data;
+
+ if(!is_Block(irn) && irn != get_irg_end(get_irn_irg(irn))) {
+ int i, n;
+ ir_node *bl = get_nodes_block(irn);
+
+ for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
+ ir_node *op = get_irn_n(irn, i);
+ ir_node *def_bl = get_nodes_block(op);
+ ir_node *use_bl = bl;
+
+ if(is_Phi(irn))
+ use_bl = get_Block_cfgpred_block(bl, i);
+
+ if(get_irn_opcode(use_bl) != iro_Bad
+ && get_irn_opcode(def_bl) != iro_Bad
+ && !block_dominates(def_bl, use_bl)) {
+ ir_fprintf(stderr, "Verify warning: %+F in %+F must dominate %+F for user %+F (%s)\n", op, def_bl, use_bl, irn, get_irg_dump_name(get_irn_irg(op)));
+ *problem_found = 1;
+ }
+ }
+ }
+}
+
+/* Check, if the SSA dominance property is fulfilled. */
+int be_check_dominance(ir_graph *irg)
+{
+ int problem_found = 0;
+
+ irg_walk_graph(irg, dom_check, NULL, &problem_found);
+
+ return !problem_found;
+}
+
/**
 * Update a live set across a single (non-Phi) node, walking backwards:
 * the node's own value dies (is removed from the set) and its operands
 * become live (are inserted), restricted to nodes the register allocator
 * considers for the given register class.
 *
 * @param arch_env  The architecture environment.
 * @param cls       The register class to filter by.
 * @param irn       The node to transfer liveness over (must not be a Phi).
 * @param live      The live set after irn; updated in place.
 * @return The updated live set (same pset as passed in).
 */
pset *be_liveness_transfer(const arch_env_t *arch_env, const arch_register_class_t *cls, ir_node *irn, pset *live)
{
	int i, n;
	ir_node *x;
	FIRM_DBG_REGISTER(firm_dbg_module_t *dbg, DBG_MODULE);

	/* Debug builds only: log the node and the incoming live set. */
	DEBUG_ONLY(
		DBG((dbg, LEVEL_1, "%+F\n", irn));
		for(x = pset_first(live); x; x = pset_next(live))
			DBG((dbg, LEVEL_1, "\tlive: %+F\n", x));
	)

	/* You should better break out of your loop when hitting the first phi function. */
	assert(!is_Phi(irn) && "liveness_transfer produces invalid results for phi nodes");

	/* The node's own definition ends here (walking backwards). */
	if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn)) {
		ir_node *del = pset_remove_ptr(live, irn);
		assert(irn == del);
	}

	/* All relevant operands are live before irn. */
	for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
		ir_node *op = get_irn_n(irn, i);

		if(arch_irn_consider_in_reg_alloc(arch_env, cls, op))
			pset_insert_ptr(live, op);
	}

	return live;
}
+
/**
 * Insert all nodes live at the end of a block into a pset, restricted to
 * nodes relevant for register allocation in the given register class.
 *
 * @param lv        The liveness information.
 * @param arch_env  The architecture environment.
 * @param cls       The register class to filter by.
 * @param bl        The block whose live-end set is collected.
 * @param live      The pset to insert into; updated in place.
 * @return The updated pset (same as passed in).
 */
pset *be_liveness_end_of_block(const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *bl, pset *live)
{
	int i;
	be_lv_foreach(lv, bl, be_lv_state_end, i) {
		ir_node *irn = be_lv_get_irn(lv, bl, i);
		if(arch_irn_consider_in_reg_alloc(arch_env, cls, irn))
			pset_insert_ptr(live, irn);
	}

	return live;
}
+
+pset *be_liveness_nodes_live_at(const be_lv_t *lv, const arch_env_t *arch_env, const arch_register_class_t *cls, const ir_node *pos, pset *live)
+{
+ const ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
+ ir_node *irn;
+
+ be_liveness_end_of_block(lv, arch_env, cls, bl, live);
+ sched_foreach_reverse(bl, irn) {
+ /*
+ * If we encounter the node we want to insert the Perm after,
+ * exit immediately, so that this node is still live
+ */
+ if(irn == pos)
+ return live;
+
+ be_liveness_transfer(arch_env, cls, irn, live);
+ }
+
+ return live;