- pset *copies = pset_new_ptr(2 * n);
- pset *copy_blocks = pset_new_ptr(2 * n);
- int save_optimize = get_optimize();
- int save_normalize = get_opt_normalize();
- firm_dbg_module_t *dbg = DBG_MODULE;
- int i;
-
- firm_dbg_set_mask(dbg, DBG_LEVEL);
- DBG((dbg, LEVEL_1, "Introducing following copies of %+F\n", orig));
-
- /* Fill the sets. */
- pset_insert_ptr(copies, orig);
- pset_insert_ptr(copy_blocks, get_nodes_block(orig));
-
- /*
- * All phis using the original value are also copies of it
- * and must be present in the copies set.
- */
- for(i = 0; i < n; ++i) {
- DBG((dbg, LEVEL_1,
- " %+F in block %+F\n", copy_nodes[i], get_nodes_block(copy_nodes[i])));
- pset_insert_ptr(copies, copy_nodes[i]);
- pset_insert_ptr(copy_blocks, get_nodes_block(copy_nodes[i]));
- }
-
- /*
- * Disable optimization so that the phi functions do not
- * disappear.
- */
- set_optimize(0);
- set_opt_normalize(0);
-
- /*
- * Place the phi functions and reroute the usages.
- */
- place_phi_functions(orig, copies, copy_blocks, info);
- fix_usages(orig, copies, copy_blocks);
- remove_odd_phis(copies);
-
- /* reset the optimizations */
- set_optimize(save_optimize);
- set_opt_normalize(save_normalize);
-
- del_pset(copies);
- del_pset(copy_blocks);
+ int n = pset_count(nodes);
+ pset *blocks = pset_new_ptr(n);
+ pset *phi_blocks = pset_new_ptr(n);
+ int save_optimize = get_optimize();
+ int save_normalize = get_opt_normalize();
+ int phis_set_created = 0;
+ FIRM_DBG_REGISTER(firm_dbg_module_t *dbg, DBG_MODULE);
+
+ ir_node *irn;
+
+ /* We need to collect the phi functions to compute their liveness. */
+ if(lv && !phis) {
+ phis_set_created = 1;
+ phis = pset_new_ptr_default();
+ }
+
+ DBG((dbg, LEVEL_1, "Introducing following copies for:\n"));
+
+ /* Fill the sets. */
+ for(irn = pset_first(nodes); irn; irn = pset_next(nodes)) {
+ ir_node *bl = get_nodes_block(irn);
+ pset_insert_ptr(blocks, bl);
+ DBG((dbg, LEVEL_1, "\t%+F in %+F\n", irn, bl));
+ }
+
+ /*
+ * Disable optimization so that the phi functions do not
+ * disappear.
+ */
+ set_optimize(0);
+ set_opt_normalize(0);
+
+ /*
+ * Place the phi functions and reroute the usages.
+ */
+ determine_phi_blocks(nodes, blocks, phi_blocks, df);
+ fix_usages(nodes, blocks, phi_blocks, phis, ignore_uses);
+
+ /* reset the optimizations */
+ set_optimize(save_optimize);
+ set_opt_normalize(save_normalize);
+
+ del_pset(phi_blocks);
+ del_pset(blocks);
+
+ /* Recompute the liveness (if wanted) of the original nodes, the copies and the inserted phis. */
+ if(lv) {
+#if 1
+ foreach_pset(nodes, irn)
+ be_liveness_update(lv, irn);
+
+ foreach_pset(phis, irn)
+ be_liveness_introduce(lv, irn);
+#else
+ be_liveness_recompute(lv);
+#endif
+ }
+
+ /* Free the phi set of we created it. */
+ if(phis_set_created)
+ del_pset(phis);
+
+}
+
+/**
+ * Reconstruct SSA form for a set of nodes, collecting all inserted Phi
+ * nodes in @p phis. Convenience wrapper around
+ * be_ssa_constr_set_phis_ignore() with an empty ignore set.
+ */
+void be_ssa_constr_set_phis(be_dom_front_info_t *df, be_lv_t *lv, pset *nodes, pset *phis)
+{
+ pset *empty_set = be_empty_set();
+ be_ssa_constr_set_phis_ignore(df, lv, nodes, phis, empty_set);
+}
+
+/**
+ * Reconstruct SSA form for a set of nodes while skipping the uses listed
+ * in @p ignore_uses. Inserted Phi nodes are not collected (phis = NULL).
+ */
+void be_ssa_constr_set_ignore(be_dom_front_info_t *df, be_lv_t *lv, pset *nodes, pset *ignore_uses)
+{
+ be_ssa_constr_set_phis_ignore(df, lv, nodes, NULL, ignore_uses);
+}
+
+/**
+ * Reconstruct SSA form for a set of nodes with no uses ignored and
+ * without collecting the inserted Phi nodes. Simplest entry point of
+ * the be_ssa_constr_set_* family.
+ */
+void be_ssa_constr_set(be_dom_front_info_t *info, be_lv_t *lv, pset *nodes)
+{
+ pset *empty_set = be_empty_set();
+ be_ssa_constr_set_ignore(info, lv, nodes, empty_set);
+}
+
+/*
+ ___ _ ____
+ |_ _|_ __ ___ ___ _ __| |_ | _ \ ___ _ __ _ __ ___
+ | || '_ \/ __|/ _ \ '__| __| | |_) / _ \ '__| '_ ` _ \
+ | || | | \__ \ __/ | | |_ | __/ __/ | | | | | | |
+ |___|_| |_|___/\___|_| \__| |_| \___|_| |_| |_| |_|
+
+*/
+
+/**
+ * Insert a Perm node after @p pos permuting all values of register class
+ * @p cls that are live after @p pos, then reconstruct SSA form for each
+ * (operand, Proj) pair.
+ *
+ * @return The created Perm node, or NULL if no value of @p cls is live
+ *         after @p pos.
+ */
+ir_node *insert_Perm_after(const arch_env_t *arch_env,
+ be_lv_t *lv,
+ const arch_register_class_t *cls,
+ be_dom_front_info_t *dom_front,
+ ir_node *pos)
+{
+ ir_node *bl = is_Block(pos) ? pos : get_nodes_block(pos);
+ ir_graph *irg = get_irn_irg(bl);
+ pset *live = pset_new_ptr_default();
+ FIRM_DBG_REGISTER(firm_dbg_module_t *dbg, "be.node");
+
+ ir_node *curr, *irn, *perm, **nodes;
+ int i, n;
+
+ DBG((dbg, LEVEL_1, "Insert Perm after: %+F\n", pos));
+
+ /* Collect all values of cls live directly after pos. */
+ be_liveness_nodes_live_at(lv, arch_env, cls, pos, live);
+
+ n = pset_count(live);
+
+ /* Nothing live -> no Perm needed. */
+ if(n == 0) {
+ del_pset(live);
+ return NULL;
+ }
+
+ /* Copy the live set into an array, as be_new_Perm takes an in-array. */
+ nodes = xmalloc(n * sizeof(nodes[0]));
+
+ DBG((dbg, LEVEL_1, "live:\n"));
+ for(irn = pset_first(live), i = 0; irn; irn = pset_next(live), i++) {
+ DBG((dbg, LEVEL_1, "\t%+F\n", irn));
+ nodes[i] = irn;
+ }
+ del_pset(live);
+
+ perm = be_new_Perm(cls, irg, bl, n, nodes);
+ sched_add_after(pos, perm);
+ free(nodes);
+
+ /* For every operand, create a Proj of the Perm carrying the operand's
+ * register, schedule it right after the previously created Proj (or the
+ * Perm itself for the first one), and rebuild SSA form for the
+ * (operand, Proj) pair so dominated uses pick up the Proj. */
+ curr = perm;
+ for (i = 0; i < n; ++i) {
+ ir_node *copies[2];
+ ir_node *perm_op = get_irn_n(perm, i);
+ const arch_register_t *reg = arch_get_irn_register(arch_env, perm_op);
+
+ ir_mode *mode = get_irn_mode(perm_op);
+ ir_node *proj = new_r_Proj(irg, bl, perm, mode, i);
+ arch_set_irn_register(arch_env, proj, reg);
+
+ sched_add_after(curr, proj);
+ curr = proj;
+
+ copies[0] = perm_op;
+ copies[1] = proj;
+
+ be_ssa_constr(dom_front, lv, 2, copies);
+ }
+
+ return perm;
+}
+
+/** Closure handed to elr_split_walker() via the block walker. */
+struct _elr_closure_t {
+ struct obstack obst; /**< Obstack used for insn-env allocations during the walk. */
+ const be_chordal_env_t *cenv; /**< Chordal environment of the current register class. */
+};
+
+/**
+ * Block walker: scan the schedule of @p bl instruction by instruction and
+ * insert a Perm after every non-Phi instruction that is not the first one
+ * in the block, thereby splitting live ranges at (almost) every
+ * instruction ("extreme" live range splitting).
+ */
+static void elr_split_walker(ir_node *bl, void *data)
+{
+ struct _elr_closure_t *c = data;
+ const be_chordal_env_t *cenv = c->cenv;
+ const arch_env_t *aenv = cenv->birg->main_env->arch_env;
+ be_lv_t *lv = cenv->birg->lv;
+ be_dom_front_info_t *dom_front = cenv->birg->dom_front;
+ be_insn_t *insn;
+ be_insn_env_t ie;
+
+ be_insn_env_init(&ie, cenv->birg, cenv->cls, &c->obst);
+
+ for(insn = be_scan_insn(&ie, sched_first(bl)); !is_Block(insn->irn); insn = be_scan_insn(&ie, insn->next_insn)) {
+ ir_node *pred = sched_prev(insn->irn);
+ /* Skip the first instruction of the block (pred is the block itself)
+ * and Phi nodes, which must stay at the block's beginning. */
+ if(!is_Block(pred) && !is_Phi(insn->irn))
+ insert_Perm_after(aenv, lv, cenv->cls, dom_front, insn->irn);
+ }
+}
+
+/**
+ * Split live ranges at every instruction of the graph by walking all
+ * blocks with elr_split_walker(). Liveness is recomputed before the walk
+ * (the walker needs valid liveness) and again afterwards (the inserted
+ * Perms/Projs invalidate it).
+ */
+void extreme_liverange_splitting(struct _be_chordal_env_t *cenv)
+{
+ struct _elr_closure_t c;
+ be_lv_t *lv = cenv->birg->lv;
+
+ c.cenv = cenv;
+ obstack_init(&c.obst);
+ be_liveness_recompute(lv);
+ irg_block_walk_graph(cenv->irg, elr_split_walker, NULL, &c);
+ be_liveness_recompute(lv);
+ obstack_free(&c.obst, NULL);
+}
+
+/**
+ * Post-block-walker: Find blocks containing only one jump and
+ * remove them.
+ */
+/**
+ * Check whether @p block has exactly one control-flow predecessor and its
+ * schedule consists of nothing but a single Jmp; if so, reroute all users
+ * of the Jmp to the block's predecessor, disconnect the block (Bad pred)
+ * and remove the Jmp from the schedule.
+ *
+ * @param block  the block to inspect
+ * @param data   pointer to an int flag, set to 1 when a block was removed
+ */
+static void remove_empty_block(ir_node *block, void *data) {
+ const ir_edge_t *edge, *next;
+ ir_node *node;
+ int *changed = data;
+ ir_node *jump = NULL;
+
+ assert(is_Block(block));
+
+ /* Only blocks with a single control-flow predecessor can be bypassed. */
+ if (get_Block_n_cfgpreds(block) != 1)
+ return;
+
+ /* The schedule must contain exactly one node, and it must be a Jmp. */
+ sched_foreach(block, node) {
+ if (! is_Jmp(node))
+ return;
+ if (jump != NULL) {
+ /* we should never have 2 jumps in a block */
+ assert(0 && "We should never have 2 jumps in a block");
+ return;
+ }
+ jump = node;
+ }
+
+ /* An empty schedule (no Jmp at all) cannot be removed either. */
+ if (jump == NULL)
+ return;
+
+ /* Let every user of the Jmp point at the block's predecessor instead.
+ * NOTE: renamed the edge-source local from 'block' to 'succ_block' —
+ * the old name shadowed the function parameter. */
+ node = get_Block_cfgpred(block, 0);
+ foreach_out_edge_safe(jump, edge, next) {
+ ir_node *succ_block = get_edge_src_irn(edge);
+ int pos = get_edge_src_pos(edge);
+
+ set_irn_n(succ_block, pos, node);
+ }
+
+ /* Disconnect the now bypassed block and drop the dead Jmp. */
+ set_Block_cfgpred(block, 0, new_Bad());
+ sched_remove(jump);
+ *changed = 1;
+}
+
+/* removes basic blocks that just contain a jump instruction */
+int be_remove_empty_blocks(ir_graph *irg) {
+ int changed = 0;
+
+ irg_block_walk_graph(irg, remove_empty_block, NULL, &changed);
+ if (changed) {
+ /* invalidate analysis info */
+ set_irg_doms_inconsistent(irg);
+ set_irg_extblk_inconsistent(irg);
+ set_irg_outs_inconsistent(irg);
+ }
+ return changed;