+/**
+ * Walks over all nodes to assure register constraints.
+ *
+ * @param birg The birg structure containing the irg
+ */
+void assure_constraints(be_irg_t *birg) {
+ ir_graph *irg = be_get_birg_irg(birg);
+ const arch_env_t *arch_env = be_get_birg_arch_env(birg);
+ constraint_env_t cenv;
+ op_copy_assoc_t *entry;
+ ir_node **nodes;
+ FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower.constr");
+
+ be_assure_dom_front(birg);
+
+ DEBUG_ONLY(cenv.dbg = mod;)
+ cenv.birg = birg;
+ cenv.op_set = new_pset(cmp_op_copy_assoc, 16);
+ obstack_init(&cenv.obst);
+
+ irg_walk_blkwise_graph(irg, NULL, assure_constraints_walker, &cenv);
+
+ /* melt copykeeps, pointing to projs of */
+ /* the same mode_T node and keeping the */
+ /* same operand */
+ melt_copykeeps(&cenv);
+
+ /* for all */
+ foreach_pset(cenv.op_set, entry) {
+ int n;
+ ir_node *cp;
+ ir_nodeset_iterator_t iter;
+ be_ssa_construction_env_t senv;
+
+ n = ir_nodeset_size(&entry->copies);
+ nodes = alloca(n * sizeof(nodes[0]));
+
+ /* put the node in an array */
+ DBG((mod, LEVEL_1, "introduce copies for %+F ", entry->op));
+
+ /* collect all copies */
+ n = 0;
+ foreach_ir_nodeset(&entry->copies, cp, iter) {
+ nodes[n++] = cp;
+ DB((mod, LEVEL_1, ", %+F ", cp));
+ }
+
+ DB((mod, LEVEL_1, "\n"));
+
+ /* introduce the copies for the operand and it's copies */
+ be_ssa_construction_init(&senv, birg);
+ be_ssa_construction_add_copy(&senv, entry->op);
+ be_ssa_construction_add_copies(&senv, nodes, n);
+ be_ssa_construction_fix_users(&senv, entry->op);
+ be_ssa_construction_destroy(&senv);
+
+ /* Could be that not all CopyKeeps are really needed, */
+ /* so we transform unnecessary ones into Keeps. */
+ foreach_ir_nodeset(&entry->copies, cp, iter) {
+ if (be_is_CopyKeep(cp) && get_irn_n_edges(cp) < 1) {
+ ir_node *keep;
+ int n = get_irn_arity(cp);
+
+ keep = be_new_Keep(arch_get_irn_reg_class(arch_env, cp, -1),
+ irg, get_nodes_block(cp), n, (ir_node **)&get_irn_in(cp)[1]);
+ sched_add_before(cp, keep);
+
+ /* Set all ins (including the block) of the CopyKeep BAD to keep the verifier happy. */
+ sched_remove(cp);
+ be_kill_node(cp);
+ }
+ }
+
+ ir_nodeset_destroy(&entry->copies);
+ }
+
+ del_pset(cenv.op_set);
+ obstack_free(&cenv.obst, NULL);
+ be_liveness_invalidate(be_get_birg_liveness(birg));
+}
+
+
+/**
+ * Push nodes that do not need to be permed through the Perm.
+ * This is commonly a reload cascade at block ends.
+ * @note This routine needs interference.
+ * @note Probably, we can implement it a little more efficiently.
+ * Especially searching the frontier lazily might be better.
+ * @param perm The perm.
+ * @param data The walker data (lower_env_t).
+ * @return 1, if there is something left to perm over.
+ * 0, if removed the complete perm.
+ */
+static int push_through_perm(ir_node *perm, void *data)
+{
+ lower_env_t *env = data;
+ const arch_env_t *aenv = env->arch_env;
+
+ ir_graph *irg = get_irn_irg(perm);
+ ir_node *bl = get_nodes_block(perm);
+ int n = get_irn_arity(perm);
+ int *map = alloca(n * sizeof(map[0]));
+ ir_node **projs = alloca(n * sizeof(projs[0]));
+ bitset_t *keep = bitset_alloca(n);
+ ir_node *frontier = sched_first(bl);
+ FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower.permmove");
+
+ int i, new_size, n_keep;
+ const ir_edge_t *edge;
+ ir_node *last_proj, *irn;
+ const arch_register_class_t *cls;
+
+ DBG((mod, LEVEL_1, "perm move %+F irg %+F\n", perm, irg));
+
+ /* get some proj and find out the register class of the proj. */
+ foreach_out_edge (perm, edge) {
+ last_proj = get_edge_src_irn(edge);
+ cls = arch_get_irn_reg_class(aenv, last_proj, -1);
+ assert(is_Proj(last_proj));
+ break;
+ }
+
+ /* find the point in the schedule after which the
+ * potentially movable nodes must be defined.
+ * A perm will only be pushed up to first instruction
+ * which lets an operand of itself die. */
+
+ sched_foreach_reverse_from (sched_prev(perm), irn) {
+ for(i = get_irn_arity(irn) - 1; i >= 0; --i) {
+ ir_node *op = get_irn_n(irn, i);
+ if(arch_irn_consider_in_reg_alloc(aenv, cls, op)
+ && !values_interfere(env->birg, op, last_proj)) {
+ frontier = sched_next(irn);
+ goto found_front;
+ }
+ }
+ }
+found_front:
+
+ DBG((mod, LEVEL_2, "\tfrontier: %+F\n", frontier));
+
+ foreach_out_edge (perm, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ int nr = get_Proj_proj(proj);
+ ir_node *op = get_irn_n(perm, nr);
+
+ assert(nr < n);
+
+ /* we will need the last Proj as an insertion point
+ * for the instruction(s) pushed through the Perm */
+ if (sched_comes_after(last_proj, proj))
+ last_proj = proj;
+
+ projs[nr] = proj;
+
+ bitset_set(keep, nr);
+ if (!is_Proj(op) && get_nodes_block(op) == bl
+ && (op == frontier || sched_comes_after(frontier, op))) {
+ for (i = get_irn_arity(op) - 1; i >= 0; --i) {
+ ir_node *opop = get_irn_n(op, i);
+ if (!arch_irn_consider_in_reg_alloc(aenv, cls, opop)) {
+ bitset_clear(keep, nr);
+ break;
+ }
+ }
+ }
+ }
+
+ n_keep = bitset_popcnt(keep);
+
+ /* well, we could not push anything through the perm */
+ if (n_keep == n)
+ return 1;
+
+ assert(is_Proj(last_proj));
+
+ DBG((mod, LEVEL_2, "\tkeep: %d, total: %d, mask: %b\n", n_keep, n, keep));
+ last_proj = sched_next(last_proj);
+ for (new_size = 0, i = 0; i < n; ++i) {
+ ir_node *proj = projs[i];
+
+ if (bitset_is_set(keep, i)) {
+ map[i] = new_size++;
+ set_Proj_proj(proj, map[i]);
+ DBG((mod, LEVEL_1, "\targ %d remap to %d\n", i, map[i]));
+ }
+
+ else {
+ ir_node *move = get_irn_n(perm, i);
+
+ DBG((mod, LEVEL_2, "\tmoving %+F before %+F, killing %+F\n", move, last_proj, proj));
+
+ /* move the movable node in front of the Perm */
+ sched_remove(move);
+ sched_add_before(last_proj, move);
+
+ /* give it the proj's register */
+ arch_set_irn_register(aenv, move, arch_get_irn_register(aenv, proj));
+
+ /* reroute all users of the proj to the moved node. */
+ edges_reroute(proj, move, irg);
+
+ /* and link it to Bad so it is no longer in the use array of the perm */
+ set_Proj_pred(proj, get_irg_bad(irg));
+
+ map[i] = -1;
+ }
+
+ }
+
+ if (n_keep > 0)
+ be_Perm_reduce(perm, new_size, map);
+
+ return n_keep > 0;