+ for (i = 0; pairs[i].checked; ++i) {}
+ get_perm_cycle(&cycle, pairs, n, i);
+
+ DB((dbg, LEVEL_1, "%+F: following %s created:\n ", irn, cycle.type == PERM_CHAIN ? "chain" : "cycle"));
+ for (j = 0; j < cycle.n_elems; j++) {
+ DB((dbg, LEVEL_1, " %s", cycle.elems[j]->name));
+ }
+ DB((dbg, LEVEL_1, "\n"));
+
+ if (n == 2 && cycle.type == PERM_CYCLE) {
+ /* We don't need to do anything if we have a Perm with two elements
+ * which represents a cycle, because those nodes already represent
+ * exchange nodes */
+ keep_perm = 1;
+ } else {
+ /* TODO: - iff PERM_CYCLE && do_copy -> determine free temp reg and
+ * insert copy to/from it before/after the copy cascade (this
+ * reduces the cycle into a chain) */
+
+ /* build copy/swap nodes from back to front */
+ for (i = cycle.n_elems - 2; i >= 0; i--) {
+ ir_node *arg1 = get_node_for_in_register(pairs, n, cycle.elems[i]);
+ ir_node *arg2 = get_node_for_in_register(pairs, n, cycle.elems[i + 1]);
+
+ ir_node *res1 = get_node_for_out_register(pairs, n, cycle.elems[i]);
+ ir_node *res2 = get_node_for_out_register(pairs, n, cycle.elems[i + 1]);
+ /* If we have a cycle and don't copy: we need to create exchange
+ * nodes
+ * NOTE: An exchange node is a perm node with 2 INs and 2 OUTs
+ * IN_1 = in node with register i
+ * IN_2 = in node with register i + 1
+ * OUT_1 = out node with register i + 1
+ * OUT_2 = out node with register i */
+ if (cycle.type == PERM_CYCLE && !do_copy) {
+ ir_node *in[2];
+ ir_node *cpyxchg;
+
+ in[0] = arg1;
+ in[1] = arg2;
+
+ /* At this point we have to handle the following problem:
+ *
+ * If we have a cycle with more than two elements, then this
+ * could correspond to the following Perm node:
+ *
+ * +----+ +----+ +----+
+ * | r1 | | r2 | | r3 |
+ * +-+--+ +-+--+ +--+-+
+ * | | |
+ * | | |
+ * +-+--------+---------+-+
+ * | Perm |
+ * +-+--------+---------+-+
+ * | | |
+ * | | |
+ * +-+--+ +-+--+ +--+-+
+ * |Proj| |Proj| |Proj|
+ * | r2 | | r3 | | r1 |
+ * +----+ +----+ +----+
+ *
+ * This node is about to be split up into two 2x Perm's for
+ * which we need 4 Proj's and the one additional Proj of the
+ * first Perm has to be one IN of the second. So in general
+ * we need to create one additional Proj for each "middle"
+ * Perm and set this to one in node of the successor Perm. */
+
+ DBG((dbg, LEVEL_1, "%+F creating exchange node (%+F, %s) and (%+F, %s) with\n",
+ irn, arg1, cycle.elems[i]->name, arg2, cycle.elems[i + 1]->name));
+ DBG((dbg, LEVEL_1, "%+F (%+F, %s) and (%+F, %s)\n",
+ irn, res1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name));
+
+ cpyxchg = be_new_Perm(reg_class, irg, block, 2, in);
+
+ if (i > 0) {
+ /* cycle is not done yet */
+ int pidx = get_pairidx_for_in_regidx(pairs, n, cycle.elems[i]->index);
+
+ /* create intermediate proj */
+ res1 = new_r_Proj(irg, block, cpyxchg, get_irn_mode(res1), 0);
+
+ /* set as in for next Perm */
+ pairs[pidx].in_node = res1;
+ }
+
+ set_Proj_pred(res2, cpyxchg);
+ set_Proj_proj(res2, 0);
+ set_Proj_pred(res1, cpyxchg);
+ set_Proj_proj(res1, 1);
+
+ arch_set_irn_register(res2, cycle.elems[i + 1]);
+ arch_set_irn_register(res1, cycle.elems[i]);
+
+ /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
+ sched_add_after(sched_point, cpyxchg);
+
+ DBG((dbg, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point));
+
+ /* set the new scheduling point */
+ sched_point = res1;
+ } else {
+ ir_node *cpyxchg;
+
+ DBG((dbg, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n",
+ irn, arg1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name));
+
+ cpyxchg = be_new_Copy(reg_class, irg, block, arg1);
+ arch_set_irn_register(cpyxchg, cycle.elems[i + 1]);
+
+ /* exchange copy node and proj */
+ exchange(res2, cpyxchg);
+
+ /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
+ sched_add_after(sched_point, cpyxchg);
+
+ /* set the new scheduling point */
+ sched_point = cpyxchg;
+ }
+ }
+ }
+
+ free((void*)cycle.elems);
+ }
+
+ /* remove the perm from schedule */
+ if (!keep_perm) {
+ sched_remove(irn);
+ kill_node(irn);
+ }
+}
+
+
+
+/** Returns non-zero iff @p irn has at least one user (an outgoing normal edge). */
+static int has_irn_users(const ir_node *irn) {
+	const ir_edge_t *first_edge = get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL);
+	return first_edge != NULL;
+}
+
+/**
+ * Walks the schedule backwards from @p irn over the run of Copy nodes
+ * placed immediately before it and returns the first non-spillable Copy
+ * of @p op, or NULL if a non-Copy node is reached first.
+ */
+static ir_node *find_copy(ir_node *irn, ir_node *op)
+{
+	ir_node *candidate = sched_prev(irn);
+
+	while (be_is_Copy(candidate)) {
+		if (be_get_Copy_op(candidate) == op && arch_irn_is(candidate, dont_spill))
+			return candidate;
+		candidate = sched_prev(candidate);
+	}
+	return NULL;
+}
+
+/**
+ * Enforces a must_be_different constraint between @p irn and
+ * @p other_different: a non-spillable Copy of other_different is created
+ * (or an existing one found directly before irn in the schedule is
+ * reused) and a Keep resp. CopyKeep ties both values' lifetimes
+ * together. The created copies are recorded in env->op_set so they can
+ * be optimized away later if not needed.
+ *
+ * @param irn              the node carrying the constraint
+ * @param other_different  the node irn must not share a register with
+ * @param env              the constraint environment
+ */
+static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env) {
+	ir_graph *irg;
+	ir_nodemap_t *op_set;
+	ir_node *block;
+	const arch_register_class_t *cls;
+	ir_node *keep, *cpy;
+	op_copy_assoc_t *entry;
+
+	/* constraints against ignore registers or non-data values need not
+	 * (or cannot) be enforced */
+	if (arch_irn_is(other_different, ignore) ||
+			!mode_is_datab(get_irn_mode(other_different))) {
+		DBG((dbg_constr, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn));
+		return;
+	}
+
+	irg = be_get_birg_irg(env->birg);
+	op_set = &env->op_set;
+	block = get_nodes_block(irn);
+	cls = arch_get_irn_reg_class(other_different, -1);
+
+	/* Make a not spillable copy of the different node */
+	/* this is needed because the different irn could be */
+	/* in block far far away */
+	/* The copy is optimized later if not needed */
+
+	/* check whether such a copy already exists in the schedule immediately before irn */
+	cpy = find_copy(skip_Proj(irn), other_different);
+	if (! cpy) {
+		cpy = be_new_Copy(cls, irg, block, other_different);
+		be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill);
+		DBG((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different));
+	} else {
+		DBG((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different));
+	}
+
+	/* Add the Keep resp. CopyKeep and reroute the users */
+	/* of the other_different irn in case of CopyKeep.   */
+	if (has_irn_users(other_different)) {
+		keep = be_new_CopyKeep_single(cls, irg, block, cpy, irn, get_irn_mode(other_different));
+		be_node_set_reg_class(keep, 1, cls);
+	} else {
+		ir_node *in[2];
+
+		in[0] = irn;
+		in[1] = cpy;
+		keep = be_new_Keep(cls, irg, block, 2, in);
+	}
+
+	DBG((dbg_constr, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, cpy));
+
+	/* insert copy and keep into schedule */
+	assert(sched_is_scheduled(irn) && "need schedule to assure constraints");
+	if (! sched_is_scheduled(cpy))
+		sched_add_before(skip_Proj(irn), cpy);
+	sched_add_after(irn, keep);
+
+	/* insert the other different and its copies into the map */
+	entry = ir_nodemap_get(op_set, other_different);
+	if (! entry) {
+		entry = obstack_alloc(&env->obst, sizeof(*entry));
+		entry->cls = cls;
+		ir_nodeset_init(&entry->copies);
+
+		ir_nodemap_insert(op_set, other_different, entry);
+	}
+
+	/* insert copy */
+	ir_nodeset_insert(&entry->copies, cpy);
+
+	/* insert keep in case of CopyKeep */
+	if (be_is_CopyKeep(keep))
+		ir_nodeset_insert(&entry->copies, keep);
+}
+
+/**
+ * Checks if a node has a must_be_different constraint on its output and
+ * adds a Keep pattern (via gen_assure_different_pattern()) for every
+ * input it must differ from, to assure the constraint.
+ *
+ * @param irn         the (possibly Proj) node to check
+ * @param skipped_irn if irn is a Proj node, its predecessor, else irn
+ * @param env         the constraint environment
+ */
+static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env) {
+	const arch_register_req_t *req = arch_get_register_req(irn, -1);
+
+	if (arch_register_req_is(req, must_be_different)) {
+		const unsigned other = req->other_different;
+		unsigned remaining;
+		int i;
+
+		if (arch_register_req_is(req, should_be_same)) {
+			const unsigned same = req->other_same;
+
+			if (is_po2(other) && is_po2(same)) {
+				int idx_other = ntz(other);
+				int idx_same  = ntz(same);
+
+				/*
+				 * We can safely ignore a should_be_same x must_be_different y
+				 * IFF both inputs are equal!
+				 */
+				if (get_irn_n(skipped_irn, idx_other) == get_irn_n(skipped_irn, idx_same)) {
+					return;
+				}
+			}
+		}
+
+		/* Visit every set bit of 'other'. We shift the bitset down instead
+		 * of shifting the constant 1 up ("1U << i <= other"), because the
+		 * latter is undefined behaviour (1U << 32) once the most
+		 * significant bit is set. */
+		for (i = 0, remaining = other; remaining != 0; ++i, remaining >>= 1) {
+			if (remaining & 1U) {
+				ir_node *different_from = get_irn_n(skipped_irn, i);
+				gen_assure_different_pattern(irn, different_from, env);
+			}
+		}
+	}
+}
+
+/**
+ * Calls the functions to assure register constraints.
+ *
+ * @param block The block to be checked
+ * @param walk_env The walker environment
+ */
+static void assure_constraints_walker(ir_node *block, void *walk_env) {
+ ir_node *irn;
+
+ sched_foreach_reverse(block, irn) {
+ ir_mode *mode = get_irn_mode(irn);
+
+ if (mode == mode_T) {
+ const ir_edge_t *edge;
+
+ foreach_out_edge(irn, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+
+ mode = get_irn_mode(proj);
+ if (mode_is_datab(mode))
+ assure_different_constraints(proj, irn, walk_env);
+ }
+ } else if (mode_is_datab(mode)) {
+ assure_different_constraints(irn, irn, walk_env);