+ if (! keep_perm) {
+ sched_remove(irn);
+ kill_node(irn);
+ }
+}
+
+
+
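+/**
+ * Returns non-zero if the node has at least one user (i.e. a normal out edge).
+ */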
+static int has_irn_users(const ir_node *irn) {
+ return get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL) != 0;
+}
+
+/**
+ * Skip all Proj nodes.
+ */
+static INLINE ir_node *belower_skip_proj(ir_node *irn) {
+ while(is_Proj(irn))
+ irn = get_Proj_pred(irn);
+ return irn;
+}
+
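+/**
+ * Walks the Copy nodes scheduled immediately before @p irn and returns an
+ * already existing non-spillable Copy of @p op, or NULL if there is none.
+ */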
+static ir_node *find_copy(constraint_env_t *env, ir_node *irn, ir_node *op) {
+ const arch_env_t *arch_env = be_get_birg_arch_env(env->birg);
+ ir_node *block = get_nodes_block(irn);
+ ir_node *cur_node;
+
+ for (cur_node = sched_prev(irn);
+ ! is_Block(cur_node) && be_is_Copy(cur_node) && get_nodes_block(cur_node) == block;
+ cur_node = sched_prev(cur_node))
+ {
+ if (be_get_Copy_op(cur_node) == op && arch_irn_is(arch_env, cur_node, dont_spill))
+ return cur_node;
+ }
+
+ return NULL;
+}
+
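+/**
+ * Makes sure @p irn and @p other_different can be assigned different registers:
+ * a non-spillable Copy of other_different is created (or reused) and tied to
+ * irn via a Keep/CopyKeep; the copies are recorded for later SSA reconstruction.
+ */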
+static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env) {
+ be_irg_t *birg = env->birg;
+ ir_graph *irg = be_get_birg_irg(birg);
+ pset *op_set = env->op_set;
+ const arch_env_t *arch_env = be_get_birg_arch_env(birg);
+ ir_node *block = get_nodes_block(irn);
+ const arch_register_class_t *cls = arch_get_irn_reg_class(arch_env, other_different, -1);
+ ir_node *in[2], *keep, *cpy;
+ op_copy_assoc_t key, *entry;
+ DEBUG_ONLY(firm_dbg_module_t *mod = env->dbg;)
+
+ if (arch_irn_is(arch_env, other_different, ignore) || ! mode_is_datab(get_irn_mode(other_different))) {
+ DBG((mod, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn));
+ return;
+ }
+
+ /* Make a non-spillable copy of the different node; this is needed
+    because the different irn could be in a block far, far away.
+    The copy is optimized away later if it is not needed. */
+
+ /* check whether such a copy already exists in the schedule immediately before this node */
+ cpy = find_copy(env, belower_skip_proj(irn), other_different);
+ if (! cpy) {
+ cpy = be_new_Copy(cls, irg, block, other_different);
+ be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill);
+ DBG((mod, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different));
+ }
+ else {
+ DBG((mod, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different));
+ }
+
+ in[0] = irn;
+ in[1] = cpy;
+
+ /* Add the Keep resp. CopyKeep and reroute the users */
+ /* of the other_different irn in case of CopyKeep. */
+ if (has_irn_users(other_different)) {
+ keep = be_new_CopyKeep_single(cls, irg, block, cpy, irn, get_irn_mode(other_different));
+ be_node_set_reg_class(keep, 1, cls);
+ }
+ else {
+ keep = be_new_Keep(cls, irg, block, 2, in);
+ }
+
+ DBG((mod, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, cpy));
+
+ /* insert copy and keep into schedule */
+ assert(sched_is_scheduled(irn) && "need schedule to assure constraints");
+ if (! sched_is_scheduled(cpy))
+ sched_add_before(belower_skip_proj(irn), cpy);
+ sched_add_after(irn, keep);
+
+ /* insert the other_different node and its copies into the set */
+ key.op = other_different;
+ entry = pset_find(op_set, &key, hash_irn(other_different));
+
+ if (! entry) {
+ entry = obstack_alloc(&env->obst, sizeof(*entry));
+ ir_nodeset_init(&entry->copies);
+ entry->op = other_different;
+ entry->cls = cls;
+ }
+
+ /* insert copy */
+ ir_nodeset_insert(&entry->copies, cpy);
+
+ /* insert keep in case of CopyKeep */
+ if (be_is_CopyKeep(keep)) {
+ ir_nodeset_insert(&entry->copies, keep);
+ }
+
+ pset_insert(op_set, entry, hash_irn(other_different));
+}
+
+/**
+ * Checks whether a node has a should_be_different constraint on its output
+ * and, if so, adds a Keep to enforce the constraint.
+ */
+static void assure_different_constraints(ir_node *irn, constraint_env_t *env) {
+ const arch_register_req_t *req;
+ const arch_env_t *arch_env = be_get_birg_arch_env(env->birg);
+
+ req = arch_get_register_req(arch_env, irn, -1);
+
+ if (arch_register_req_is(req, should_be_different)) {
+ const unsigned other = req->other_different;
+ int i;
+
+ if (arch_register_req_is(req, should_be_same)) {
+ const unsigned same = req->other_same;
+
+ if (is_po2(other) && is_po2(same)) {
+ int idx_other = ntz(other);
+ int idx_same = ntz(same);
+
+ /*
+ * A should_be_same x / should_be_different y constraint pair can
+ * safely be ignored iff both inputs are equal.
+ */
+ if (get_irn_n(irn, idx_other) == get_irn_n(irn, idx_same)) {
+ return;
+ }
+ }
+ }
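+ /* enforce the constraint against every input marked in the other_different bitmask */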
+ for (i = 0; 1U << i <= other; ++i) {
+ if (other & (1U << i)) {
+ ir_node *different_from = get_irn_n(belower_skip_proj(irn), i);
+ gen_assure_different_pattern(irn, different_from, env);
+ }
+ }
+ }
+}
+
+/**
+ * Calls the functions to assure register constraints.
+ *
+ * @param irn The node to be checked for lowering
+ * @param walk_env The walker environment
+ */
+static void assure_constraints_walker(ir_node *irn, void *walk_env) {
+ if (is_Block(irn))
+ return;
+
+ if (sched_is_scheduled(irn) && mode_is_datab(get_irn_mode(irn)))
+ assure_different_constraints(irn, walk_env);
+}
+
+/**
+ * Melt all copykeeps pointing to the same node
+ * (or Projs of the same node), copying the same operand.
+ */
+static void melt_copykeeps(constraint_env_t *cenv) {
+ be_irg_t *birg = cenv->birg;
+ ir_graph *irg = be_get_birg_irg(birg);
+ op_copy_assoc_t *entry;
+
+ /* for all operand/copy associations */
+ foreach_pset(cenv->op_set, entry) {
+ int idx, num_ck;
+ ir_node *cp;
+ struct obstack obst;
+ ir_nodeset_iterator_t iter;
+ ir_node **ck_arr, **melt_arr;
+
+ obstack_init(&obst);
+
+ /* collect all copykeeps */
+ num_ck = idx = 0;
+ foreach_ir_nodeset(&entry->copies, cp, iter) {
+ if (be_is_CopyKeep(cp)) {
+ obstack_grow(&obst, &cp, sizeof(cp));
+ ++num_ck;
+ }
+#ifdef KEEP_ALIVE_COPYKEEP_HACK
+ else {
+ set_irn_mode(cp, mode_ANY);
+ keep_alive(cp);
+ }
+#endif /* KEEP_ALIVE_COPYKEEP_HACK */
+ }
+
+ /* compare each copykeep with all other copykeeps */
+ ck_arr = (ir_node **)obstack_finish(&obst);
+ for (idx = 0; idx < num_ck; ++idx) {
+ ir_node *ref, *ref_mode_T;
+
+ if (ck_arr[idx]) {
+ int j, n_melt;
+ ir_node **new_ck_in;
+ ir_node *new_ck;
+ ir_node *sched_pt = NULL;
+
+ n_melt = 1;
+ ref = ck_arr[idx];
+ ref_mode_T = skip_Proj(get_irn_n(ref, 1));
+ obstack_grow(&obst, &ref, sizeof(ref));
+
+ DBG((cenv->dbg, LEVEL_1, "Trying to melt %+F:\n", ref));
+
+ /* check for copykeeps pointing to the same mode_T node as the reference copykeep */
+ for (j = 0; j < num_ck; ++j) {
+ ir_node *cur_ck = ck_arr[j];
+
+ if (j != idx && cur_ck && skip_Proj(get_irn_n(cur_ck, 1)) == ref_mode_T) {
+ obstack_grow(&obst, &cur_ck, sizeof(cur_ck));
+ ir_nodeset_remove(&entry->copies, cur_ck);
+ DBG((cenv->dbg, LEVEL_1, "\t%+F\n", cur_ck));
+ ck_arr[j] = NULL;
+ ++n_melt;
+ sched_remove(cur_ck);
+ }
+ }
+ ck_arr[idx] = NULL;
+
+ /* check whether we found any candidates for melting */
+ if (n_melt == 1) {
+ DBG((cenv->dbg, LEVEL_1, "\tno candidate found\n"));
+ continue;
+ }
+
+ ir_nodeset_remove(&entry->copies, ref);
+ sched_remove(ref);
+
+ melt_arr = (ir_node **)obstack_finish(&obst);
+ /* melt all found copykeeps */
+ NEW_ARR_A(ir_node *, new_ck_in, n_melt);
+ for (j = 0; j < n_melt; ++j) {
+ new_ck_in[j] = get_irn_n(melt_arr[j], 1);
+
+ /* now we can kill the melted CopyKeeps, except for the */
+ /* reference one, which we still need for its information */
+ if (melt_arr[j] != ref)
+ kill_node(melt_arr[j]);
+ }
+
+#ifdef KEEP_ALIVE_COPYKEEP_HACK
+ new_ck = be_new_CopyKeep(entry->cls, irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, mode_ANY);
+ keep_alive(new_ck);
+#else
+ new_ck = be_new_CopyKeep(entry->cls, irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, get_irn_mode(ref));
+#endif /* KEEP_ALIVE_COPYKEEP_HACK */
+
+ /* set register class for all kept inputs */
+ for (j = 1; j <= n_melt; ++j)
+ be_node_set_reg_class(new_ck, j, entry->cls);
+
+ ir_nodeset_insert(&entry->copies, new_ck);
+
+ /* find scheduling point */
+ sched_pt = ref_mode_T;
+ do {
+ /* just walk along the schedule until a non-Keep/CopyKeep node is found */
+ sched_pt = sched_next(sched_pt);
+ } while (be_is_Keep(sched_pt) || be_is_CopyKeep(sched_pt));
+
+ sched_add_before(sched_pt, new_ck);
+ DBG((cenv->dbg, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt));
+
+ /* finally: kill the reference copykeep */
+ kill_node(ref);
+ }
+ }
+
+ obstack_free(&obst, NULL);
+ }
+}
+
+/**
+ * Walks over all nodes to assure register constraints.
+ *
+ * @param birg The birg structure containing the irg
+ */
+void assure_constraints(be_irg_t *birg) {
+ ir_graph *irg = be_get_birg_irg(birg);
+ const arch_env_t *arch_env = be_get_birg_arch_env(birg);
+ constraint_env_t cenv;
+ op_copy_assoc_t *entry;
+ ir_node **nodes;
+ FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower.constr");
+
+ be_assure_dom_front(birg);
+
+ DEBUG_ONLY(cenv.dbg = mod;)
+ cenv.birg = birg;
+ cenv.op_set = new_pset(cmp_op_copy_assoc, 16);
+ obstack_init(&cenv.obst);
+
+ irg_walk_blkwise_graph(irg, NULL, assure_constraints_walker, &cenv);
+
+ /* melt CopyKeeps pointing to Projs of the same mode_T node
+    and keeping the same operand */
+ melt_copykeeps(&cenv);
+
+ /* for all operand/copy associations */
+ foreach_pset(cenv.op_set, entry) {
+ int n;
+ ir_node *cp;
+ ir_nodeset_iterator_t iter;
+ be_ssa_construction_env_t senv;
+
+ n = ir_nodeset_size(&entry->copies);
+ nodes = alloca(n * sizeof(nodes[0]));
+
+ /* put the node in an array */
+ DBG((mod, LEVEL_1, "introduce copies for %+F ", entry->op));
+
+ /* collect all copies */
+ n = 0;
+ foreach_ir_nodeset(&entry->copies, cp, iter) {
+ nodes[n++] = cp;
+ DB((mod, LEVEL_1, ", %+F ", cp));
+ }
+
+ DB((mod, LEVEL_1, "\n"));
+
+ /* introduce the copies for the operand and its copies */
+ be_ssa_construction_init(&senv, birg);
+ be_ssa_construction_add_copy(&senv, entry->op);
+ be_ssa_construction_add_copies(&senv, nodes, n);
+ be_ssa_construction_fix_users(&senv, entry->op);
+ be_ssa_construction_destroy(&senv);
+
+ /* It could be that not all CopyKeeps are really needed, */
+ /* so we transform unnecessary ones into Keeps. */
+ foreach_ir_nodeset(&entry->copies, cp, iter) {
+ if (be_is_CopyKeep(cp) && get_irn_n_edges(cp) < 1) {
+ ir_node *keep;
+ int n = get_irn_arity(cp);
+
+ keep = be_new_Keep(arch_get_irn_reg_class(arch_env, cp, -1),
+ irg, get_nodes_block(cp), n, get_irn_in(cp) + 1);
+ sched_add_before(cp, keep);
+
+ /* Remove the CopyKeep and kill it; this sets all its ins (including the block) to Bad, keeping the verifier happy. */
+ sched_remove(cp);
+ kill_node(cp);
+ }
+ }
+
+ ir_nodeset_destroy(&entry->copies);
+ }
+
+ del_pset(cenv.op_set);
+ obstack_free(&cenv.obst, NULL);
+ be_liveness_invalidate(be_get_birg_liveness(birg));
+}
+
+
+/**
+ * Push nodes that do not need to be permed through the Perm.
+ * This is commonly a reload cascade at block ends.
+ * @note This routine needs interference information.
+ * @note This could probably be implemented more efficiently,
+ *       especially by searching the frontier lazily.
+ * @param perm The Perm node.
+ * @param data The walker data (lower_env_t).
+ * @return 1 if there is something left to perm over,
+ *         0 if the complete Perm was removed.
+ */
+static int push_through_perm(ir_node *perm, void *data)
+{
+ lower_env_t *env = data;
+ const arch_env_t *aenv = env->arch_env;
+
+ ir_graph *irg = get_irn_irg(perm);
+ ir_node *bl = get_nodes_block(perm);
+ ir_node *node;
+ int arity = get_irn_arity(perm);
+ int *map;
+ int *proj_map;
+ bitset_t *moved = bitset_alloca(arity);
+ int n_moved;
+ int new_size;
+ ir_node *frontier = sched_first(bl);
+ FIRM_DBG_REGISTER(firm_dbg_module_t *mod, "firm.be.lower.permmove");
+
+ int i, n;
+ const ir_edge_t *edge;
+ ir_node *last_proj = NULL, *irn;
+ const arch_register_class_t *cls = NULL;
+
+ DBG((mod, LEVEL_1, "perm move %+F irg %+F\n", perm, irg));
+
+ /* get an arbitrary Proj and determine its register class. */
+ foreach_out_edge (perm, edge) {
+ last_proj = get_edge_src_irn(edge);
+ cls = arch_get_irn_reg_class(aenv, last_proj, -1);
+ assert(is_Proj(last_proj));
+ break;
+ }
+
+ /* find the point in the schedule after which the
+  * potentially movable nodes must be defined.
+  * A Perm will only be pushed up to the first instruction
+  * which lets one of its operands die. */
+
+ sched_foreach_reverse_from (sched_prev(perm), irn) {
+ for(i = get_irn_arity(irn) - 1; i >= 0; --i) {
+ ir_node *op = get_irn_n(irn, i);
+ if(arch_irn_consider_in_reg_alloc(aenv, cls, op)
+ && !values_interfere(env->birg, op, last_proj)) {
+ frontier = sched_next(irn);
+ goto found_front;
+ }
+ }
+ }
+found_front:
+
+ DBG((mod, LEVEL_2, "\tfrontier: %+F\n", frontier));
+
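+ /* walk backwards from the Perm and try to move each of its operands
+    behind it, as long as the movability conditions below hold */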
+ node = sched_prev(perm);
+ n_moved = 0;
+ while(!sched_is_begin(node)) {
+ const arch_register_req_t *req;
+ int input = -1;
+ ir_node *proj = NULL;
+
+ foreach_out_edge(perm, edge) {
+ ir_node *out = get_edge_src_irn(edge);
+ int pn = get_Proj_proj(out);
+ ir_node *in = get_irn_n(perm, pn);
+ if(node == in) {
+ proj = out;
+ input = pn;
+ break;
+ }
+ }
+ /* it wasn't an input to the perm, we can't do anything more */
+ if(input < 0)
+ break;
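+ /* the node must be scheduled after the frontier computed above */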
+ if(!sched_comes_after(frontier, node))
+ break;
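+ /* do not move nodes that modify flags */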
+ if(arch_irn_is(aenv, node, modify_flags))
+ break;
+ if(is_Proj(node)) {
+ req = arch_get_register_req(aenv, get_Proj_pred(node),
+ -1 - get_Proj_proj(node));
+ } else {
+ req = arch_get_register_req(aenv, node, -1);
+ }
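+ /* only nodes without special register requirements may be moved */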
+ if(req->type != arch_register_req_type_normal)
+ break;
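+ /* the node must not have an operand in the Perm's register class */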
+ for(i = get_irn_arity(node) - 1; i >= 0; --i) {
+ ir_node *opop = get_irn_n(node, i);
+ if (arch_irn_consider_in_reg_alloc(aenv, cls, opop)) {
+ break;
+ }
+ }
+ if(i >= 0)
+ break;
+
+ DBG((mod, LEVEL_2, "\tmoving %+F after %+F, killing %+F\n", node, perm, proj));
+
+ /* move the movable node in front of the Perm */
+ sched_remove(node);
+ sched_add_after(perm, node);
+
+ /* give it the proj's register */
+ arch_set_irn_register(aenv, node, arch_get_irn_register(aenv, proj));
+
+ /* reroute all users of the proj to the moved node. */
+ edges_reroute(proj, node, irg);
+
+ /* and kill it */
+ set_Proj_pred(proj, new_Bad());
+ kill_node(proj);
+
+ bitset_set(moved, input);
+ n_moved++;
+
+ node = sched_prev(node);
+ }
+
+ /* well, we could not push anything through the perm */
+ if(n_moved == 0)
+ return 1;
+
+ new_size = arity - n_moved;
+ if(new_size == 0) {
+ return 0;
+ }
+
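+ /* build the index maps: map[new_pos] = old_pos for the remaining inputs,
+    proj_map[old_pos] = new_pos (-1 for inputs that were moved away) */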
+ map = alloca(new_size * sizeof(map[0]));
+ proj_map = alloca(arity * sizeof(proj_map[0]));
+ memset(proj_map, -1, arity * sizeof(proj_map[0]));
+ n = 0;
+ for(i = 0; i < arity; ++i) {
+ if(bitset_is_set(moved, i))
+ continue;
+ map[n] = i;
+ proj_map[i] = n;
+ n++;
+ }
+ assert(n == new_size);
+ foreach_out_edge(perm, edge) {
+ ir_node *proj = get_edge_src_irn(edge);
+ int pn = get_Proj_proj(proj);
+ pn = proj_map[pn];
+ assert(pn >= 0);
+ set_Proj_proj(proj, pn);
+ }
+
+ be_Perm_reduce(perm, new_size, map);
+ return 1;
+}
+
+/**
+ * Calls the corresponding lowering function for the node.
+ *
+ * @param irn The node to be checked for lowering
+ * @param walk_env The walker environment
+ */
+static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env) {
+ int perm_stayed;
+
+ if (is_Block(irn) || is_Proj(irn))
+ return;
+ if (!be_is_Perm(irn))
+ return;
+
+ perm_stayed = push_through_perm(irn, walk_env);
+ if (!perm_stayed)
+ return;
+
+ lower_perm_node(irn, walk_env);