 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 * @brief   Performs lowering of perm nodes. Inserts copies to assure register constraints.
 * @author  Christian Wuerdig

#include "irnodeset.h"
#include "irnodemap.h"

#include "iredges_t.h"

#include "besched_t.h"

#include "bessaconstr.h"
#include "beintlive_t.h"

#undef KEEP_ALIVE_COPYKEEP_HACK

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
DEBUG_ONLY(static firm_dbg_module_t *dbg_constr;)
DEBUG_ONLY(static firm_dbg_module_t *dbg_permmove;)
/** Associates an ir_node with its copy and CopyKeep. */
    ir_nodeset_t                  copies;  /**< all non-spillable copies of this irn */
    const arch_register_class_t  *cls;

/** Environment for constraints. */

/** Lowering walker environment. */
typedef struct _lower_env_t {

/** Holds a Perm register pair. */
typedef struct _reg_pair_t {
    const arch_register_t *in_reg;    /**< a perm IN register */
    ir_node                *in_node;  /**< the in node to which the register belongs */
    const arch_register_t *out_reg;   /**< a perm OUT register */
    ir_node                *out_node; /**< the out node to which the register belongs */
    int                     checked;  /**< indicates whether the pair was checked for cycles or not */

typedef enum _perm_type_t {

/** Structure to represent cycles or chains in a Perm. */
typedef struct _perm_cycle_t {
    const arch_register_t **elems;    /**< the registers in the cycle */
    int                     n_elems;  /**< number of elements in the cycle */
    perm_type_t             type;     /**< type (CHAIN or CYCLE) */

/** Returns the number of register pairs not yet marked as checked. */
static int get_n_unchecked_pairs(reg_pair_t const *const pairs, int const n)

    for (i = 0; i < n; i++) {
        if (!pairs[i].checked)
 * Gets the node corresponding to an IN register from an array of register pairs.
 * NOTE: The given register pairs and the register to look for must belong
 *       to the same register class.
 *
 * @param pairs  The array of register pairs
 * @param n      The number of pairs
 * @param reg    The register to look for
 * @return The corresponding node or NULL if not found
static ir_node *get_node_for_in_register(reg_pair_t *pairs, int n, const arch_register_t *reg) {

    for (i = 0; i < n; i++) {
        /* in register matches */
        if (pairs[i].in_reg->index == reg->index)
            return pairs[i].in_node;
 * Gets the node corresponding to an OUT register from an array of register pairs.
 * NOTE: The given register pairs and the register to look for must belong
 *       to the same register class.
 *
 * @param pairs  The array of register pairs
 * @param n      The number of pairs
 * @param reg    The register to look for
 * @return The corresponding node or NULL if not found
static ir_node *get_node_for_out_register(reg_pair_t *pairs, int n, const arch_register_t *reg) {

    for (i = 0; i < n; i++) {
        /* out register matches */
        if (pairs[i].out_reg->index == reg->index)
            return pairs[i].out_node;
 * Gets the index in the register pair array where the in register
 * corresponds to reg_idx.
 *
 * @param pairs    The array of register pairs
 * @param n        The number of pairs
 * @param reg_idx  The register index to look for
 * @return The corresponding index in pairs or -1 if not found
static int get_pairidx_for_in_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) {

    for (i = 0; i < n; i++) {
        /* in register matches */
        if (pairs[i].in_reg->index == reg_idx)
 * Gets the index in the register pair array where the out register
 * corresponds to reg_idx.
 *
 * @param pairs    The array of register pairs
 * @param n        The number of pairs
 * @param reg_idx  The register index to look for
 * @return The corresponding index in pairs or -1 if not found
static int get_pairidx_for_out_regidx(reg_pair_t *pairs, int n, unsigned reg_idx) {

    for (i = 0; i < n; i++) {
        /* out register matches */
        if (pairs[i].out_reg->index == reg_idx)
 * Gets an array of register pairs and tries to identify a cycle or chain
 * starting at position start. The identified cycle or chain is written
 * to *cycle.
 *
 * @param cycle  Variable to hold the identified cycle or chain
 * @param pairs  Array of register pairs
 * @param n      Length of the pairs array
 * @param start  Index to start at
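 *
 * Illustrative example (added, not from the original comment): for the pairs
 * (in, out) = {(r1, r2), (r2, r3), (r3, r1)} and start = 0 the result is a
 * PERM_CYCLE with elems = [r1, r2, r3]; for {(r1, r2), (r2, r3)} the result
 * is a PERM_CHAIN [r1, r2, r3], because no pair writes r1 and the backward
 * search for the chain head stops there.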
static void get_perm_cycle(perm_cycle_t *const cycle,
                           reg_pair_t   *const pairs,
    int         head         = pairs[start].in_reg->index;
    int         cur_idx      = pairs[start].out_reg->index;
    int const   n_pairs_todo = get_n_unchecked_pairs(pairs, n);
    perm_type_t cycle_tp     = PERM_CYCLE;

    /* We could be right in the middle of a chain, so we need to find the start */
    while (head != cur_idx) {
        /* goto previous register in cycle or chain */
        int const cur_pair_idx = get_pairidx_for_out_regidx(pairs, n, head);

        if (cur_pair_idx < 0) {
            cycle_tp = PERM_CHAIN;

        head  = pairs[cur_pair_idx].in_reg->index;
        start = cur_pair_idx;

    /* assume worst case: all remaining pairs build a cycle or chain */
    cycle->elems    = XMALLOCNZ(const arch_register_t*, n_pairs_todo * 2);
    cycle->n_elems  = 2;  /* initial number of elements is 2 */
    cycle->elems[0] = pairs[start].in_reg;
    cycle->elems[1] = pairs[start].out_reg;
    cycle->type     = cycle_tp;
    cur_idx         = pairs[start].out_reg->index;
    /* check for cycle or end of a chain */
    while (cur_idx != head) {
        /* goto next register in cycle or chain */
        int const cur_pair_idx = get_pairidx_for_in_regidx(pairs, n, cur_idx);

        if (cur_pair_idx < 0)

        cur_idx = pairs[cur_pair_idx].out_reg->index;

        /* it's not the first element: insert it */
        if (cur_idx != head) {
            cycle->elems[cycle->n_elems++] = pairs[cur_pair_idx].out_reg;
            /* we are back where we started -> CYCLE */
            cycle->type = PERM_CYCLE;

    /* mark all pairs that share an in or out register with the cycle as checked */
    for (idx = 0; idx < cycle->n_elems; idx++) {
        cur_pair_idx = get_pairidx_for_in_regidx(pairs, n, cycle->elems[idx]->index);
        if (cur_pair_idx >= 0)
            pairs[cur_pair_idx].checked = 1;

        cur_pair_idx = get_pairidx_for_out_regidx(pairs, n, cycle->elems[idx]->index);
        if (cur_pair_idx >= 0)
            pairs[cur_pair_idx].checked = 1;
 * Lowers a perm node. Resolves cycles and creates a bunch of
 * copy and swap operations to permute registers.
 * Note: The caller of this function has to make sure that irn
 *       is a Perm node.
 *
 * @param irn  The perm node
 * @param env  The lowering environment
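 *
 * Rough illustration (added, not from the original comment): a Perm over
 * (r1, r2, r3) producing (r2, r3, r1) forms a single 3-cycle; with do_copy
 * disabled it is lowered into a cascade of two-register Perms (exchanges),
 * whereas a chain such as r1 -> r2 -> r3 is lowered into Copy nodes built
 * from back to front.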
static void lower_perm_node(ir_node *irn, lower_env_t *env)
    const arch_register_class_t *const reg_class = arch_get_irn_register(get_irn_n(irn, 0))->reg_class;
    ir_graph                    *const irg       = get_irn_irg(irn);
    ir_node                     *const block     = get_nodes_block(irn);
    int                          const n         = get_irn_arity(irn);
    reg_pair_t                  *const pairs     = alloca(n * sizeof(pairs[0]));
    int                                do_copy   = env->do_copy;
    /* Get the schedule predecessor node to the perm.
     * NOTE: This works with auto-magic. If we insert the new copy/exchange
     * nodes after this node, everything should be ok. */
    ir_node         *sched_point = sched_prev(irn);
    const ir_edge_t *edge;

    DBG((dbg, LEVEL_1, "perm: %+F, sched point is %+F\n", irn, sched_point));
    assert(sched_point && "Perm is not scheduled or has no predecessor");

    assert(n == get_irn_n_edges(irn) && "perm's in and out numbers different");
    /* build the list of register pairs (in, out) */
    foreach_out_edge(irn, edge) {
        reg_pair_t *const pair = &pairs[i++];
        ir_node    *const out  = get_edge_src_irn(edge);
        long        const pn   = get_Proj_proj(out);
        ir_node    *const in   = get_irn_n(irn, pn);

        pair->in_reg   = arch_get_irn_register(in);
        pair->out_node = out;
        pair->out_reg  = arch_get_irn_register(out);
    /* Mark all equal pairs as checked, and exchange the OUT proj with the IN
     * node. */
    for (i = 0; i < n; i++) {
        reg_pair_t *const pair = &pairs[i];

        if (pair->in_reg->index != pair->out_reg->index)

        DBG((dbg, LEVEL_1, "%+F removing equal perm register pair (%+F, %+F, %s)\n",
             irn, pair->in_node, pair->out_node, pair->out_reg->name));

        /* reroute the edges from the proj to the argument */
        exchange(pair->out_node, pair->in_node);
    /* Set do_copy to 0 if it's on but we have no free register */
    /* TODO check for free register */

    /* check for cycles and chains */
    while (get_n_unchecked_pairs(pairs, n) > 0) {
        /* go to the first not-checked pair */
        for (i = 0; pairs[i].checked; ++i) {}
        get_perm_cycle(&cycle, pairs, n, i);

        DB((dbg, LEVEL_1, "%+F: following %s created:\n  ", irn, cycle.type == PERM_CHAIN ? "chain" : "cycle"));
        for (j = 0; j < cycle.n_elems; j++) {
            DB((dbg, LEVEL_1, " %s", cycle.elems[j]->name));
        DB((dbg, LEVEL_1, "\n"));
        if (n == 2 && cycle.type == PERM_CYCLE) {
            /* We don't need to do anything if we have a Perm with two elements
             * which represents a cycle, because those nodes already represent
             * exchange nodes. */

        /* TODO: - iff PERM_CYCLE && do_copy -> determine free temp reg and
         *         insert copy to/from it before/after the copy cascade (this
         *         reduces the cycle into a chain) */
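        /* Illustration (added note, not part of the original TODO): for the
         * cycle r1 -> r2 -> r3 -> r1 a free temporary register t would allow
         * the pure copy sequence t := r3; r3 := r2; r2 := r1; r1 := t
         * instead of a cascade of exchange nodes. */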
        /* build copy/swap nodes from back to front */
        for (i = cycle.n_elems - 2; i >= 0; i--) {
            ir_node *arg1 = get_node_for_in_register(pairs, n, cycle.elems[i]);
            ir_node *arg2 = get_node_for_in_register(pairs, n, cycle.elems[i + 1]);

            ir_node *res1 = get_node_for_out_register(pairs, n, cycle.elems[i]);
            ir_node *res2 = get_node_for_out_register(pairs, n, cycle.elems[i + 1]);
            /* If we have a cycle and don't copy: we need to create exchange
             * nodes.
             * NOTE: An exchange node is a perm node with 2 INs and 2 OUTs
             *   IN_1  = in  node with register i
             *   IN_2  = in  node with register i + 1
             *   OUT_1 = out node with register i + 1
             *   OUT_2 = out node with register i */
            if (cycle.type == PERM_CYCLE && !do_copy) {
                /* At this point we have to handle the following problem:
                 *
                 * If we have a cycle with more than two elements, then this
                 * could correspond to the following Perm node:
                 *
                 *   +----+   +----+   +----+
                 *   | r1 |   | r2 |   | r3 |
                 *   +-+--+   +-+--+   +--+-+
                 *     |        |         |
                 *     |        |         |
                 *   +-+--------+---------+-+
                 *   |         Perm         |
                 *   +-+--------+---------+-+
                 *     |        |         |
                 *     |        |         |
                 *   +-+--+   +-+--+   +--+-+
                 *   |Proj|   |Proj|   |Proj|
                 *   | r2 |   | r3 |   | r1 |
                 *   +----+   +----+   +----+
                 *
                 * This node is about to be split up into two smaller Perms,
                 * for which we need four Projs, and the one additional Proj
                 * of the first Perm has to be one IN of the second. So in
                 * general we need to create one additional Proj for each
                 * "middle" Perm and set this to one in node of the successor
                 * Perm. */
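                /* For illustration (added note, not from the original comment):
                 * with the 3-cycle r1 -> r2 -> r3 -> r1 the code below first
                 * creates an exchange Perm for the values in r2 and r3 and
                 * then a second exchange whose one input is an intermediate
                 * Proj of the first Perm; that intermediate Proj is the
                 * additional "middle" Proj described above. */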
                DBG((dbg, LEVEL_1, "%+F creating exchange node (%+F, %s) and (%+F, %s) with\n",
                     irn, arg1, cycle.elems[i]->name, arg2, cycle.elems[i + 1]->name));
                DBG((dbg, LEVEL_1, "%+F (%+F, %s) and (%+F, %s)\n",
                     irn, res1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name));

                cpyxchg = be_new_Perm(reg_class, irg, block, 2, in);

                    /* cycle is not done yet */
                    int pidx = get_pairidx_for_in_regidx(pairs, n, cycle.elems[i]->index);

                    /* create intermediate proj */
                    res1 = new_r_Proj(irg, block, cpyxchg, get_irn_mode(res1), 0);

                    /* set as in for next Perm */
                    pairs[pidx].in_node = res1;

                set_Proj_pred(res2, cpyxchg);
                set_Proj_proj(res2, 0);
                set_Proj_pred(res1, cpyxchg);
                set_Proj_proj(res1, 1);

                arch_set_irn_register(res2, cycle.elems[i + 1]);
                arch_set_irn_register(res1, cycle.elems[i]);

                /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
                sched_add_after(sched_point, cpyxchg);

                DBG((dbg, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point));

                /* set the new scheduling point */
                DBG((dbg, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n",
                     irn, arg1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name));

                cpyxchg = be_new_Copy(reg_class, irg, block, arg1);
                arch_set_irn_register(cpyxchg, cycle.elems[i + 1]);

                /* exchange copy node and proj */
                exchange(res2, cpyxchg);

                /* insert the copy/exchange node in schedule after the magic schedule node (see above) */
                sched_add_after(sched_point, cpyxchg);

                /* set the new scheduling point */
                sched_point = cpyxchg;

        free((void*)cycle.elems);

    /* remove the perm from schedule */
static int has_irn_users(const ir_node *irn) {
    return get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL) != 0;

static ir_node *find_copy(ir_node *irn, ir_node *op)

    for (cur_node = irn;;) {
        cur_node = sched_prev(cur_node);
        if (! be_is_Copy(cur_node))

        if (be_get_Copy_op(cur_node) == op && arch_irn_is(cur_node, dont_spill))
static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env) {
    ir_nodemap_t                 *op_set;
    const arch_register_class_t  *cls;
    op_copy_assoc_t              *entry;

    if (arch_irn_is(other_different, ignore) ||
            !mode_is_datab(get_irn_mode(other_different))) {
        DBG((dbg_constr, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn));

    irg    = be_get_birg_irg(env->birg);
    op_set = &env->op_set;
    block  = get_nodes_block(irn);
    cls    = arch_get_irn_reg_class(other_different, -1);
    /* Make a non-spillable copy of the different node. This is needed
     * because the different irn could be in a block far, far away.
     * The copy is optimized later if it is not needed. */
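    /* Rough sketch of the resulting pattern (added note): cpy is a dont_spill
     * be_Copy of other_different, kept alive next to irn either by a
     * be_CopyKeep (if other_different still has users) or by a plain be_Keep
     * over irn and cpy, so that the copied value stays live at irn and irn
     * ends up in a different register. */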
    /* check whether such a copy already exists immediately before in the schedule */
    cpy = find_copy(skip_Proj(irn), other_different);
        cpy = be_new_Copy(cls, irg, block, other_different);
        be_node_set_flags(cpy, BE_OUT_POS(0), arch_irn_flags_dont_spill);
        DBG((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different));
        DBG((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different));

    /* Add the Keep resp. CopyKeep and reroute the users
     * of the other_different irn in case of CopyKeep. */
    if (has_irn_users(other_different)) {
        keep = be_new_CopyKeep_single(cls, irg, block, cpy, irn, get_irn_mode(other_different));
        be_node_set_reg_class(keep, 1, cls);
        keep = be_new_Keep(cls, irg, block, 2, in);

    DBG((dbg_constr, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, cpy));
    /* insert copy and keep into schedule */
    assert(sched_is_scheduled(irn) && "need schedule to assure constraints");
    if (! sched_is_scheduled(cpy))
        sched_add_before(skip_Proj(irn), cpy);
    sched_add_after(irn, keep);

    /* insert the other_different node and its copies into the map */
    entry = ir_nodemap_get(op_set, other_different);
        entry = obstack_alloc(&env->obst, sizeof(*entry));
        ir_nodeset_init(&entry->copies);
        ir_nodemap_insert(op_set, other_different, entry);

    ir_nodeset_insert(&entry->copies, cpy);

    /* insert keep in case of CopyKeep */
    if (be_is_CopyKeep(keep))
        ir_nodeset_insert(&entry->copies, keep);
 * Checks if a node has a must_be_different constraint on an output and, if so,
 * adds Keeps to assure the constraint.
 *
 * @param irn          the node to check
 * @param skipped_irn  if irn is a Proj node, its predecessor, else irn
 * @param env          the constraint environment
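 *
 * Illustrative example (added): req->other_different is a bitmask over input
 * positions; a value of 0x5 (binary 101) means the result must differ from
 * inputs 0 and 2, and gen_assure_different_pattern() is then invoked once for
 * each of those inputs.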
static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env) {
    const arch_register_req_t *req = arch_get_register_req(irn, -1);

    if (arch_register_req_is(req, must_be_different)) {
        const unsigned other = req->other_different;

        if (arch_register_req_is(req, should_be_same)) {
            const unsigned same = req->other_same;

            if (is_po2(other) && is_po2(same)) {
                int idx_other = ntz(other);
                int idx_same  = ntz(same);

                 * We can safely ignore a should_be_same x must_be_different y
                 * IFF both inputs are equal!
                if (get_irn_n(skipped_irn, idx_other) == get_irn_n(skipped_irn, idx_same)) {

        for (i = 0; 1U << i <= other; ++i) {
            if (other & (1U << i)) {
                ir_node *different_from = get_irn_n(skipped_irn, i);
                gen_assure_different_pattern(irn, different_from, env);
 * Calls the functions to assure register constraints.
 *
 * @param block     The block to be checked
 * @param walk_env  The walker environment
static void assure_constraints_walker(ir_node *block, void *walk_env) {

    sched_foreach_reverse(block, irn) {
        ir_mode *mode = get_irn_mode(irn);

        if (mode == mode_T) {
            const ir_edge_t *edge;

            foreach_out_edge(irn, edge) {
                ir_node *proj = get_edge_src_irn(edge);

                mode = get_irn_mode(proj);
                if (mode_is_datab(mode))
                    assure_different_constraints(proj, irn, walk_env);
        } else if (mode_is_datab(mode)) {
            assure_different_constraints(irn, irn, walk_env);
 * Melt all copykeeps pointing to the same node
 * (or Projs of the same node), copying the same operand.
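 *
 * Illustrative example (added): two CopyKeeps that copy the same operand op
 * and keep different Projs of the same mode_T node are melted into a single
 * CopyKeep of op whose keep inputs are all of those Projs.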
static void melt_copykeeps(constraint_env_t *cenv) {
    be_irg_t              *birg = cenv->birg;
    ir_graph              *irg  = be_get_birg_irg(birg);
    ir_nodemap_iterator_t  map_iter;
    ir_nodemap_entry_t     map_entry;

    foreach_ir_nodemap(&cenv->op_set, map_entry, map_iter) {
        op_copy_assoc_t *entry = map_entry.data;

        ir_nodeset_iterator_t iter;
        ir_node **ck_arr, **melt_arr;

        /* collect all copykeeps */
        foreach_ir_nodeset(&entry->copies, cp, iter) {
            if (be_is_CopyKeep(cp)) {
                obstack_grow(&obst, &cp, sizeof(cp));

#ifdef KEEP_ALIVE_COPYKEEP_HACK
                set_irn_mode(cp, mode_ANY);
#endif /* KEEP_ALIVE_COPYKEEP_HACK */
        /* compare each copykeep with all other copykeeps */
        ck_arr = (ir_node **)obstack_finish(&obst);
        for (idx = 0; idx < num_ck; ++idx) {
            ir_node *ref, *ref_mode_T;

                ir_node *sched_pt = NULL;

                ref_mode_T = skip_Proj(get_irn_n(ref, 1));
                obstack_grow(&obst, &ref, sizeof(ref));

                DBG((dbg_constr, LEVEL_1, "Trying to melt %+F:\n", ref));

                /* check for copykeeps pointing to the same mode_T node as the reference copykeep */
                for (j = 0; j < num_ck; ++j) {
                    ir_node *cur_ck = ck_arr[j];

                    if (j != idx && cur_ck && skip_Proj(get_irn_n(cur_ck, 1)) == ref_mode_T) {
                        obstack_grow(&obst, &cur_ck, sizeof(cur_ck));
                        ir_nodeset_remove(&entry->copies, cur_ck);
                        DBG((dbg_constr, LEVEL_1, "\t%+F\n", cur_ck));

                        sched_remove(cur_ck);
                /* check if we found some candidates for melting */
                    DBG((dbg_constr, LEVEL_1, "\tno candidate found\n"));

                ir_nodeset_remove(&entry->copies, ref);

                melt_arr = (ir_node **)obstack_finish(&obst);
                /* melt all found copykeeps */
                NEW_ARR_A(ir_node *, new_ck_in, n_melt);
                for (j = 0; j < n_melt; ++j) {
                    new_ck_in[j] = get_irn_n(melt_arr[j], 1);

                    /* now we can kill the melted keeps, except the ref one;
                     * we still need some of its information */
                    if (melt_arr[j] != ref)
                        kill_node(melt_arr[j]);
#ifdef KEEP_ALIVE_COPYKEEP_HACK
                new_ck = be_new_CopyKeep(entry->cls, irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, mode_ANY);
#else
                new_ck = be_new_CopyKeep(entry->cls, irg, get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in, get_irn_mode(ref));
#endif /* KEEP_ALIVE_COPYKEEP_HACK */

                /* set register class for all kept inputs */
                for (j = 1; j <= n_melt; ++j)
                    be_node_set_reg_class(new_ck, j, entry->cls);

                ir_nodeset_insert(&entry->copies, new_ck);

                /* find scheduling point */
                sched_pt = ref_mode_T;
                do {
                    /* just walk along the schedule until a non-Keep/CopyKeep node is found */
                    sched_pt = sched_next(sched_pt);
                } while (be_is_Keep(sched_pt) || be_is_CopyKeep(sched_pt));

                sched_add_before(sched_pt, new_ck);
                DBG((dbg_constr, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt));

                /* finally: kill the reference copykeep */

        obstack_free(&obst, NULL);
 * Walks over all nodes to assure register constraints.
 *
 * @param birg  The birg structure containing the irg
void assure_constraints(be_irg_t *birg) {
    ir_graph              *irg = be_get_birg_irg(birg);
    constraint_env_t       cenv;
    ir_nodemap_iterator_t  map_iter;
    ir_nodemap_entry_t     map_entry;

    FIRM_DBG_REGISTER(dbg_constr, "firm.be.lower.constr");

    ir_nodemap_init(&cenv.op_set);
    obstack_init(&cenv.obst);

    irg_block_walk_graph(irg, NULL, assure_constraints_walker, &cenv);

    /* melt copykeeps, pointing to projs of the same mode_T node
     * and keeping the same operand */
    melt_copykeeps(&cenv);
    foreach_ir_nodemap(&cenv.op_set, map_entry, map_iter) {
        op_copy_assoc_t *entry = map_entry.data;

        ir_nodeset_iterator_t      iter;
        be_ssa_construction_env_t  senv;

        n     = ir_nodeset_size(&entry->copies);
        nodes = alloca(n * sizeof(nodes[0]));

        /* put the nodes into an array */
        DBG((dbg_constr, LEVEL_1, "introduce copies for %+F ", map_entry.node));

        /* collect all copies */
        foreach_ir_nodeset(&entry->copies, cp, iter) {
            DB((dbg_constr, LEVEL_1, ", %+F ", cp));

        DB((dbg_constr, LEVEL_1, "\n"));

        /* introduce the copies for the operand and its copies */
        be_ssa_construction_init(&senv, birg);
        be_ssa_construction_add_copy(&senv, map_entry.node);
        be_ssa_construction_add_copies(&senv, nodes, n);
        be_ssa_construction_fix_users(&senv, map_entry.node);
        be_ssa_construction_destroy(&senv);
        /* Could be that not all CopyKeeps are really needed,
         * so we transform unnecessary ones into Keeps. */
        foreach_ir_nodeset(&entry->copies, cp, iter) {
            if (be_is_CopyKeep(cp) && get_irn_n_edges(cp) < 1) {
                int n = get_irn_arity(cp);

                keep = be_new_Keep(arch_get_irn_reg_class(cp, -1),
                                   irg, get_nodes_block(cp), n, get_irn_in(cp) + 1);
                sched_add_before(cp, keep);

                /* Set all ins (including the block) of the CopyKeep BAD to keep the verifier happy. */

        ir_nodeset_destroy(&entry->copies);

    ir_nodemap_destroy(&cenv.op_set);
    obstack_free(&cenv.obst, NULL);

    be_liveness_invalidate(be_get_birg_liveness(birg));
 * Push nodes that do not need to be permed through the Perm.
 * This is commonly a reload cascade at block ends.
 * @note This routine needs interference.
 * @note Probably, we can implement it a little more efficiently.
 *       Especially searching the frontier lazily might be better.
 *
 * @param perm  The perm
 * @param env   The lowering environment
 *
 * @return 1, if there is something left to perm over.
 *         0, if the complete Perm was removed.
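 *
 * Illustrative scenario (added): a Reload scheduled directly above the Perm
 * whose value the Perm merely renames can instead be moved after the Perm,
 * be given the register of its Proj directly, and the corresponding Perm
 * input/output pair is then dropped via be_Perm_reduce().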
static int push_through_perm(ir_node *perm, lower_env_t *env)
    ir_graph *irg      = get_irn_irg(perm);
    ir_node  *bl       = get_nodes_block(perm);
    int       arity    = get_irn_arity(perm);
    bitset_t *moved    = bitset_alloca(arity);
    ir_node  *frontier = bl;
    const ir_edge_t *edge;
    ir_node *one_proj = NULL, *irn;
    const arch_register_class_t *cls = NULL;

    DBG((dbg_permmove, LEVEL_1, "perm move %+F irg %+F\n", perm, irg));

    /* get some Proj and find out the register class of that Proj. */
    edge     = get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL);
    one_proj = get_edge_src_irn(edge);
    assert(is_Proj(one_proj));
    cls      = arch_get_irn_reg_class(one_proj, -1);
    /* Find the point in the schedule after which the
     * potentially movable nodes must be defined.
     * A Perm will only be pushed up to the first instruction
     * which lets one of its own operands die.
     * If we allowed the Perm to move above this instruction,
     * the formerly dead operand would now be live at the point of
     * the Perm, increasing the register pressure by one.
    sched_foreach_reverse_from(sched_prev(perm), irn) {
        for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
            ir_node *op = get_irn_n(irn, i);
            if (arch_irn_consider_in_reg_alloc(cls, op) &&
                    !values_interfere(env->birg, op, one_proj)) {
    DBG((dbg_permmove, LEVEL_2, "\tfrontier: %+F\n", frontier));

    node = sched_prev(perm);
    while (!sched_is_begin(node)) {
        const arch_register_req_t *req;

        /* check whether node is an input of the Perm */
        foreach_out_edge(perm, edge) {
            ir_node *out = get_edge_src_irn(edge);
            int      pn  = get_Proj_proj(out);
            ir_node *in  = get_irn_n(perm, pn);

        /* it wasn't an input to the perm, we can't do anything more */
        if (!sched_comes_after(frontier, node))
        if (arch_irn_is(node, modify_flags))

        req = arch_get_register_req(get_Proj_pred(node),
                                    -1 - get_Proj_proj(node));
        req = arch_get_register_req(node, -1);

        if (req->type != arch_register_req_type_normal)

        for (i = get_irn_arity(node) - 1; i >= 0; --i) {
            ir_node *opop = get_irn_n(node, i);
            if (arch_irn_consider_in_reg_alloc(cls, opop)) {

        DBG((dbg_permmove, LEVEL_2, "\tmoving %+F after %+F, killing %+F\n", node, perm, proj));

        /* move the movable node behind the Perm */
        sched_add_after(perm, node);

        /* give it the proj's register */
        arch_set_irn_register(node, arch_get_irn_register(proj));

        /* reroute all users of the proj to the moved node. */
        edges_reroute(proj, node, irg);

        set_Proj_pred(proj, new_Bad());

        bitset_set(moved, input);

        node = sched_prev(node);
    /* well, we could not push anything through the perm */

    new_size = arity - n_moved;

    map      = alloca(new_size * sizeof(map[0]));
    proj_map = alloca(arity * sizeof(proj_map[0]));
    memset(proj_map, -1, arity * sizeof(proj_map[0]));

    for (i = 0; i < arity; ++i) {
        if (bitset_is_set(moved, i))

    assert(n == new_size);

    foreach_out_edge(perm, edge) {
        ir_node *proj = get_edge_src_irn(edge);
        int      pn   = get_Proj_proj(proj);

        set_Proj_proj(proj, pn);

    be_Perm_reduce(perm, new_size, map);
 * Calls the corresponding lowering function for the node.
 *
 * @param irn       The node to be checked for lowering
 * @param walk_env  The walker environment
static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env)

    if (!be_is_Perm(irn))

    perm_stayed = push_through_perm(irn, walk_env);
        lower_perm_node(irn, walk_env);
 * Walks over all blocks in an irg and performs the lowering that needs to be
 * done after register allocation (e.g. perm lowering).
 *
 * @param birg     The birg object
 * @param do_copy  1 == resolve cycles with a free reg if available
void lower_nodes_after_ra(be_irg_t *birg, int do_copy) {

    FIRM_DBG_REGISTER(dbg, "firm.be.lower");
    FIRM_DBG_REGISTER(dbg_permmove, "firm.be.lower.permmove");

    env.do_copy = do_copy;

    /* we will need interference */
    be_liveness_assure_chk(be_get_birg_liveness(birg));

    irg = be_get_birg_irg(birg);
    irg_walk_graph(irg, NULL, lower_nodes_after_ra_walker, &env);