/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 * @brief       Performs lowering of Perm nodes. Inserts copies to assure
 *              register constraints.
 * @author      Christian Wuerdig
 */
#include "irnodeset.h"
#include "irnodehashmap.h"
#include "iredges_t.h"
#include "bessaconstr.h"
#include "beintlive_t.h"
#undef KEEP_ALIVE_COPYKEEP_HACK

DEBUG_ONLY(static firm_dbg_module_t *dbg;)
DEBUG_ONLY(static firm_dbg_module_t *dbg_constr;)
DEBUG_ONLY(static firm_dbg_module_t *dbg_permmove;)
/** Associates an ir_node with its copy and CopyKeep. */
typedef struct {
	ir_nodeset_t copies; /**< all non-spillable copies of this irn */
	const arch_register_class_t *cls; /**< register class of the copied value */
} op_copy_assoc_t;
/** Environment for constraints. */
typedef struct {
	ir_nodehashmap_t op_set;
	struct obstack   obst;
} constraint_env_t;
/** Lowering walker environment. */
typedef struct lower_env_t {
	int do_copy; /**< use copies instead of exchanges where possible */
} lower_env_t;
/** Holds a Perm register pair. */
typedef struct reg_pair_t {
	const arch_register_t *in_reg;   /**< a perm IN register */
	ir_node               *in_node;  /**< the in node to which the register belongs */

	const arch_register_t *out_reg;  /**< a perm OUT register */
	ir_node               *out_node; /**< the out node to which the register belongs */

	int                    checked;  /**< indicates whether the pair was checked for a cycle or not */
} reg_pair_t;
typedef enum perm_type_t {
	PERM_CYCLE, /**< the pairs form a closed cycle */
	PERM_CHAIN  /**< the pairs form an open chain */
} perm_type_t;
/** Structure to represent cycles or chains in a Perm. */
typedef struct perm_cycle_t {
	const arch_register_t **elems;   /**< the registers in the cycle */
	int                     n_elems; /**< number of elements in the cycle */
	perm_type_t             type;    /**< type (CHAIN or CYCLE) */
} perm_cycle_t;
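/* Illustrative sketch (compiled out): how get_perm_cycle(), defined further
 * below, decomposes a set of register pairs. The registers r1, r2, r3 are
 * hypothetical; only the in_reg/out_reg wiring matters. */
#if 0
static void example_decompose(const arch_register_t *r1,
                              const arch_register_t *r2,
                              const arch_register_t *r3)
{
	/* r1->r2, r2->r3, r3->r1 closes on itself: a PERM_CYCLE over all
	 * three registers. */
	reg_pair_t pairs[] = {
		{ r1, NULL, r2, NULL, 0 },
		{ r2, NULL, r3, NULL, 0 },
		{ r3, NULL, r1, NULL, 0 },
	};
	perm_cycle_t cycle;
	get_perm_cycle(&cycle, pairs, 3, 0);
	assert(cycle.type == PERM_CYCLE && cycle.n_elems == 3);
	free((void*)cycle.elems);
	/* Dropping the pair r3->r1 instead yields a PERM_CHAIN over
	 * r1, r2, r3, because r1 is never written and r3 never read. */
}
#endif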
/** Returns the number of register pairs not yet marked as checked. */
static int get_n_unchecked_pairs(reg_pair_t const *const pairs, int const n)
{
	int n_unchecked = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!pairs[i].checked)
			n_unchecked++;
	}

	return n_unchecked;
}
 * Gets the node corresponding to an IN register from an array of register pairs.
 * NOTE: The given register pairs and the register to look for must belong
 * to the same register class.
 *
 * @param pairs  The array of register pairs
 * @param n      The number of pairs
 * @param reg    The register to look for
 * @return The corresponding node or NULL if not found
 */
static ir_node *get_node_for_in_register(reg_pair_t *pairs, int n, const arch_register_t *reg)
{
	int i;

	for (i = 0; i < n; i++) {
		/* in register matches */
		if (pairs[i].in_reg->index == reg->index)
			return pairs[i].in_node;
	}

	return NULL;
}
 * Gets the node corresponding to an OUT register from an array of register pairs.
 * NOTE: The given register pairs and the register to look for must belong
 * to the same register class.
 *
 * @param pairs  The array of register pairs
 * @param n      The number of pairs
 * @param reg    The register to look for
 * @return The corresponding node or NULL if not found
 */
static ir_node *get_node_for_out_register(reg_pair_t *pairs, int n, const arch_register_t *reg)
{
	int i;

	for (i = 0; i < n; i++) {
		/* out register matches */
		if (pairs[i].out_reg->index == reg->index)
			return pairs[i].out_node;
	}

	return NULL;
}
 * Gets the index in the register pair array where the in register
 * corresponds to reg_idx.
 *
 * @param pairs    The array of register pairs
 * @param n        The number of pairs
 * @param reg_idx  The register index to look for
 *
 * @return The corresponding index in pairs or -1 if not found
 */
static int get_pairidx_for_in_regidx(reg_pair_t *pairs, int n, unsigned reg_idx)
{
	int i;

	for (i = 0; i < n; i++) {
		/* in register matches */
		if (pairs[i].in_reg->index == reg_idx)
			return i;
	}

	return -1;
}
 * Gets the index in the register pair array where the out register
 * corresponds to reg_idx.
 *
 * @param pairs    The array of register pairs
 * @param n        The number of pairs
 * @param reg_idx  The register index to look for
 *
 * @return The corresponding index in pairs or -1 if not found
 */
static int get_pairidx_for_out_regidx(reg_pair_t *pairs, int n, unsigned reg_idx)
{
	int i;

	for (i = 0; i < n; i++) {
		/* out register matches */
		if (pairs[i].out_reg->index == reg_idx)
			return i;
	}

	return -1;
}
 * Gets an array of register pairs and tries to identify a cycle or chain
 * starting at position start. The result is written to cycle.
 *
 * @param cycle  Variable to hold the cycle/chain found
 * @param pairs  Array of register pairs
 * @param n      Length of the pairs array
 * @param start  Index to start at
 */
static void get_perm_cycle(perm_cycle_t *const cycle,
                           reg_pair_t   *const pairs,
                           int           const n,
                           int                 start)
{
	int         head         = pairs[start].in_reg->index;
	int         cur_idx      = pairs[start].out_reg->index;
	int   const n_pairs_todo = get_n_unchecked_pairs(pairs, n);
	perm_type_t cycle_tp     = PERM_CYCLE;
	int         idx;

	/* We could be right in the middle of a chain, so we need to find the start */
	while (head != cur_idx) {
		/* goto previous register in cycle or chain */
		int const cur_pair_idx = get_pairidx_for_out_regidx(pairs, n, head);

		if (cur_pair_idx < 0) {
			cycle_tp = PERM_CHAIN;
			break;
		} else {
			head  = pairs[cur_pair_idx].in_reg->index;
			start = cur_pair_idx;
		}
	}

	/* assume worst case: all remaining pairs build a cycle or chain */
	cycle->elems    = XMALLOCNZ(const arch_register_t*, n_pairs_todo * 2);
	cycle->n_elems  = 2; /* initial number of elements is 2 */
	cycle->elems[0] = pairs[start].in_reg;
	cycle->elems[1] = pairs[start].out_reg;
	cycle->type     = cycle_tp;
	cur_idx         = pairs[start].out_reg->index;

	idx = 2;
	/* check for cycle or end of a chain */
	while (cur_idx != head) {
		/* goto next register in cycle or chain */
		int const cur_pair_idx = get_pairidx_for_in_regidx(pairs, n, cur_idx);

		if (cur_pair_idx < 0)
			break;

		cur_idx = pairs[cur_pair_idx].out_reg->index;

		/* it's not the first element: insert it */
		if (cur_idx != head) {
			cycle->elems[idx++] = pairs[cur_pair_idx].out_reg;
			cycle->n_elems++;
		} else {
			/* we are back where we started -> CYCLE */
			cycle->type = PERM_CYCLE;
		}
	}

	/* mark all pairs having one in/out register with cycle in common as checked */
	for (idx = 0; idx < cycle->n_elems; idx++) {
		int cur_pair_idx;

		cur_pair_idx = get_pairidx_for_in_regidx(pairs, n, cycle->elems[idx]->index);
		if (cur_pair_idx >= 0)
			pairs[cur_pair_idx].checked = 1;

		cur_pair_idx = get_pairidx_for_out_regidx(pairs, n, cycle->elems[idx]->index);
		if (cur_pair_idx >= 0)
			pairs[cur_pair_idx].checked = 1;
	}
}
 * Lowers a perm node. Resolves cycles and creates a bunch of
 * copy and swap operations to permute registers.
 * Note: The caller of this function has to make sure that irn
 *       is a Perm node.
 *
 * @param irn  The perm node
 * @param env  The lowerer environment
 */
static void lower_perm_node(ir_node *irn, lower_env_t *env)
{
	const arch_register_class_t *const reg_class   = arch_get_irn_register(get_irn_n(irn, 0))->reg_class;
	ir_node                     *const block       = get_nodes_block(irn);
	int                          const arity       = get_irn_arity(irn);
	reg_pair_t                  *const pairs       = ALLOCAN(reg_pair_t, arity);
	int                                keep_perm   = 0;
	int                                do_copy     = env->do_copy;
	/* Get the schedule predecessor node to the perm.
	 * NOTE: This works with auto-magic. If we insert the new copy/exchange
	 * nodes after this node, everything should be ok. */
	ir_node                     *      sched_point = sched_prev(irn);
	int                                n;
	int                                i;

	DBG((dbg, LEVEL_1, "perm: %+F, sched point is %+F\n", irn, sched_point));
	assert(sched_point && "Perm is not scheduled or has no predecessor");

	assert(arity == get_irn_n_edges(irn) && "perm's in and out numbers differ");
	/* build the list of register pairs (in, out) */
	n = 0;
	foreach_out_edge_safe(irn, edge) {
		ir_node               *const out     = get_edge_src_irn(edge);
		long                   const pn      = get_Proj_proj(out);
		ir_node               *const in      = get_irn_n(irn, pn);
		arch_register_t const *const in_reg  = arch_get_irn_register(in);
		arch_register_t const *const out_reg = arch_get_irn_register(out);
		reg_pair_t            *      pair;

		/* If a value stays in its register, no copy/exchange is needed:
		 * simply bypass the Perm for this value. */
		if (in_reg == out_reg) {
			DBG((dbg, LEVEL_1, "%+F removing equal perm register pair (%+F, %+F, %s)\n",
			     irn, in, out, out_reg->name));
			exchange(out, in);
			continue;
		}

		pair           = &pairs[n++];
		pair->in_node  = in;
		pair->in_reg   = in_reg;
		pair->out_node = out;
		pair->out_reg  = out_reg;
		pair->checked  = 0;
	}

	DBG((dbg, LEVEL_1, "%+F has %d unresolved constraints\n", irn, n));
	/* Set do_copy to 0 if it's on but we have no free register */
	/* TODO check for free register */
	if (do_copy) {
		do_copy = 0;
	}

	/* check for cycles and chains */
	while (get_n_unchecked_pairs(pairs, n) > 0) {
		perm_cycle_t cycle;
		int          j;

		/* go to the first not-checked pair */
		for (i = 0; pairs[i].checked; ++i) {}
		get_perm_cycle(&cycle, pairs, n, i);
		DB((dbg, LEVEL_1, "%+F: following %s created:\n  ", irn, cycle.type == PERM_CHAIN ? "chain" : "cycle"));
		for (j = 0; j < cycle.n_elems; j++) {
			DB((dbg, LEVEL_1, " %s", cycle.elems[j]->name));
		}
		DB((dbg, LEVEL_1, "\n"));
		if (cycle.type == PERM_CYCLE && arity == 2) {
			/* We don't need to do anything if we have a Perm with two elements
			 * which represents a cycle, because those nodes already represent
			 * exchange nodes */
			keep_perm = 1;
		} else {
			/* TODO: - iff PERM_CYCLE && do_copy -> determine free temp reg and
			 *         insert copy to/from it before/after the copy cascade (this
			 *         reduces the cycle into a chain) */
			/* build copy/swap nodes from back to front */
			for (i = cycle.n_elems - 2; i >= 0; i--) {
				ir_node *arg1 = get_node_for_in_register(pairs, n, cycle.elems[i]);
				ir_node *arg2 = get_node_for_in_register(pairs, n, cycle.elems[i + 1]);

				ir_node *res1 = get_node_for_out_register(pairs, n, cycle.elems[i]);
				ir_node *res2 = get_node_for_out_register(pairs, n, cycle.elems[i + 1]);
				/* If we have a cycle and don't copy: we need to create exchange
				 * nodes.
				 * NOTE: An exchange node is a perm node with 2 INs and 2 OUTs
				 * IN_1  = in  node with register i
				 * IN_2  = in  node with register i + 1
				 * OUT_1 = out node with register i + 1
				 * OUT_2 = out node with register i */
				ir_node *cpyxchg;
				if (cycle.type == PERM_CYCLE && !do_copy) {
					ir_node *in[2];
					in[0] = arg1;
					in[1] = arg2;

					/* At this point we have to handle the following problem:
					 *
					 * If we have a cycle with more than two elements, then this
					 * could correspond to the following Perm node:
					 *
					 *   +----+   +----+   +----+
					 *   | r1 |   | r2 |   | r3 |
					 *   +-+--+   +-+--+   +--+-+
					 *     |        |         |
					 *     |        |         |
					 *   +-+--------+---------+-+
					 *   |         Perm         |
					 *   +-+--------+---------+-+
					 *     |        |         |
					 *     |        |         |
					 *   +-+--+   +-+--+   +--+-+
					 *   |Proj|   |Proj|   |Proj|
					 *   | r2 |   | r3 |   | r1 |
					 *   +----+   +----+   +----+
					 *
					 * This node is about to be split up into two 2x Perm's for
					 * which we need 4 Proj's and the one additional Proj of the
					 * first Perm has to be one IN of the second. So in general
					 * we need to create one additional Proj for each "middle"
					 * Perm and set this to one in node of the successor Perm. */
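					/* Worked example (illustrative): for the 3-cycle drawn
					 * above, the surrounding loop emits two 2-Perms, built
					 * from back to front. The first swaps the last two
					 * registers of the cycle, and its extra Proj re-enters
					 * pairs[] as in node (see "create intermediate proj"
					 * below); the second swaps the first two registers,
					 * producing the remaining final values. */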
					DBG((dbg, LEVEL_1, "%+F creating exchange node (%+F, %s) and (%+F, %s) with\n",
					     irn, arg1, cycle.elems[i]->name, arg2, cycle.elems[i + 1]->name));
					DBG((dbg, LEVEL_1, "%+F (%+F, %s) and (%+F, %s)\n",
					     irn, res1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name));

					cpyxchg = be_new_Perm(reg_class, block, 2, in);

					if (i > 0) {
						/* cycle is not done yet */
						int pidx = get_pairidx_for_in_regidx(pairs, n, cycle.elems[i]->index);

						/* create intermediate proj */
						res1 = new_r_Proj(cpyxchg, get_irn_mode(res1), 0);

						/* set as in for next Perm */
						pairs[pidx].in_node = res1;
					}

					set_Proj_pred(res2, cpyxchg);
					set_Proj_proj(res2, 0);
					set_Proj_pred(res1, cpyxchg);
					set_Proj_proj(res1, 1);

					arch_set_irn_register(res2, cycle.elems[i + 1]);
					arch_set_irn_register(res1, cycle.elems[i]);
					DB((dbg, LEVEL_1, "replacing %+F with %+F, placed new node after %+F\n", irn, cpyxchg, sched_point));
				} else {
					DB((dbg, LEVEL_1, "%+F creating copy node (%+F, %s) -> (%+F, %s)\n",
					    irn, arg1, cycle.elems[i]->name, res2, cycle.elems[i + 1]->name));

					cpyxchg = be_new_Copy(block, arg1);
					arch_set_irn_register(cpyxchg, cycle.elems[i + 1]);

					/* exchange copy node and proj */
					exchange(res2, cpyxchg);
				}
				/* insert the copy/exchange node in schedule after the magic schedule node (see above) */
				sched_add_after(sched_point, cpyxchg);

				/* set the new scheduling point */
				sched_point = cpyxchg;
			}
		}

		free((void*)cycle.elems);
	}

	/* remove the perm from schedule */
	if (!keep_perm) {
		sched_remove(irn);
		kill_node(irn);
	}
}
static int has_irn_users(const ir_node *irn)
{
	return get_irn_out_edge_first_kind(irn, EDGE_KIND_NORMAL) != NULL;
}
static ir_node *find_copy(ir_node *irn, ir_node *op)
{
	ir_node *cur_node;

	for (cur_node = irn;;) {
		cur_node = sched_prev(cur_node);
		if (!be_is_Copy(cur_node))
			return NULL;
		if (be_get_Copy_op(cur_node) == op && arch_irn_is(cur_node, dont_spill))
			return cur_node;
	}
}
static void gen_assure_different_pattern(ir_node *irn, ir_node *other_different, constraint_env_t *env)
{
	ir_nodehashmap_t            *op_set;
	ir_node                     *block;
	const arch_register_class_t *cls;
	ir_node                     *keep, *cpy;
	op_copy_assoc_t             *entry;

	arch_register_req_t const *const req = arch_get_irn_register_req(other_different);
	if (arch_register_req_is(req, ignore) ||
	    !mode_is_datab(get_irn_mode(other_different))) {
		DB((dbg_constr, LEVEL_1, "ignore constraint for %+F because other_irn is ignore or not a datab node\n", irn));
		return;
	}

	op_set = &env->op_set;
	block  = get_nodes_block(irn);
	cls    = req->cls;

	/* Make a non-spillable copy of the different node. This is needed
	 * because the different irn could be in a block far far away.
	 * The copy is optimized later if not needed. */

	/* check if such a copy already exists in the schedule immediately before */
	cpy = find_copy(skip_Proj(irn), other_different);
	if (cpy == NULL) {
		cpy = be_new_Copy(block, other_different);
		arch_set_irn_flags(cpy, arch_irn_flags_dont_spill);
		DB((dbg_constr, LEVEL_1, "created non-spillable %+F for value %+F\n", cpy, other_different));
	} else {
		DB((dbg_constr, LEVEL_1, "using already existing %+F for value %+F\n", cpy, other_different));
	}
	/* Add the Keep resp. CopyKeep and reroute the users
	 * of the other_different irn in case of CopyKeep. */
	if (has_irn_users(other_different)) {
		keep = be_new_CopyKeep_single(block, cpy, irn);
		be_node_set_reg_class_in(keep, 1, cls);
	} else {
		ir_node *in[2];
		in[0] = irn;
		in[1] = cpy;
		keep = be_new_Keep(block, 2, in);
	}

	DB((dbg_constr, LEVEL_1, "created %+F(%+F, %+F)\n\n", keep, irn, cpy));

	/* insert copy and keep into schedule */
	assert(sched_is_scheduled(irn) && "need schedule to assure constraints");
	if (!sched_is_scheduled(cpy))
		sched_add_before(skip_Proj(irn), cpy);
	sched_add_after(skip_Proj(irn), keep);

	/* insert the other different and its copies into the map */
	entry = ir_nodehashmap_get(op_copy_assoc_t, op_set, other_different);
	if (entry == NULL) {
		entry = OALLOC(&env->obst, op_copy_assoc_t);
		entry->cls = cls;
		ir_nodeset_init(&entry->copies);
		ir_nodehashmap_insert(op_set, other_different, entry);
	}

	ir_nodeset_insert(&entry->copies, cpy);

	/* insert keep in case of CopyKeep */
	if (be_is_CopyKeep(keep))
		ir_nodeset_insert(&entry->copies, keep);
}
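/* Minimal sketch (compiled out; n and v are hypothetical nodes) of the
 * pattern gen_assure_different_pattern() builds: a non-spillable Copy of v,
 * kept alive past n by a CopyKeep, forces the register allocator to assign
 * n and v different registers. */
#if 0
static void example_different_pattern(ir_node *n, ir_node *v)
{
	ir_node *block = get_nodes_block(n);
	ir_node *cpy   = be_new_Copy(block, v);
	ir_node *keep;

	arch_set_irn_flags(cpy, arch_irn_flags_dont_spill);
	keep = be_new_CopyKeep_single(block, cpy, n);
	sched_add_before(skip_Proj(n), cpy);
	sched_add_after(skip_Proj(n), keep);
}
#endif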
 * Checks if a node has a must_be_different constraint on an output and,
 * if so, adds a Keep/CopyKeep pattern to assure the constraint.
 *
 * @param irn          the node to check
 * @param skipped_irn  if irn is a Proj node, its predecessor, else irn
 * @param env          the constraint environment
 */
static void assure_different_constraints(ir_node *irn, ir_node *skipped_irn, constraint_env_t *env)
{
	const arch_register_req_t *req = arch_get_irn_register_req(irn);

	if (arch_register_req_is(req, must_be_different)) {
		const unsigned other = req->other_different;
		int i;

		if (arch_register_req_is(req, should_be_same)) {
			const unsigned same = req->other_same;

			if (is_po2(other) && is_po2(same)) {
				int idx_other = ntz(other);
				int idx_same  = ntz(same);

				/*
				 * We can safely ignore a should_be_same x must_be_different y
				 * IFF both inputs are equal!
				 */
				if (get_irn_n(skipped_irn, idx_other) == get_irn_n(skipped_irn, idx_same)) {
					return;
				}
			}
		}
		for (i = 0; 1U << i <= other; ++i) {
			if (other & (1U << i)) {
				ir_node *different_from = get_irn_n(skipped_irn, i);
				gen_assure_different_pattern(irn, different_from, env);
			}
		}
	}
}
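/* Example (illustrative): other_different is a bitmask of input positions.
 * For other == 0x5 (binary 101), the loop above calls
 * gen_assure_different_pattern() for inputs 0 and 2 of skipped_irn. */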
 * Calls the functions to assure register constraints.
 *
 * @param block     The block to be checked
 * @param walk_env  The walker environment
 */
static void assure_constraints_walker(ir_node *block, void *walk_env)
{
	constraint_env_t *env = (constraint_env_t*)walk_env;
	sched_foreach_reverse(block, irn) {
		ir_mode *mode = get_irn_mode(irn);

		if (mode == mode_T) {
			foreach_out_edge(irn, edge) {
				ir_node *proj = get_edge_src_irn(edge);

				mode = get_irn_mode(proj);
				if (mode_is_datab(mode))
					assure_different_constraints(proj, irn, env);
			}
		} else if (mode_is_datab(mode)) {
			assure_different_constraints(irn, irn, env);
		}
	}
}
 * Melt all copykeeps pointing to the same node
 * (or Projs of the same node), copying the same operand.
 */
static void melt_copykeeps(constraint_env_t *cenv)
{
	ir_nodehashmap_iterator_t map_iter;
	ir_nodehashmap_entry_t    map_entry;

	/* for all */
	foreach_ir_nodehashmap(&cenv->op_set, map_entry, map_iter) {
		op_copy_assoc_t *entry = (op_copy_assoc_t*)map_entry.data;
		int              idx, num_ck;
		struct obstack   obst;
		ir_node        **ck_arr, **melt_arr;

		obstack_init(&obst);

		/* collect all copykeeps */
		num_ck = 0;
		foreach_ir_nodeset(&entry->copies, cp, iter) {
			if (be_is_CopyKeep(cp)) {
				obstack_grow(&obst, &cp, sizeof(cp));
				++num_ck;
#ifdef KEEP_ALIVE_COPYKEEP_HACK
			} else {
				set_irn_mode(cp, mode_ANY);
				keep_alive(cp);
#endif /* KEEP_ALIVE_COPYKEEP_HACK */
			}
		}
		/* compare each copykeep with all other copykeeps */
		ck_arr = (ir_node **)obstack_finish(&obst);
		for (idx = 0; idx < num_ck; ++idx) {
			ir_node *ref, *ref_mode_T;

			if (ck_arr[idx]) {
				int       j, n_melt;
				ir_node **new_ck_in;
				ir_node  *sched_pt = NULL;

				n_melt     = 1;
				ref        = ck_arr[idx];
				ref_mode_T = skip_Proj(get_irn_n(ref, 1));
				obstack_grow(&obst, &ref, sizeof(ref));

				DB((dbg_constr, LEVEL_1, "Trying to melt %+F:\n", ref));

				/* check for copykeeps pointing to the same mode_T node as the reference copykeep */
				for (j = 0; j < num_ck; ++j) {
					ir_node *cur_ck = ck_arr[j];

					if (j != idx && cur_ck && skip_Proj(get_irn_n(cur_ck, 1)) == ref_mode_T) {
						obstack_grow(&obst, &cur_ck, sizeof(cur_ck));
						ir_nodeset_remove(&entry->copies, cur_ck);
						DB((dbg_constr, LEVEL_1, "\t%+F\n", cur_ck));
						ck_arr[j] = NULL;
						++n_melt;
						sched_remove(cur_ck);
					}
				}
				ck_arr[idx] = NULL;

				/* check if we found some candidates for melting */
				if (n_melt == 1) {
					DB((dbg_constr, LEVEL_1, "\tno candidate found\n"));
					continue;
				}

				ir_nodeset_remove(&entry->copies, ref);
				sched_remove(ref);
				melt_arr = (ir_node **)obstack_finish(&obst);
				/* melt all found copykeeps */
				NEW_ARR_A(ir_node *, new_ck_in, n_melt);
				for (j = 0; j < n_melt; ++j) {
					new_ck_in[j] = get_irn_n(melt_arr[j], 1);

					/* now we can kill the melted keeps, except the
					 * ref one, of which we still need some information */
					if (melt_arr[j] != ref)
						kill_node(melt_arr[j]);
				}

				ir_node *const new_ck = be_new_CopyKeep(get_nodes_block(ref), be_get_CopyKeep_op(ref), n_melt, new_ck_in);
#ifdef KEEP_ALIVE_COPYKEEP_HACK
				keep_alive(new_ck);
#endif /* KEEP_ALIVE_COPYKEEP_HACK */

				/* set register class for all kept inputs */
				for (j = 1; j <= n_melt; ++j)
					be_node_set_reg_class_in(new_ck, j, entry->cls);

				ir_nodeset_insert(&entry->copies, new_ck);

				/* find scheduling point */
				sched_pt = ref_mode_T;
				do {
					/* just walk along the schedule until a non-Keep/CopyKeep node is found */
					sched_pt = sched_next(sched_pt);
				} while (be_is_Keep(sched_pt) || be_is_CopyKeep(sched_pt));

				sched_add_before(sched_pt, new_ck);
				DB((dbg_constr, LEVEL_1, "created %+F, scheduled before %+F\n", new_ck, sched_pt));

				/* finally: kill the reference copykeep */
				kill_node(ref);
			}
		}

		obstack_free(&obst, NULL);
	}
}
void assure_constraints(ir_graph *irg)
{
	constraint_env_t          cenv;
	ir_nodehashmap_iterator_t map_iter;
	ir_nodehashmap_entry_t    map_entry;

	FIRM_DBG_REGISTER(dbg_constr, "firm.be.lower.constr");

	ir_nodehashmap_init(&cenv.op_set);
	obstack_init(&cenv.obst);

	irg_block_walk_graph(irg, NULL, assure_constraints_walker, &cenv);
	/* melt copykeeps pointing to Projs of the same
	 * mode_T node and keeping the same operand */
	melt_copykeeps(&cenv);
	/* for all */
	foreach_ir_nodehashmap(&cenv.op_set, map_entry, map_iter) {
		op_copy_assoc_t          *entry = (op_copy_assoc_t*)map_entry.data;
		size_t                    n     = ir_nodeset_size(&entry->copies);
		ir_node                 **nodes = ALLOCAN(ir_node*, n);
		be_ssa_construction_env_t senv;

		/* put the node in an array */
		DBG((dbg_constr, LEVEL_1, "introduce copies for %+F ", map_entry.node));

		/* collect all copies */
		n = 0;
		foreach_ir_nodeset(&entry->copies, cp, iter) {
			nodes[n++] = cp;
			DB((dbg_constr, LEVEL_1, ", %+F ", cp));
		}

		DB((dbg_constr, LEVEL_1, "\n"));

		/* introduce the copies for the operand and its copies */
		be_ssa_construction_init(&senv, irg);
		be_ssa_construction_add_copy(&senv, map_entry.node);
		be_ssa_construction_add_copies(&senv, nodes, n);
		be_ssa_construction_fix_users(&senv, map_entry.node);
		be_ssa_construction_destroy(&senv);
		/* Could be that not all CopyKeeps are really needed,
		 * so we transform unnecessary ones into Keeps. */
		foreach_ir_nodeset(&entry->copies, cp, iter) {
			if (be_is_CopyKeep(cp) && get_irn_n_edges(cp) < 1) {
				int      n = get_irn_arity(cp);
				ir_node *keep;

				keep = be_new_Keep(get_nodes_block(cp), n, get_irn_in(cp) + 1);
				sched_add_before(cp, keep);

				/* Set all ins (including the block) of the CopyKeep to Bad
				 * to keep the verifier happy. */
				sched_remove(cp);
				kill_node(cp);
			}
		}

		ir_nodeset_destroy(&entry->copies);
	}
	ir_nodehashmap_destroy(&cenv.op_set);
	obstack_free(&cenv.obst, NULL);
	be_invalidate_live_sets(irg);
}
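/* Typical pipeline order (sketch; the driver function is hypothetical):
 * constraints are asserted before register allocation, Perms are lowered
 * afterwards. */
#if 0
static void example_driver(ir_graph *irg)
{
	assure_constraints(irg);
	/* ... spilling and register allocation ... */
	lower_nodes_after_ra(irg, /*do_copy=*/1);
}
#endif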
 * Push nodes that do not need to be permed through the Perm.
 * This is commonly a reload cascade at block ends.
 * @note This routine needs interference.
 * @note Probably, we can implement it a little more efficiently.
 *       Especially searching the frontier lazily might be better.
 *
 * @param perm  The perm
 *
 * @return 1 if there is something left to perm over,
 *         0 if the complete Perm was removed.
 */
static int push_through_perm(ir_node *perm)
{
	ir_graph *irg      = get_irn_irg(perm);
	ir_node  *bl       = get_nodes_block(perm);
	int       arity    = get_irn_arity(perm);
	int      *map;
	int      *proj_map;
	bitset_t *moved    = bitset_alloca(arity);
	int       n_moved  = 0;
	int       new_size;
	ir_node  *frontier = bl;
	int       i, n;
	be_lv_t  *lv       = be_get_irg_liveness(irg);

	/* get some Proj and find out the register class of that Proj. */
	ir_node *one_proj = get_edge_src_irn(get_irn_out_edge_first_kind(perm, EDGE_KIND_NORMAL));
	const arch_register_class_t *cls = arch_get_irn_reg_class(one_proj);
	assert(is_Proj(one_proj));

	DB((dbg_permmove, LEVEL_1, "perm move %+F irg %+F\n", perm, irg));
	/* Find the point in the schedule after which the
	 * potentially movable nodes must be defined.
	 * A Perm will only be pushed up to the first instruction
	 * which lets an operand of itself die.
	 * If we would allow to move the Perm above this instruction,
	 * the formerly dead operand would be live now at the point of
	 * the Perm, increasing the register pressure by one.
	 */
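	/* Example (illustrative): if the instruction directly above the Perm
	 * is the last use of some value v (v does not interfere with the
	 * Perm's Projs), that instruction becomes the frontier: moving the
	 * Perm above it would make v live across the Perm and raise the
	 * register pressure by one. */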
	sched_foreach_reverse_from(sched_prev(perm), irn) {
		be_foreach_use(irn, cls, in_req_, op, op_req_,
			if (!be_values_interfere(lv, op, one_proj)) {
				frontier = irn;
				goto found_front;
			}
		);
	}
found_front:
	DB((dbg_permmove, LEVEL_2, "\tfrontier: %+F\n", frontier));
	for (;;) {
		ir_node *const node = sched_prev(perm);
		if (node == frontier)
			break;

		const arch_register_req_t *req;
		int                        input = -1;
		ir_node                   *proj  = NULL;

		/* search whether node is an input of the Perm */
		foreach_out_edge(perm, edge) {
			ir_node *out = get_edge_src_irn(edge);
			int      pn  = get_Proj_proj(out);
			ir_node *in  = get_irn_n(perm, pn);
			if (node == in) {
				proj  = out;
				input = pn;
				break;
			}
		}
		/* it wasn't an input to the perm, we can't do anything more */
		if (input < 0)
			break;
		if (arch_irn_is(node, modify_flags))
			break;
		req = arch_get_irn_register_req(node);
		if (req->type != arch_register_req_type_normal)
			break;
		for (i = get_irn_arity(node) - 1; i >= 0; --i) {
			ir_node *opop = get_irn_n(node, i);
			if (arch_irn_consider_in_reg_alloc(cls, opop)) {
				break;
			}
		}
		if (i >= 0)
			break;

		DBG((dbg_permmove, LEVEL_2, "\tmoving %+F after %+F, killing %+F\n", node, perm, proj));

		/* move the movable node in front of the Perm */
		sched_remove(node);
		sched_add_after(perm, node);

		/* give it the proj's register */
		arch_set_irn_register(node, arch_get_irn_register(proj));

		/* reroute all users of the proj to the moved node. */
		exchange(proj, node);

		bitset_set(moved, input);
		++n_moved;
	}
	/* well, we could not push anything through the perm */
	if (n_moved == 0)
		return 1;

	new_size = arity - n_moved;
	if (new_size == 0) {
		sched_remove(perm);
		kill_node(perm);
		return 0;
	}

	map      = ALLOCAN(int, new_size);
	proj_map = ALLOCAN(int, arity);
	memset(proj_map, -1, sizeof(proj_map[0]) * arity);
	n = 0;
	for (i = 0; i < arity; ++i) {
		if (bitset_is_set(moved, i))
			continue;
		map[n]      = i;
		proj_map[i] = n;
		++n;
	}
	assert(n == new_size);
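	/* Example (illustrative): arity == 3 with input 1 pushed through
	 * gives new_size == 2, map == { 0, 2 } (the old input positions that
	 * survive) and proj_map == { 0, -1, 1 }, so the remaining Projs 0
	 * and 2 are renumbered to 0 and 1 below. */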
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		int      pn   = get_Proj_proj(proj);
		pn = proj_map[pn];
		assert(pn >= 0);
		set_Proj_proj(proj, pn);
	}

	be_Perm_reduce(perm, new_size, map);
	return 1;
}
 * Calls the corresponding lowering function for the node.
 *
 * @param irn       The node to be checked for lowering
 * @param walk_env  The walker environment
 */
static void lower_nodes_after_ra_walker(ir_node *irn, void *walk_env)
{
	int perm_stayed;

	if (!be_is_Perm(irn))
		return;

	perm_stayed = push_through_perm(irn);
	if (perm_stayed)
		lower_perm_node(irn, (lower_env_t*)walk_env);
}
void lower_nodes_after_ra(ir_graph *irg, int do_copy)
{
	lower_env_t env;

	FIRM_DBG_REGISTER(dbg, "firm.be.lower");
	FIRM_DBG_REGISTER(dbg_permmove, "firm.be.lower.permmove");

	env.do_copy = do_copy;

	/* we will need interference */
	be_assure_live_chk(irg);

	irg_walk_graph(irg, NULL, lower_nodes_after_ra_walker, &env);
}