/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief       New approach to allocation and copy coalescing
 * @author      Matthias Braun
 *
 * ... WE NEED A NAME FOR THIS ...
 *
 * Only a proof of concept at this moment...
 *
 * The idea is to allocate registers in 2 passes:
 * 1. A first pass to determine "preferred" registers for live-ranges. This
 *    calculates for each register and each live-range a value indicating
 *    its usefulness. (You can roughly think of the value as the negative
 *    costs needed for copies when the value is in the specific register...)
 * 2. Walk blocks and assign registers in a greedy fashion, preferring
 *    registers with high preferences. When register constraints are not met,
 *    add copies and split live-ranges.
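 *
 * As an illustration of what pass 1 computes (made-up numbers, not from a
 * real run): with 4 registers, a value whose only use is constrained to
 * {r0} in a block with execution frequency 2.0 ends up with a preference
 * vector of roughly
 *    prefs = { 0.0, -2.0, -2.0, -2.0 }
 * because every register outside the limited set receives a penalty of
 * weight * USE_FACTOR, so r0 sorts first when the value gets assigned.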
 *
 * TODO:
 *  - make use of free registers in the permutate_values code
 *  - We have to pessimistically construct Phi_0s when not all predecessors
 *    of a block are known.
 *  - Phi color assignment should give bonus points towards registers already
 *    assigned at predecessors.
 *  - think about a smarter sequence of visiting the blocks. Sorted by
 *    execfreq might be good, or looptree from inner to outermost loops going
 *    over blocks in a reverse postorder
 *  - propagate preferences through Phis
 */
#include "iredges_t.h"
#include "irgraph_t.h"
#include "bechordal_t.h"
#include "bespillutil.h"
#include "bipartite.h"
#include "hungarian.h"
#define USE_FACTOR         1.0f
#define DEF_FACTOR         1.0f
#define NEIGHBOR_FACTOR    0.2f
#define AFF_SHOULD_BE_SAME 1.0f
#define AFF_PHI            1.0f

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static struct obstack               obst;
static be_irg_t                    *birg;
static ir_graph                    *irg;
static const arch_register_class_t *cls;
static const arch_register_req_t   *default_cls_req;
static be_lv_t                     *lv;
static const ir_exec_freq          *execfreqs;
static unsigned                     n_regs;
static bitset_t                    *ignore_regs;
/** info about the current assignment for a register */
struct assignment_t {
    ir_node *value;            /**< currently assigned value */
};
typedef struct assignment_t assignment_t;

/** currently active assignments (while processing a basic block) */
static assignment_t *assignments;
/**
 * allocation information: last_uses, register preferences
 * the information is per firm-node.
 */
struct allocation_info_t {
    unsigned  last_uses;      /**< bitset indicating last uses (input pos) */
    ir_node  *current_value;  /**< copy of the value that should be used */
    ir_node  *original_value; /**< for copies, points to the original value */
    float     prefs[0];       /**< register preferences */
};
typedef struct allocation_info_t allocation_info_t;
/** helper datastructure used when sorting register preferences */
struct reg_pref_t {
    unsigned num;
    float    pref;
};
typedef struct reg_pref_t reg_pref_t;
/** per basic-block information */
struct block_info_t {
    bool         processed;      /**< indicates whether the block is processed */
    assignment_t assignments[0]; /**< register assignments at end of block */
};
typedef struct block_info_t block_info_t;
/**
 * Get the allocation info for a node.
 * The info is allocated on the first visit of a node.
 */
static allocation_info_t *get_allocation_info(ir_node *node)
{
    allocation_info_t *info;
    if (!irn_visited_else_mark(node)) {
        size_t size = sizeof(info[0]) + n_regs * sizeof(info->prefs[0]);
        info = obstack_alloc(&obst, size);
        memset(info, 0, size);
        info->current_value  = node;
        info->original_value = node;
        set_irn_link(node, info);
    } else {
        info = get_irn_link(node);
    }

    return info;
}
/**
 * Get allocation information for a basic block
 */
static block_info_t *get_block_info(ir_node *block)
{
    block_info_t *info;

    assert(is_Block(block));
    if (!irn_visited_else_mark(block)) {
        size_t size = sizeof(info[0]) + n_regs * sizeof(info->assignments[0]);
        info = obstack_alloc(&obst, size);
        memset(info, 0, size);
        set_irn_link(block, info);
    } else {
        info = get_irn_link(block);
    }

    return info;
}
/**
 * Get the default register requirement for the current register class
 */
static const arch_register_req_t *get_default_req_current_cls(void)
{
    if (default_cls_req == NULL) {
        struct obstack      *obst = get_irg_obstack(irg);
        arch_register_req_t *req  = obstack_alloc(obst, sizeof(*req));
        memset(req, 0, sizeof(*req));

        req->type = arch_register_req_type_normal;
        req->cls  = cls;

        default_cls_req = req;
    }
    return default_cls_req;
}
/**
 * Link the allocation info of a node to a copy.
 * Afterwards, both nodes use the same allocation info.
 * Copy must not have an allocation info assigned yet.
 *
 * @param copy   the node that gets the allocation info assigned
 * @param value  the original node
 */
static void mark_as_copy_of(ir_node *copy, ir_node *value)
{
    ir_node           *original;
    allocation_info_t *info      = get_allocation_info(value);
    allocation_info_t *copy_info = get_allocation_info(copy);

    /* find original value */
    original = info->original_value;
    if (original != value) {
        info = get_allocation_info(original);
    }

    assert(info->original_value == original);
    info->current_value = copy;

    /* the copy should not be linked to something else yet */
    assert(copy_info->original_value == copy);
    /* copy over allocation preferences */
    memcpy(copy_info->prefs, info->prefs, n_regs * sizeof(copy_info->prefs[0]));
    copy_info->original_value = original;
}
/**
 * Calculate the penalties for every register on a node and its live neighbors.
 *
 * @param live_nodes  the set of live nodes at the current position, may be NULL
 * @param penalty     the penalty to subtract from the register preferences
 * @param limited     a raw bitset containing the limited set for the node
 * @param node        the node
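 *
 * For instance (an illustrative scenario): with limited = {r0} and
 * penalty 1.0, the node itself loses 1.0 of preference on every register
 * except r0, while each neighbor in live_nodes loses
 * 1.0 * NEIGHBOR_FACTOR = 0.2 on r0, since it competes with the constrained
 * node for that register.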
 */
static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
                                      float penalty, const unsigned* limited,
                                      ir_node *node)
{
    ir_nodeset_iterator_t iter;
    unsigned              r;
    ir_node              *neighbor;
    allocation_info_t    *info = get_allocation_info(node);

    /* give penalty for all forbidden regs */
    for (r = 0; r < n_regs; ++r) {
        if (rbitset_is_set(limited, r))
            continue;

        info->prefs[r] -= penalty;
    }

    /* all other live values should get a penalty for allowed regs */
    if (live_nodes == NULL)
        return;

    /* TODO: reduce penalty if there are multiple allowed registers... */
    penalty *= NEIGHBOR_FACTOR;
    foreach_ir_nodeset(live_nodes, neighbor, iter) {
        allocation_info_t *neighbor_info;

        /* TODO: if op is used on multiple inputs we might not do a
         * correct calculation here */
        if (neighbor == node)
            continue;

        neighbor_info = get_allocation_info(neighbor);
        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(limited, r))
                continue;

            neighbor_info->prefs[r] -= penalty;
        }
    }
}
/**
 * Calculate the preferences of a definition for the current register class.
 * If the definition uses a limited set of registers, reduce the preferences
 * for the limited register on the node and its neighbors.
 *
 * @param live_nodes  the set of live nodes at the current node
 * @param weight      the weight (usually the block execution frequency)
 * @param node        the current node
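 *
 * The should_be_same handling below works like this (illustrative example):
 * for a 2-address instruction whose output should be the same as input 0,
 * the operand at input 0 inherits the instruction's own preference vector,
 * scaled by 1/popcount(other_same). This nudges the operand towards a
 * register that would make the same-register constraint free.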
 */
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
                       ir_node *node)
{
    const arch_register_req_t *req;

    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;
        foreach_out_edge(node, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            check_defs(live_nodes, weight, proj);
        }
        return;
    }

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    req = arch_get_register_req_out(node);
    if (req->type & arch_register_req_type_limited) {
        const unsigned *limited = req->limited;
        float           penalty = weight * DEF_FACTOR;
        give_penalties_for_limits(live_nodes, penalty, limited, node);
    }

    if (req->type & arch_register_req_type_should_be_same) {
        ir_node           *insn  = skip_Proj(node);
        allocation_info_t *info  = get_allocation_info(node);
        int                arity = get_irn_arity(insn);
        int                i;

        float factor = 1.0f / rbitset_popcnt(&req->other_same, arity);
        for (i = 0; i < arity; ++i) {
            ir_node           *op;
            unsigned           r;
            allocation_info_t *op_info;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op      = get_irn_n(insn, i);
            op_info = get_allocation_info(op);
            for (r = 0; r < n_regs; ++r) {
                if (bitset_is_set(ignore_regs, r))
                    continue;
                op_info->prefs[r] += info->prefs[r] * factor;
            }
        }
    }
}
/**
 * Walker: runs on a block and calculates the preferences for every
 * node and every register from the considered register class.
 */
static void analyze_block(ir_node *block, void *data)
{
    float         weight = get_block_execfreq(execfreqs, block);
    ir_nodeset_t  live_nodes;
    ir_node      *node;
    (void) data;

    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);

    sched_foreach_reverse(block, node) {
        allocation_info_t *info;
        int                i;
        int                arity;

        if (is_Phi(node))
            break;

        /* TODO give/take penalties for should_be_same/different) */
        check_defs(&live_nodes, weight, node);

        /* mark last uses */
        arity = get_irn_arity(node);

        /* the allocation info node currently only uses 1 unsigned value
           to mark last used inputs. So we will fail for a node with more
           than sizeof(unsigned) * 8 inputs. */
        if (arity >= (int) sizeof(unsigned) * 8) {
            panic("Node with more than %d inputs not supported yet",
                  (int) sizeof(unsigned) * 8);
        }

        info = get_allocation_info(node);
        for (i = 0; i < arity; ++i) {
            ir_node *op = get_irn_n(node, i);
            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            /* last usage of a value? */
            if (!ir_nodeset_contains(&live_nodes, op)) {
                rbitset_set(&info->last_uses, i);
            }
        }

        be_liveness_transfer(cls, node, &live_nodes);

        /* update weights based on usage constraints */
        for (i = 0; i < arity; ++i) {
            const arch_register_req_t *req;
            const unsigned            *limited;
            ir_node                   *op = get_irn_n(node, i);

            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            req = arch_get_register_req(node, i);
            if (!(req->type & arch_register_req_type_limited))
                continue;

            /* TODO: give penalties to neighbors for precolored nodes! */

            limited = req->limited;
            give_penalties_for_limits(&live_nodes, weight * USE_FACTOR, limited,
                                      op);
        }
    }

    ir_nodeset_destroy(&live_nodes);
}
/**
 * Assign register reg to the given node.
 *
 * @param node  the node
 * @param reg   the register
 */
static void use_reg(ir_node *node, const arch_register_t *reg)
{
    unsigned      r          = arch_register_get_index(reg);
    assignment_t *assignment = &assignments[r];

    //assert(assignment->value == NULL);
    assignment->value = node;

    arch_set_irn_register(node, reg);
}
/**
 * Compare two register preferences (for sorting in decreasing order).
 */
static int compare_reg_pref(const void *e1, const void *e2)
{
    const reg_pref_t *rp1 = (const reg_pref_t*) e1;
    const reg_pref_t *rp2 = (const reg_pref_t*) e2;
    if (rp1->pref < rp2->pref)
        return 1;
    if (rp1->pref > rp2->pref)
        return -1;
    return 0;
}
static void fill_sort_candidates(reg_pref_t *regprefs,
                                 const allocation_info_t *info)
{
    unsigned r;

    for (r = 0; r < n_regs; ++r) {
        float pref = info->prefs[r];
        if (bitset_is_set(ignore_regs, r)) {
            /* make sure ignore registers sort to the end of the list */
            pref = -10000;
        }
        regprefs[r].num  = r;
        regprefs[r].pref = pref;
    }
    /* TODO: use a stable sort here to avoid unnecessary register jumping */
    qsort(regprefs, n_regs, sizeof(regprefs[0]), compare_reg_pref);
}
/**
 * Determine and assign a register for node @p node
 */
static void assign_reg(const ir_node *block, ir_node *node)
{
    const arch_register_t     *reg;
    allocation_info_t         *info;
    const arch_register_req_t *req;
    reg_pref_t                *reg_prefs;
    ir_node                   *in_node;
    int                        i;

    assert(arch_irn_consider_in_reg_alloc(cls, node));

    /* preassigned register? */
    reg = arch_get_irn_register(node);
    if (reg != NULL) {
        DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, reg->name));
        use_reg(node, reg);
        return;
    }

    /* give should_be_same bonuses */
    info = get_allocation_info(node);
    req  = arch_get_register_req_out(node);

    in_node = skip_Proj(node);
    if (req->type & arch_register_req_type_should_be_same) {
        float weight = get_block_execfreq(execfreqs, block);
        int   arity  = get_irn_arity(in_node);

        assert(arity <= (int) sizeof(req->other_same) * 8);
        for (i = 0; i < arity; ++i) {
            ir_node               *in;
            const arch_register_t *reg;
            unsigned               r;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            in  = get_irn_n(in_node, i);
            reg = arch_get_irn_register(in);

            r = arch_register_get_index(reg);
            if (bitset_is_set(ignore_regs, r))
                continue;
            info->prefs[r] += weight * AFF_SHOULD_BE_SAME;
        }
    }

    DB((dbg, LEVEL_2, "Candidates for %+F:", node));
    reg_prefs = alloca(n_regs * sizeof(reg_prefs[0]));
    fill_sort_candidates(reg_prefs, info);
    for (i = 0; i < (int) n_regs; ++i) {
        unsigned               num = reg_prefs[i].num;
        const arch_register_t *reg = arch_register_for_index(cls, num);
        DB((dbg, LEVEL_2, " %s(%f)", reg->name, reg_prefs[i].pref));
    }
    DB((dbg, LEVEL_2, "\n"));

    for (i = 0; i < (int) n_regs; ++i) {
        unsigned r = reg_prefs[i].num;
        /* ignores are last and we should have at least 1 non-ignore left */
        assert(!bitset_is_set(ignore_regs, r));
        /* TODO: It might be better to copy the value occupying the register
         * around here instead of trying the next one, find out when... */
        if (assignments[r].value != NULL)
            continue;

        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
        use_reg(node, reg);
        break;
    }
}
static void free_reg_of_value(ir_node *node)
{
    assignment_t          *assignment;
    const arch_register_t *reg;
    unsigned               r;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    reg        = arch_get_irn_register(node);
    r          = arch_register_get_index(reg);
    assignment = &assignments[r];
    assert(assignment->value == node);
    assignment->value = NULL;
}
/**
 * Add a permutation in front of a node and change the assignments
 * due to this permutation.
 *
 * To understand this imagine a permutation like this:
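 *
 *   (a made-up example; "a -> b" means: the value currently in register a
 *    has to end up in register b)
 *
 *   1 -> 2
 *   2 -> 4
 *   3 -> 1
 *   3 -> 5
 *   4 -> 7
 *   7 -> 3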
 *
 * First we count how many destinations a single value has. At the same time
 * we can be sure that each destination register has at most 1 source register
 * (it can have 0, which means we don't care what value is in it).
 * We ignore all fulfilled permutations (like 7 -> 7).
 * In a first pass we create as many copy instructions as possible, as they
 * are generally cheaper than exchanges. We do this by counting into how many
 * destinations a register has to be copied (in the example it's 2 for
 * register 3, or 1 for the registers 1, 2, 4 and 7).
 * We can then create a copy into every destination register whose use count
 * is 0 (= no one else needs the value in that register).
 *
 * After this step we should only have cycles left. We implement a cyclic
 * permutation of n registers with n-1 transpositions.
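 *
 * In the example above the first pass can only emit the copy 3 -> 5 (register
 * 5 is not the source of anything else). The cycle 1 -> 2 -> 4 -> 7 -> 3 -> 1
 * remains and, walking it the way the code below does, is resolved with the
 * 4 register exchanges (3 7), (7 4), (4 2) and (2 1).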
 *
 * @param live_nodes   the set of live nodes, updated due to live range splits
 * @param before       the node before which we add the permutation
 * @param permutation  the permutation array; indices are the destination
 *                     registers, the values in the array are the source
 *                     registers
 */
static void permutate_values(ir_nodeset_t *live_nodes, ir_node *before,
                             unsigned *permutation)
{
    ir_node  **ins    = ALLOCANZ(ir_node*, n_regs);
    unsigned  *n_used = ALLOCANZ(unsigned, n_regs);
    ir_node   *block;
    unsigned   r;

    /* create a list of permutations. Leave out fix points. */
    for (r = 0; r < n_regs; ++r) {
        unsigned      old_reg = permutation[r];
        assignment_t *assignment;
        ir_node      *value;

        /* no need to do anything for a fixpoint */
        if (old_reg == r)
            continue;

        assignment = &assignments[old_reg];
        value      = assignment->value;
        if (value == NULL) {
            /* nothing to do here, reg is not live. Mark it as fixpoint
             * so we ignore it in the next steps */
            permutation[r] = r;
            continue;
        }

        ins[old_reg] = value;
        ++n_used[old_reg];

        //free_reg_of_value(value);

        /* free occupation infos, we'll add the values back later */
        if (live_nodes != NULL) {
            ir_nodeset_remove(live_nodes, value);
        }
    }
    block = get_nodes_block(before);

    /* step 1: create copies where immediately possible */
    for (r = 0; r < n_regs; /* empty */) {
        ir_node               *copy;
        ir_node               *src;
        const arch_register_t *reg;
        unsigned               old_r = permutation[r];

        /* - no need to do anything for fixed points.
           - we can't copy if the value in the dest reg is still needed */
        if (old_r == r || n_used[r] > 0) {
            ++r;
            continue;
        }

        /* create a copy */
        src  = ins[old_r];
        copy = be_new_Copy(cls, block, src);
        sched_add_before(before, copy);
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Copy %+F (from %+F, before %+F) -> %s\n",
            copy, src, before, reg->name));
        mark_as_copy_of(copy, src);
        use_reg(copy, reg);

        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, copy);
        }

        /* old register has 1 user less, permutation is resolved */
        assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
        assert(n_used[old_r] > 0);
        --n_used[old_r];
        permutation[r] = r;

        /* advance or jump back (if this copy enabled another copy) */
        if (old_r < r && n_used[old_r] == 0) {
            r = old_r;
        } else {
            ++r;
        }
    }
    /* at this point we only have "cycles" left which we have to resolve with
     * perm instructions
     * TODO: if we have free registers left, then we should really use copy
     * instructions for any cycle longer than 2 registers...
     * (this is probably architecture dependent, there might be archs where
     * copies are preferable even for 2-cycles) */

    /* create perms with the rest */
    for (r = 0; r < n_regs; /* empty */) {
        const arch_register_t *reg;
        unsigned               old_r = permutation[r];
        unsigned               r2;
        ir_node               *in[2];
        ir_node               *perm;
        ir_node               *proj0;
        ir_node               *proj1;

        /* no need to do anything for fixed points */
        if (old_r == r) {
            ++r;
            continue;
        }

        /* we shouldn't have copies from 1 value to multiple destinations left */
        assert(n_used[old_r] == 1);

        /* exchange old_r and r2; after that old_r is a fixed point */
        r2 = permutation[old_r];

        in[0] = assignments[r2].value;
        in[1] = assignments[old_r].value;
        perm = be_new_Perm(cls, block, 2, in);
        sched_add_before(before, perm);
        DB((dbg, LEVEL_2, "Perm %+F (perm %+F,%+F, before %+F)\n",
            perm, in[0], in[1], before));

        proj0 = new_r_Proj(block, perm, get_irn_mode(in[0]), 0);
        mark_as_copy_of(proj0, in[0]);
        reg = arch_register_for_index(cls, old_r);
        use_reg(proj0, reg);
        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, proj0);
        }

        proj1 = new_r_Proj(block, perm, get_irn_mode(in[1]), 1);

        /* 1 value is now in the correct register */
        permutation[old_r] = old_r;
        /* the source of r changed to r2 */
        permutation[r] = r2;
        reg = arch_register_for_index(cls, r2);

        /* if we have reached a fixpoint update data structures */
        mark_as_copy_of(proj1, in[1]);

        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, proj1);
        }

        arch_set_irn_register(proj1, reg);
        assignments[r2].value = proj1;
    }

    /* now we should only have fixpoints left */
    for (r = 0; r < n_regs; ++r) {
        assert(permutation[r] == r);
    }
}
/**
 * Free registers of values last used at a node.
 *
 * @param live_nodes  set of live nodes, will be updated
 * @param node        the node to consider
 */
static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
{
    allocation_info_t *info      = get_allocation_info(node);
    const unsigned    *last_uses = &info->last_uses;
    int                arity     = get_irn_arity(node);
    int                i;

    for (i = 0; i < arity; ++i) {
        ir_node *op;

        /* check if one operand is the last use */
        if (!rbitset_is_set(last_uses, i))
            continue;

        op = get_irn_n(node, i);
        free_reg_of_value(op);
        ir_nodeset_remove(live_nodes, op);
    }
}
/**
 * Create a bitset of registers occupied by values living through an
 * instruction.
 */
static void determine_live_through_regs(unsigned *bitset, ir_node *node)
{
    const allocation_info_t *info = get_allocation_info(node);
    unsigned                 r;
    int                      i;
    int                      arity;

    /* mark all used registers as potentially live-through */
    for (r = 0; r < n_regs; ++r) {
        const assignment_t *assignment = &assignments[r];
        if (assignment->value == NULL)
            continue;

        rbitset_set(bitset, r);
    }

    /* remove registers of values dying at the instruction */
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        ir_node               *op;
        const arch_register_t *reg;

        if (!rbitset_is_set(&info->last_uses, i))
            continue;

        op  = get_irn_n(node, i);
        reg = arch_get_irn_register(op);
        rbitset_clear(bitset, arch_register_get_index(reg));
    }
}
/**
 * Enforce constraints at a node by live range splits.
 *
 * @param live_nodes  the set of live nodes, might be changed
 * @param node        the current node
 */
static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node)
{
    int                  arity = get_irn_arity(node);
    int                  i, dummy, res;
    hungarian_problem_t *bp;
    unsigned             l, r;
    unsigned            *assignment;
    bool                 good = true;

    /* see if any use constraints are not met */
    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   r;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        /* are there any limitations for the i'th operand? */
        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited = req->limited;
        reg     = arch_get_irn_register(op);
        r       = arch_register_get_index(reg);
        if (!rbitset_is_set(limited, r)) {
            /* found an assignment outside the limited set */
            good = false;
            break;
        }
    }
    /* construct a list of registers occupied by live-through values */
    unsigned *live_through_regs = NULL;
    unsigned *output_regs       = NULL;

    /* is any of the live-throughs using a constrained output register? */
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;

        foreach_out_edge(node, edge) {
            ir_node                   *proj = get_edge_src_irn(edge);
            const arch_register_req_t *req;

            if (!arch_irn_consider_in_reg_alloc(cls, proj))
                continue;

            req = arch_get_register_req_out(proj);
            if (!(req->type & arch_register_req_type_limited))
                continue;

            if (live_through_regs == NULL) {
                rbitset_alloca(live_through_regs, n_regs);
                determine_live_through_regs(live_through_regs, node);

                rbitset_alloca(output_regs, n_regs);
            }

            rbitset_or(output_regs, req->limited, n_regs);
            if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {
                good = false;
            }
        }
    } else {
        if (arch_irn_consider_in_reg_alloc(cls, node)) {
            const arch_register_req_t *req = arch_get_register_req_out(node);
            if (req->type & arch_register_req_type_limited) {
                rbitset_alloca(live_through_regs, n_regs);
                determine_live_through_regs(live_through_regs, node);
                if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {
                    good = false;

                    rbitset_alloca(output_regs, n_regs);
                    rbitset_or(output_regs, req->limited, n_regs);
                }
            }
        }
    }

    if (good)
        return;
    /* create these arrays if we haven't yet */
    if (output_regs == NULL) {
        if (live_through_regs == NULL) {
            rbitset_alloca(live_through_regs, n_regs);
        }
        rbitset_alloca(output_regs, n_regs);
    }

    /* at this point we have to construct a bipartite matching problem to see
     * which values should go to which registers */
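    /* For example (an illustrative case): with 3 registers, an operand that
     * is constrained to {r0} but currently sits in r2 keeps only its r2 -> r0
     * edge in the matching. Staying in place is weighted 9, moving 8, so with
     * HUNGARIAN_MODE_MAXIMIZE_UTIL the solver moves as few values as
     * possible. */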
    bp = hungarian_new(n_regs, n_regs, HUNGARIAN_MATCH_PERFECT);

    /* add all combinations, then remove not allowed ones */
    for (l = 0; l < n_regs; ++l) {
        if (bitset_is_set(ignore_regs, l)) {
            hungarian_add(bp, l, l, 1);
            continue;
        }

        for (r = 0; r < n_regs; ++r) {
            if (bitset_is_set(ignore_regs, r))
                continue;

            /* livethrough values may not use constrained output registers */
            if (rbitset_is_set(live_through_regs, l)
                && rbitset_is_set(output_regs, r))
                continue;

            hungarian_add(bp, r, l, l == r ? 9 : 8);
        }
    }

    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   current_reg;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited     = req->limited;
        reg         = arch_get_irn_register(op);
        current_reg = arch_register_get_index(reg);
        for (r = 0; r < n_regs; ++r) {
            if (rbitset_is_set(limited, r))
                continue;
            hungarian_remv(bp, r, current_reg);
        }
    }

    //hungarian_print_costmatrix(bp, 1);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(unsigned, n_regs);
    res = hungarian_solve(bp, (int*) assignment, &dummy, 0);
    assert(res == 0);

#if 0
    printf("Swap result:");
    for (i = 0; i < (int) n_regs; ++i) {
        printf(" %d", assignment[i]);
    }
    printf("\n");
#endif

    hungarian_free(bp);

    permutate_values(live_nodes, node, assignment);
}
/** test whether @p test_value is a copy of the value of node @p value */
static bool is_copy_of(ir_node *value, ir_node *test_value)
{
    allocation_info_t *test_info;
    allocation_info_t *info;

    if (value == test_value)
        return true;

    info      = get_allocation_info(value);
    test_info = get_allocation_info(test_value);
    return test_info->original_value == info->original_value;
}
/**
 * find a value in the end-assignment of a basic block
 * @returns the index into the assignment array if found,
 *          -1 otherwise
 */
static int find_value_in_block_info(block_info_t *info, ir_node *value)
{
    unsigned      r;
    assignment_t *assignments = info->assignments;
    for (r = 0; r < n_regs; ++r) {
        const assignment_t *assignment = &assignments[r];
        ir_node            *a_value    = assignment->value;

        if (a_value == NULL)
            continue;
        if (is_copy_of(a_value, value))
            return (int) r;
    }

    return -1;
}
/**
 * Create the necessary permutations at the end of a basic block to fulfill
 * the register assignment for phi-nodes in the next block.
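 *
 * For example (illustrative): if a Phi in this block was assigned r2 but its
 * operand ends the predecessor block in r0, we record permutation[r2] = r0
 * and let permutate_values() insert the required copy or perm at the end of
 * the predecessor.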
 */
static void add_phi_permutations(ir_node *block, int p)
{
    unsigned      r;
    unsigned     *permutation;
    assignment_t *old_assignments;
    bool          need_permutation;
    ir_node      *node;
    ir_node      *pred = get_Block_cfgpred_block(block, p);

    block_info_t *pred_info = get_block_info(pred);

    /* predecessor not processed yet? nothing to do */
    if (!pred_info->processed)
        return;

    permutation = ALLOCAN(unsigned, n_regs);
    for (r = 0; r < n_regs; ++r) {
        permutation[r] = r;
    }

    /* check phi nodes */
    need_permutation = false;
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        const arch_register_t *reg;
        int                    regn;
        int                    a;
        ir_node               *op;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        op = get_Phi_pred(node, p);
        a  = find_value_in_block_info(pred_info, op);
        assert(a >= 0);

        reg  = arch_get_irn_register(node);
        regn = arch_register_get_index(reg);
        if (regn != a) {
            permutation[regn] = a;
            need_permutation  = true;
        }
    }

    if (need_permutation) {
        /* permute values at the end of the predecessor */
        old_assignments = assignments;
        assignments     = pred_info->assignments;
        permutate_values(NULL, be_get_end_of_block_insertion_point(pred),
                         permutation);
        assignments = old_assignments;
    }

    /* change phi nodes to use the copied values */
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        int      a;
        ir_node *op;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* we have permuted all values into the correct registers, so we can
           simply query which value occupies the phi's register in the
           predecessor */
        a  = arch_register_get_index(arch_get_irn_register(node));
        op = pred_info->assignments[a].value;
        set_Phi_pred(node, p, op);
    }
}
static void handle_phi_prefs(ir_node *phi)
{
    int                i;
    int                arity = get_irn_arity(phi);
    ir_node           *block = get_nodes_block(phi);
    allocation_info_t *info  = get_allocation_info(phi);

    for (i = 0; i < arity; ++i) {
        ir_node               *op  = get_irn_n(phi, i);
        const arch_register_t *reg = arch_get_irn_register(op);
        ir_node               *pred;
        float                  weight;
        unsigned               r;

        if (reg == NULL)
            continue;

        /* give bonus for already assigned register */
        pred   = get_Block_cfgpred_block(block, i);
        weight = get_block_execfreq(execfreqs, pred);
        r      = arch_register_get_index(reg);
        info->prefs[r] += weight * AFF_PHI;
    }
}
/**
 * change inputs of a node to the current value (copies/perms)
 */
static void rewire_inputs(ir_node *node)
{
    int i;
    int arity = get_irn_arity(node);

    for (i = 0; i < arity; ++i) {
        ir_node           *op = get_irn_n(node, i);
        allocation_info_t *info;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        info = get_allocation_info(op);
        if (info->current_value != op) {
            set_irn_n(node, i, info->current_value);
        }
    }
}
/**
 * Walker: assign registers to all nodes of a block that
 * need registers from the currently considered register class.
 */
static void allocate_coalesce_block(ir_node *block, void *data)
{
    int                    i;
    int                    p;
    ir_nodeset_t           live_nodes;
    ir_nodeset_iterator_t  iter;
    ir_node               *node, *start;
    int                    n_preds;
    block_info_t          *block_info;
    block_info_t         **pred_block_infos;
    ir_node              **phi_ins;

    (void) data;
    DB((dbg, LEVEL_2, "* Block %+F\n", block));

    /* clear assignments */
    block_info  = get_block_info(block);
    assignments = block_info->assignments;

    ir_nodeset_init(&live_nodes);

    /* gather regalloc infos of predecessor blocks */
    n_preds          = get_Block_n_cfgpreds(block);
    pred_block_infos = ALLOCAN(block_info_t*, n_preds);
    for (i = 0; i < n_preds; ++i) {
        ir_node      *pred      = get_Block_cfgpred_block(block, i);
        block_info_t *pred_info = get_block_info(pred);
        pred_block_infos[i]     = pred_info;
    }

    phi_ins = ALLOCAN(ir_node*, n_preds);
    /* collect live-in nodes and preassigned values */
    be_lv_foreach(lv, block, be_lv_state_in, i) {
        const arch_register_t *reg;

        node = be_lv_get_irn(lv, block, i);
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* check all predecessors for this value, if it is not everywhere the
           same or unknown then we have to construct a phi
           (we collect the potential phi inputs here) */
        bool need_phi = false;
        for (p = 0; p < n_preds; ++p) {
            block_info_t *pred_info = pred_block_infos[p];

            if (!pred_info->processed) {
                /* use node for now, it will get fixed later */
                phi_ins[p] = node;
                need_phi   = true;
            } else {
                int a = find_value_in_block_info(pred_info, node);

                /* must live out of predecessor */
                assert(a >= 0);
                phi_ins[p] = pred_info->assignments[a].value;
                /* different value from last time? then we need a phi */
                if (p > 0 && phi_ins[p-1] != phi_ins[p]) {
                    need_phi = true;
                }
            }
        }

        if (need_phi) {
            ir_mode                   *mode = get_irn_mode(node);
            const arch_register_req_t *req  = get_default_req_current_cls();
            ir_node                   *phi;

            phi = new_r_Phi(block, n_preds, phi_ins, mode);
            be_set_phi_reg_req(phi, req);

            DB((dbg, LEVEL_3, "Create Phi %+F (for %+F)\n", phi, node));

            mark_as_copy_of(phi, node);
            sched_add_after(block, phi);

            node = phi;
        } else {
            allocation_info_t *info = get_allocation_info(node);
            info->current_value = phi_ins[0];

            /* Grab 1 of the inputs we constructed (might not be the same as
             * "node" as we could see the same copy of the value in all
             * predecessors) */
            node = phi_ins[0];
        }

        /* if the node already has a register assigned use it */
        reg = arch_get_irn_register(node);
        if (reg != NULL) {
            /* TODO: consult pred-block infos here. The value could be copied
               away in some/all predecessor blocks. We need to construct
               phi-nodes in this case.
               We even need to construct some Phi_0 like constructs in cases
               where the predecessor allocation is not determined yet. */
            use_reg(node, reg);
        }

        /* remember that this node is live at the beginning of the block */
        ir_nodeset_insert(&live_nodes, node);
    }
    /* handle phis... */
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        const arch_register_t *reg;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* fill in regs already assigned */
        reg = arch_get_irn_register(node);
        if (reg != NULL) {
            use_reg(node, reg);
        } else {
            /* TODO: give bonuses for registers already assigned at the
               predecessors */
            handle_phi_prefs(node);
            assign_reg(block, node);
        }
    }
    start = node;

    /* assign regs for live-in values */
    foreach_ir_nodeset(&live_nodes, node, iter) {
        const arch_register_t *reg = arch_get_irn_register(node);
        if (reg != NULL)
            continue;

        assign_reg(block, node);
    }
    /* assign instructions in the block */
    for (node = start; !sched_is_end(node); node = sched_next(node)) {
        /* enforce use constraints */
        enforce_constraints(&live_nodes, node);

        rewire_inputs(node);

        /* free registers of values last used at this instruction */
        free_last_uses(&live_nodes, node);

        /* assign output registers */
        /* TODO: 2 phases: first: pre-assigned ones, 2nd real regs */
        if (get_irn_mode(node) == mode_T) {
            const ir_edge_t *edge;
            foreach_out_edge(node, edge) {
                ir_node *proj = get_edge_src_irn(edge);
                if (!arch_irn_consider_in_reg_alloc(cls, proj))
                    continue;
                assign_reg(block, proj);
            }
        } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
            assign_reg(block, node);
        }
    }

    ir_nodeset_destroy(&live_nodes);

    block_info->processed = true;

    /* permute values at the end of predecessor blocks in case of phi-nodes */
    for (p = 0; p < n_preds; ++p) {
        add_phi_permutations(block, p);
    }

    /* if we have exactly 1 successor then we might be able to produce phi
       permutations for it now */
    if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) == 1) {
        const ir_edge_t *edge
            = get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
        ir_node      *succ      = get_edge_src_irn(edge);
        int           p         = get_edge_src_pos(edge);
        block_info_t *succ_info = get_block_info(succ);

        if (succ_info->processed) {
            add_phi_permutations(succ, p);
        }
    }
}
/**
 * Run the register allocator for the current register class.
 */
static void be_straight_alloc_cls(void)
{
    lv = be_assure_liveness(birg);
    be_liveness_assure_sets(lv);
    be_liveness_assure_chk(lv);

    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
    inc_irg_visited(irg);

    DB((dbg, LEVEL_2, "=== Allocating registers of %s ===\n", cls->name));

    irg_block_walk_graph(irg, NULL, analyze_block, NULL);
    /* we need some dominance pre-order walk to ensure we see all
     * definitions/create copies before we encounter their users */
    dom_tree_walk_irg(irg, allocate_coalesce_block, NULL, NULL);

    ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
}
static void dump(int mask, ir_graph *irg, const char *suffix,
                 void (*dumper)(ir_graph *, const char *))
{
    if (birg->main_env->options->dump_flags & mask)
        be_dump(irg, suffix, dumper);
}
/**
 * Run the spiller on the current graph.
 */
static void spill(void)
{
    /* make sure all nodes show their real register pressure */
    BE_TIMER_PUSH(t_ra_constr);
    be_pre_spill_prepare_constr(birg, cls);
    BE_TIMER_POP(t_ra_constr);

    dump(DUMP_RA, irg, "-spillprepare", dump_ir_block_graph_sched);

    /* spill */
    BE_TIMER_PUSH(t_ra_spill);
    be_do_spill(birg, cls);
    BE_TIMER_POP(t_ra_spill);

    BE_TIMER_PUSH(t_ra_spill_apply);
    check_for_memory_operands(irg);
    BE_TIMER_POP(t_ra_spill_apply);

    dump(DUMP_RA, irg, "-spill", dump_ir_block_graph_sched);
}
/**
 * The straight register allocator for a whole procedure.
 */
static void be_straight_alloc(be_irg_t *new_birg)
{
    const arch_env_t *arch_env = new_birg->main_env->arch_env;
    int               n_cls    = arch_env_get_n_reg_class(arch_env);
    int               c;

    obstack_init(&obst);

    birg      = new_birg;
    irg       = be_get_birg_irg(birg);
    execfreqs = birg->exec_freq;

    /* TODO: extract some of the stuff from bechordal allocator, like
     * statistics, time measurements, etc. and use them here too */

    for (c = 0; c < n_cls; ++c) {
        cls             = arch_env_get_reg_class(arch_env, c);
        default_cls_req = NULL;
        if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
            continue;

        stat_ev_ctx_push_str("regcls", cls->name);

        n_regs      = arch_register_class_n_regs(cls);
        ignore_regs = bitset_malloc(n_regs);
        be_put_ignore_regs(birg, cls, ignore_regs);

        spill();

        /* verify schedule and register pressure */
        BE_TIMER_PUSH(t_verify);
        if (birg->main_env->options->vrfy_option == BE_VRFY_WARN) {
            be_verify_schedule(birg);
            be_verify_register_pressure(birg, cls, irg);
        } else if (birg->main_env->options->vrfy_option == BE_VRFY_ASSERT) {
            assert(be_verify_schedule(birg) && "Schedule verification failed");
            assert(be_verify_register_pressure(birg, cls, irg)
                   && "Register pressure verification failed");
        }
        BE_TIMER_POP(t_verify);

        BE_TIMER_PUSH(t_ra_color);
        be_straight_alloc_cls();
        BE_TIMER_POP(t_ra_color);

        /* we most probably constructed new Phis so liveness info is invalid
         * now */
        /* TODO: test liveness_introduce */
        be_liveness_invalidate(lv);

        bitset_free(ignore_regs);

        stat_ev_ctx_pop("regcls");
    }

    BE_TIMER_PUSH(t_ra_spill_apply);
    be_abi_fix_stack_nodes(birg->abi);
    BE_TIMER_POP(t_ra_spill_apply);

    BE_TIMER_PUSH(t_verify);
    if (birg->main_env->options->vrfy_option == BE_VRFY_WARN) {
        be_verify_register_allocation(birg);
    } else if (birg->main_env->options->vrfy_option == BE_VRFY_ASSERT) {
        assert(be_verify_register_allocation(birg)
               && "Register allocation invalid");
    }
    BE_TIMER_POP(t_verify);

    obstack_free(&obst, NULL);
}
/**
 * Initializes this module.
 */
void be_init_straight_alloc(void)
{
    static be_ra_t be_ra_straight = {
        be_straight_alloc,
    };

    FIRM_DBG_REGISTER(dbg, "firm.be.straightalloc");

    be_register_allocator("straight", &be_ra_straight);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_straight_alloc);