 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 * @brief   New approach to allocation and copy coalescing
 * @author  Matthias Braun
 *
 * ... WE NEED A NAME FOR THIS ...
 *
 * Only a proof of concept at this moment...
 * The idea is to allocate registers in 2 passes:
 * 1. A first pass to determine "preferred" registers for live-ranges. This
 *    calculates for each register and each live-range a value indicating
 *    its usefulness. (You can roughly think of the value as the negated
 *    cost of the copies needed when the value sits in that specific
 *    register...)
 * 2. Walk the blocks and assign registers in a greedy fashion, preferring
 *    registers with high preference values. When register constraints are
 *    not met, add copies and split live-ranges.
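 *
 * A rough sketch of the pass-1 bookkeeping (illustrative only; the actual
 * code lives in give_penalties_for_limits() below): every use or def with a
 * limited register requirement lowers the preference of all registers
 * outside the allowed set, weighted by execution frequency:
 *
 *     for (r = 0; r < n_regs; ++r) {
 *         if (rbitset_is_set(req->limited, r))
 *             continue;              // register is allowed, keep its pref
 *         info->prefs[r] -= weight;  // forbidden here, discourage it
 *     }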
 *
 * TODO:
 *  - make use of free registers in the permutate_values code
 *  - We have to pessimistically construct Phi_0s when not all predecessors
 *    of a block are known.
 *  - Phi color assignment should give bonus points towards registers already
 *    assigned at predecessors.
 *  - think about a smarter sequence of visiting the blocks. Sorting by
 *    execfreq might be good, or walking the looptree from inner to outermost
 *    loops, visiting the blocks in a reverse postorder.
#include "irgraph_t.h"
#include "iredges_t.h"

#include "bechordal_t.h"
#include "bespillutil.h"
#include "bipartite.h"
#include "hungarian.h"
#define USE_FACTOR       1.0f
#define DEF_FACTOR       1.0f
#define NEIGHBOR_FACTOR  0.2f
#define SHOULD_BE_SAME   1.0f
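
/*
 * Illustrative example of how the factors combine (numbers chosen for the
 * example only): a use with a limited register requirement in a block with
 * execution frequency 10 subtracts 10 * USE_FACTOR = 10.0 from the
 * preference of every forbidden register on the used value, and
 * 10 * USE_FACTOR * NEIGHBOR_FACTOR = 2.0 from the preference of the allowed
 * registers on all other values live at that point
 * (see give_penalties_for_limits() below).
 */
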
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static struct obstack               obst;
static be_irg_t                    *birg;

static const arch_register_class_t *cls;
static const arch_register_req_t   *default_cls_req;

static const ir_exec_freq          *execfreqs;
static unsigned                     n_regs;
static bitset_t                    *ignore_regs;
/** info about the current assignment for a register */
struct assignment_t {
    ir_node *value;  /**< currently assigned value */
};
typedef struct assignment_t assignment_t;

/** currently active assignments (while processing a basic block) */
static assignment_t *assignments;
/**
 * Allocation information: last_uses, register preferences.
 * The information is per firm-node.
 */
struct allocation_info_t {
    unsigned  last_uses;       /**< bitset indicating last uses (input pos) */
    ir_node  *current_value;   /**< copy of the value that should be used */
    ir_node  *original_value;  /**< for copies, points to the original value */
    float     prefs[0];        /**< register preferences */
};
typedef struct allocation_info_t allocation_info_t;
/** helper datastructure used when sorting register preferences */
typedef struct reg_pref_t reg_pref_t;
/** per basic-block information */
struct block_info_t {
    bool         processed;       /**< indicates whether the block has been processed */
    assignment_t assignments[0];  /**< register assignments at the end of the block */
};
typedef struct block_info_t block_info_t;
/**
 * Get the allocation info for a node.
 * The info is allocated on the first visit of a node.
 */
static allocation_info_t *get_allocation_info(ir_node *node)
{
    allocation_info_t *info;
    if (!irn_visited_else_mark(node)) {
        size_t size = sizeof(info[0]) + n_regs * sizeof(info->prefs[0]);
        info = obstack_alloc(&obst, size);
        memset(info, 0, size);
        info->current_value  = node;
        info->original_value = node;
        set_irn_link(node, info);
    } else {
        info = get_irn_link(node);
    }
/**
 * Get the allocation information for a basic block.
 */
static block_info_t *get_block_info(ir_node *block)
{
    block_info_t *info;

    assert(is_Block(block));
    if (!irn_visited_else_mark(block)) {
        size_t size = sizeof(info[0]) + n_regs * sizeof(info->assignments[0]);
        info = obstack_alloc(&obst, size);
        memset(info, 0, size);
        set_irn_link(block, info);
    } else {
        info = get_irn_link(block);
    }
/**
 * Get the default register requirement for the current register class.
 */
static const arch_register_req_t *get_default_req_current_cls(void)
{
    if (default_cls_req == NULL) {
        struct obstack      *obst = get_irg_obstack(irg);
        arch_register_req_t *req  = obstack_alloc(obst, sizeof(*req));
        memset(req, 0, sizeof(*req));

        req->type = arch_register_req_type_normal;

        default_cls_req = req;
    }
    return default_cls_req;
/**
 * Link the allocation info of a node to a copy.
 * Afterwards, both nodes use the same allocation info.
 * Copy must not have an allocation info assigned yet.
 *
 * @param copy   the node that gets the allocation info assigned
 * @param value  the original node
 */
static void mark_as_copy_of(ir_node *copy, ir_node *value)
{
    allocation_info_t *info      = get_allocation_info(value);
    allocation_info_t *copy_info = get_allocation_info(copy);

    /* value must be an original value (not a copy) */
    assert(info->original_value == value);
    info->current_value = copy;

    /* the copy should not be linked to something else yet */
    assert(copy_info->original_value == copy);
    copy_info->original_value = value;
/**
 * Calculate the penalties for every register on a node and its live neighbors.
 *
 * @param live_nodes  the set of live nodes at the current position, may be NULL
 * @param penalty     the penalty to subtract from the register preferences
 * @param limited     a raw bitset containing the limited set for the node
 * @param node        the node
 */
static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
                                      float penalty, const unsigned* limited,
                                      ir_node *node)
{
    ir_nodeset_iterator_t iter;
    allocation_info_t    *info = get_allocation_info(node);

    /* give penalty for all forbidden regs */
    for (r = 0; r < n_regs; ++r) {
        if (rbitset_is_set(limited, r))
            continue;

        info->prefs[r] -= penalty;
    }

    /* all other live values should get a penalty for allowed regs */
    if (live_nodes == NULL)
        return;

    /* TODO: reduce penalty if there are multiple allowed registers... */
    penalty *= NEIGHBOR_FACTOR;
    foreach_ir_nodeset(live_nodes, neighbor, iter) {
        allocation_info_t *neighbor_info;

        /* TODO: if op is used on multiple inputs we might not do a good job
         * here */
        if (neighbor == node)
            continue;

        neighbor_info = get_allocation_info(neighbor);
        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(limited, r))
                continue;

            neighbor_info->prefs[r] -= penalty;
        }
    }
/**
 * Calculate the preferences of a definition for the current register class.
 * If the definition uses a limited set of registers, reduce the preferences
 * for the limited registers on the node and its neighbors.
 *
 * @param live_nodes  the set of live nodes at the current node
 * @param weight      the weight
 * @param node        the current node
 */
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
                       ir_node *node)
{
    const arch_register_req_t *req;

    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;
        foreach_out_edge(node, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            check_defs(live_nodes, weight, proj);
        }
        return;
    }

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    req = arch_get_register_req_out(node);
    if (req->type & arch_register_req_type_limited) {
        const unsigned *limited = req->limited;
        float           penalty = weight * DEF_FACTOR;
        give_penalties_for_limits(live_nodes, penalty, limited, node);
    }

    if (req->type & arch_register_req_type_should_be_same) {
        ir_node           *insn  = skip_Proj(node);
        allocation_info_t *info  = get_allocation_info(node);
        int                arity = get_irn_arity(insn);

        float factor = 1.0f / rbitset_popcnt(&req->other_same, arity);
        for (i = 0; i < arity; ++i) {
            allocation_info_t *op_info;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op      = get_irn_n(insn, i);
            op_info = get_allocation_info(op);
            for (r = 0; r < n_regs; ++r) {
                if (bitset_is_set(ignore_regs, r))
                    continue;
                op_info->prefs[r] += info->prefs[r] * factor;
            }
        }
    }
/**
 * Walker: runs on a block and calculates the preferences for every node and
 * every register of the considered register class.
 */
static void analyze_block(ir_node *block, void *data)
{
    float        weight = get_block_execfreq(execfreqs, block);
    ir_nodeset_t live_nodes;

    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);

    sched_foreach_reverse(block, node) {
        allocation_info_t *info;

        /* TODO: give/take penalties for should_be_same/different */
        check_defs(&live_nodes, weight, node);

        arity = get_irn_arity(node);

        /* the allocation info currently uses a single unsigned value to mark
         * the last-used inputs, so we cannot handle nodes with more inputs
         * than bits in an unsigned */
        if (arity >= (int) sizeof(unsigned) * 8) {
            panic("Node with more than %d inputs not supported yet",
                  (int) sizeof(unsigned) * 8);
        }

        info = get_allocation_info(node);
        for (i = 0; i < arity; ++i) {
            ir_node *op = get_irn_n(node, i);
            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            /* last usage of a value? */
            if (!ir_nodeset_contains(&live_nodes, op)) {
                rbitset_set(&info->last_uses, i);
            }
        }

        be_liveness_transfer(cls, node, &live_nodes);

        /* update weights based on usage constraints */
        for (i = 0; i < arity; ++i) {
            const arch_register_req_t *req;
            const unsigned            *limited;
            ir_node                   *op = get_irn_n(node, i);

            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            req = arch_get_register_req(node, i);
            if (!(req->type & arch_register_req_type_limited))
                continue;

            /* TODO: give penalties to neighbors for precolored nodes! */

            limited = req->limited;
            give_penalties_for_limits(&live_nodes, weight * USE_FACTOR, limited,
                                      op);
        }
    }

    ir_nodeset_destroy(&live_nodes);
/**
 * Assign register reg to the given node.
 *
 * @param node  the node
 * @param reg   the register
 */
static void use_reg(ir_node *node, const arch_register_t *reg)
{
    unsigned      r          = arch_register_get_index(reg);
    assignment_t *assignment = &assignments[r];

    assert(assignment->value == NULL);
    assignment->value = node;

    arch_set_irn_register(node, reg);
/**
 * Compare two register preferences (used to sort in decreasing order).
 */
static int compare_reg_pref(const void *e1, const void *e2)
{
    const reg_pref_t *rp1 = (const reg_pref_t*) e1;
    const reg_pref_t *rp2 = (const reg_pref_t*) e2;
    if (rp1->pref < rp2->pref)
        return 1;
    if (rp1->pref > rp2->pref)
        return -1;
    return 0;
}

static void fill_sort_candidates(reg_pref_t *regprefs,
                                 const allocation_info_t *info)
{
    for (r = 0; r < n_regs; ++r) {
        float pref = info->prefs[r];
        if (bitset_is_set(ignore_regs, r)) {
        }
        regprefs[r].num  = r;
        regprefs[r].pref = pref;
    }
    /* TODO: use a stable sort here to avoid unnecessary register jumping */
    qsort(regprefs, n_regs, sizeof(regprefs[0]), compare_reg_pref);
/**
 * Determine and assign a register for node @p node.
 */
static void assign_reg(const ir_node *block, ir_node *node)
{
    const arch_register_t     *reg;
    allocation_info_t         *info;
    const arch_register_req_t *req;
    reg_pref_t                *reg_prefs;

    assert(arch_irn_consider_in_reg_alloc(cls, node));

    /* preassigned register? */
    reg = arch_get_irn_register(node);
    if (reg != NULL) {
        DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, reg->name));
        use_reg(node, reg);
        return;
    }

    /* give should_be_same bonus */
    info = get_allocation_info(node);
    req  = arch_get_register_req_out(node);

    in_node = skip_Proj(node);
    if (req->type & arch_register_req_type_should_be_same) {
        float weight = get_block_execfreq(execfreqs, block);
        int   arity  = get_irn_arity(in_node);

        assert(arity <= (int) sizeof(req->other_same) * 8);
        for (i = 0; i < arity; ++i) {
            const arch_register_t *reg;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            in  = get_irn_n(in_node, i);
            reg = arch_get_irn_register(in);

            r = arch_register_get_index(reg);
            if (bitset_is_set(ignore_regs, r))
                continue;
            info->prefs[r] += weight * SHOULD_BE_SAME;
        }
    }

    DB((dbg, LEVEL_2, "Candidates for %+F:", node));
    reg_prefs = alloca(n_regs * sizeof(reg_prefs[0]));
    fill_sort_candidates(reg_prefs, info);
    for (i = 0; i < n_regs; ++i) {
        unsigned               num = reg_prefs[i].num;
        const arch_register_t *reg = arch_register_for_index(cls, num);
        DB((dbg, LEVEL_2, " %s(%f)", reg->name, reg_prefs[i].pref));
    }
    DB((dbg, LEVEL_2, "\n"));

    for (i = 0; i < n_regs; ++i) {
        unsigned r = reg_prefs[i].num;
        /* ignores are last and we should have at least 1 non-ignore left */
        assert(!bitset_is_set(ignore_regs, r));
        /* TODO: It might be better to copy the value occupying the register
           around here instead of trying the next one, find out when... */
        if (assignments[r].value != NULL)
            continue;

        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
        use_reg(node, reg);
        break;
    }
static void free_reg_of_value(ir_node *node)
{
    assignment_t          *assignment;
    const arch_register_t *reg;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    reg        = arch_get_irn_register(node);
    r          = arch_register_get_index(reg);
    assignment = &assignments[r];
    assignment->value = NULL;
/**
 * Add a permutation in front of a node and change the assignments
 * due to this permutation.
 *
 * To understand this imagine a permutation like this:
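 *
 *   (illustrative example, register numbers chosen arbitrarily)
 *
 *   destination register:  1  2  3  4  5  6  7
 *   source register:       3  1  2  3  4  -  7
 *
 *   i.e. the value in register 3 has to end up in registers 1 and 4, the
 *   values in registers 1, 2 and 4 have one destination each, register 6
 *   receives no value, and 7 -> 7 is already in place.
 *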
 * First we count how many destinations a single value has. At the same time
 * we can be sure that each destination register has at most 1 source register
 * (it can have 0, which means we don't care what value is in it).
 * We ignore all fulfilled permutations (like 7 -> 7).
 * In a first pass we create as many copy instructions as possible, as they
 * are generally cheaper than exchanges. We do this by counting into how many
 * destinations a register has to be copied (in the example it's 2 for register
 * 3, or 1 for the registers 1, 2, 4 and 7).
 * We can then create a copy into every destination register whose use count
 * is 0 (= no one else needs the value currently in that register).
 * After this step only cycles are left, and we implement a cyclic permutation
 * of n registers with n-1 transpositions.
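 *
 * In the example above the copies into registers 5 and 4 can be created
 * right away; the remaining cycle 1 -> 2 -> 3 -> 1 is then resolved with
 * two transpositions (Perm nodes).
 *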
 * @param live_nodes   the set of live nodes, updated due to live range splits
 * @param before       the node before which we add the permutation
 * @param permutation  the permutation array: indices are the destination
 *                     registers, the values in the array are the source
 *                     registers
 */
static void permutate_values(ir_nodeset_t *live_nodes, ir_node *before,
                             unsigned *permutation)
{
    ir_node  **ins    = ALLOCANZ(ir_node*, n_regs);
    unsigned  *n_used = ALLOCANZ(unsigned, n_regs);

    /* create a list of permutations. Leave out fix points. */
    for (r = 0; r < n_regs; ++r) {
        unsigned      old_reg = permutation[r];
        assignment_t *assignment;

        /* no need to do anything for a fixpoint */
        if (old_reg == r)
            continue;

        assignment = &assignments[old_reg];
        value      = assignment->value;

        /* nothing to do here, reg is not live. Mark it as fixpoint
         * so we ignore it in the next steps */

        ins[old_reg] = value;
        /* free occupation infos, we'll add the values back later */
        if (live_nodes != NULL) {
            free_reg_of_value(value);
            ir_nodeset_remove(live_nodes, value);
        }
    }

    block = get_nodes_block(before);

    /* step 1: create copies where immediately possible */
    for (r = 0; r < n_regs; /* empty */) {
        const arch_register_t *reg;
        unsigned               old_r = permutation[r];

        /* - no need to do anything for fixed points
         * - we can't copy if the value in the dest reg is still needed */
        if (old_r == r || n_used[r] > 0) {
            ++r;
            continue;
        }

        copy = be_new_Copy(cls, block, src);
        sched_add_before(before, copy);
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Copy %+F (from %+F, before %+F) -> %s\n",
            copy, src, before, reg->name));
        mark_as_copy_of(copy, src);

        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, copy);
        }

        /* old register has 1 user less, permutation is resolved */
        assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
        assert(n_used[old_r] > 0);

        /* advance or jump back (this copy could have enabled another copy) */
        if (old_r < r && n_used[old_r] == 0) {
    /* at this point we only have "cycles" left which we have to resolve with
     * perm instructions
     * TODO: if we have free registers left, then we should really use copy
     * instructions for any cycle longer than 2 registers...
     * (this is probably architecture dependent, there might be archs where
     * copies are preferable even for 2 cycles) */

    /* create perms with the rest */
    for (r = 0; r < n_regs; /* empty */) {
        const arch_register_t *reg;
        unsigned               old_r = permutation[r];

        /* we shouldn't have copies from 1 value to multiple destinations left */
        assert(n_used[old_r] == 1);

        /* exchange old_r and r2; after that old_r is a fixed point */
        r2 = permutation[old_r];

        perm = be_new_Perm(cls, block, 2, in);
        sched_add_before(before, perm);
        DB((dbg, LEVEL_2, "Perm %+F (perm %+F,%+F, before %+F)\n",
            perm, in[0], in[1], before));
        proj0 = new_r_Proj(block, perm, get_irn_mode(in[0]), 0);
        mark_as_copy_of(proj0, in[0]);
        reg = arch_register_for_index(cls, old_r);

        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, proj0);
        }

        proj1 = new_r_Proj(block, perm, get_irn_mode(in[1]), 1);

        /* 1 value is now in the correct register */
        permutation[old_r] = old_r;
        /* the source of r changed to r2 */

        reg = arch_register_for_index(cls, r2);
        /* if we have reached a fixpoint update data structures */
        mark_as_copy_of(proj1, in[1]);

        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, proj1);
        }

        arch_set_irn_register(proj1, reg);

    /* now we should only have fixpoints left */
    for (r = 0; r < n_regs; ++r) {
        assert(permutation[r] == r);
    }
/**
 * Free the registers of values last used.
 *
 * @param live_nodes  set of live nodes, will be updated
 * @param node        the node to consider
 */
static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
{
    allocation_info_t *info      = get_allocation_info(node);
    const unsigned    *last_uses = &info->last_uses;
    int                arity     = get_irn_arity(node);

    for (i = 0; i < arity; ++i) {
        /* check if one operand is the last use */
        if (!rbitset_is_set(last_uses, i))
            continue;

        op = get_irn_n(node, i);
        free_reg_of_value(op);
        ir_nodeset_remove(live_nodes, op);
    }
/**
 * Create a bitset of registers occupied by values living through an
 * instruction.
 */
static void determine_live_through_regs(unsigned *bitset, ir_node *node)
{
    const allocation_info_t *info = get_allocation_info(node);

    /* mark all used registers as potentially live-through */
    for (r = 0; r < n_regs; ++r) {
        const assignment_t *assignment = &assignments[r];
        if (assignment->value == NULL)
            continue;

        rbitset_set(bitset, r);
    }

    /* remove registers of values dying at the instruction */
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        const arch_register_t *reg;

        if (!rbitset_is_set(&info->last_uses, i))
            continue;

        op  = get_irn_n(node, i);
        reg = arch_get_irn_register(op);
        rbitset_clear(bitset, arch_register_get_index(reg));
    }
/**
 * Enforce constraints at a node by live range splits.
 *
 * @param live_nodes  the set of live nodes, might be changed
 * @param node        the current node
 */
static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node)
{
    int                  arity = get_irn_arity(node);
    hungarian_problem_t *bp;
    unsigned            *assignment;

    /* see if any use constraints are not met */
    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        /* are there any limitations for the i'th operand? */
        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited = req->limited;
        reg     = arch_get_irn_register(op);
        r       = arch_register_get_index(reg);
        if (!rbitset_is_set(limited, r)) {
            /* found an assignment outside the limited set */
    /* construct a list of registers occupied by live-through values */
    unsigned *live_through_regs = NULL;
    unsigned *output_regs       = NULL;

    /* is any of the live-throughs using a constrained output register? */
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;

        foreach_out_edge(node, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            const arch_register_req_t *req;

            if (!arch_irn_consider_in_reg_alloc(cls, proj))
                continue;

            req = arch_get_register_req_out(proj);
            if (!(req->type & arch_register_req_type_limited))
                continue;

            if (live_through_regs == NULL) {
                rbitset_alloca(live_through_regs, n_regs);
                determine_live_through_regs(live_through_regs, node);

                rbitset_alloca(output_regs, n_regs);
            }

            rbitset_or(output_regs, req->limited, n_regs);
            if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {

    if (arch_irn_consider_in_reg_alloc(cls, node)) {
        const arch_register_req_t *req = arch_get_register_req_out(node);
        if (req->type & arch_register_req_type_limited) {
            rbitset_alloca(live_through_regs, n_regs);
            determine_live_through_regs(live_through_regs, node);
            if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {

            rbitset_alloca(output_regs, n_regs);
            rbitset_or(output_regs, req->limited, n_regs);

    if (output_regs == NULL) {
        if (live_through_regs == NULL) {
            rbitset_alloca(live_through_regs, n_regs);
        }
        rbitset_alloca(output_regs, n_regs);
    }
    /* swap values around */
    bp = hungarian_new(n_regs, n_regs, HUNGARIAN_MATCH_PERFECT);

    /* add all combinations, then remove not allowed ones */
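    /* cost scheme used below: keeping a value in its current register scores
     * 90, moving it to another allowed register scores 89, so the matching
     * prefers identity assignments wherever the constraints allow it */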
    for (l = 0; l < n_regs; ++l) {
        if (bitset_is_set(ignore_regs, l)) {
            hungarian_add(bp, l, l, 90);
            continue;
        }

        for (r = 0; r < n_regs; ++r) {
            if (bitset_is_set(ignore_regs, r))
                continue;

            /* live-through values may not use constrained output registers */
            if (rbitset_is_set(live_through_regs, l)
                    && rbitset_is_set(output_regs, r))
                continue;

            hungarian_add(bp, l, r, l == r ? 90 : 89);
        }
    }

    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   current_reg;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited     = req->limited;
        reg         = arch_get_irn_register(op);
        current_reg = arch_register_get_index(reg);
        for (r = 0; r < n_regs; ++r) {
            if (rbitset_is_set(limited, r))
                continue;

            hungarian_remv(bp, current_reg, r);
        }
    }

    //hungarian_print_costmatrix(bp, 1);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(unsigned, n_regs);
    res = hungarian_solve(bp, (int*) assignment, &dummy, 0);

    printf("Swap result:");
    for (i = 0; i < n_regs; ++i) {
        printf(" %d", assignment[i]);
    }

    permutate_values(live_nodes, node, assignment);
/** test whether the node @p n is a copy of the value of node @p of */
static bool is_copy_of(ir_node *n, ir_node *of)
{
    allocation_info_t *of_info;

    of_info = get_allocation_info(of);

    return of_info == get_irn_link(n);
/** find a value in the end-assignment of a basic block
 * @returns the index into the assignment array if found, -1 otherwise
 */
static int find_value_in_block_info(block_info_t *info, ir_node *value)
{
    assignment_t *assignments = info->assignments;
    for (r = 0; r < n_regs; ++r) {
        const assignment_t *assignment = &assignments[r];
        if (is_copy_of(assignment->value, value))
            return r;
    }

    return -1;
/**
 * Create the necessary permutations at the end of a basic block to fulfill
 * the register assignment for phi-nodes in the next block.
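 *
 * For example (hypothetical values): if a Phi in the successor block expects
 * its operand in r2, but at the end of this predecessor the value sits in r5,
 * then an r5 -> r2 entry is added to the permutation, which permutate_values()
 * materializes as copies/perms at the end of the predecessor.
 */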
static void add_phi_permutations(ir_node *block, int p)
{
    unsigned     *permutation;
    assignment_t *old_assignments;
    bool          need_permutation;

    ir_node      *pred      = get_Block_cfgpred_block(block, p);
    block_info_t *pred_info = get_block_info(pred);

    /* predecessor not processed yet? nothing to do */
    if (!pred_info->processed)
        return;

    permutation = ALLOCAN(unsigned, n_regs);
    for (r = 0; r < n_regs; ++r) {
        permutation[r] = r;
    }

    /* check phi nodes */
    need_permutation = false;
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        const arch_register_t *reg;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        op = get_Phi_pred(node, p);
        a  = find_value_in_block_info(pred_info, op);

        reg  = arch_get_irn_register(node);
        regn = arch_register_get_index(reg);
        if (regn != a) {
            permutation[regn] = a;
            need_permutation  = true;
        }
    }

    if (!need_permutation)
        return;

    /* permute values at the end of the predecessor */
    old_assignments = assignments;
    assignments     = pred_info->assignments;
    permutate_values(NULL, be_get_end_of_block_insertion_point(pred),
                     permutation);
    assignments = old_assignments;

    /* change phi nodes to use the copied values */
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        op = get_Phi_pred(node, p);
        a  = arch_register_get_index(arch_get_irn_register(node));

        op = pred_info->assignments[a].value;
        set_Phi_pred(node, p, op);
    }
/**
 * Walker: assign registers to all nodes of a block that
 * need registers from the currently considered register class.
 */
static void allocate_coalesce_block(ir_node *block, void *data)
{
    ir_nodeset_t           live_nodes;
    ir_nodeset_iterator_t  iter;
    ir_node               *node, *start;
    block_info_t          *block_info;
    block_info_t          *processed_pred_info;
    block_info_t         **pred_block_infos;
    bool                   all_preds_processed;

    DB((dbg, LEVEL_2, "Allocating in block %+F\n", block));

    /* clear assignments */
    block_info  = get_block_info(block);
    assignments = block_info->assignments;

    ir_nodeset_init(&live_nodes);

    /* gather regalloc infos of predecessor blocks */
    n_preds             = get_Block_n_cfgpreds(block);
    pred_block_infos    = ALLOCAN(block_info_t*, n_preds);
    all_preds_processed = true;
    for (i = 0; i < n_preds; ++i) {
        ir_node      *pred      = get_Block_cfgpred_block(block, i);
        block_info_t *pred_info = get_block_info(pred);
        pred_block_infos[i] = pred_info;

        if (!pred_info->processed) {
            all_preds_processed = false;
        } else {
            /* we need 1 (arbitrary) processed predecessor */
            processed_pred_info = pred_info;
        }
    }

    /* we create Phi0s (= SSA construction) if not all preds are known */
    if (!all_preds_processed) {
        block->attr.block.is_matured = 0;
    }
    /* collect live-in nodes and preassigned values */
    be_lv_foreach(lv, block, be_lv_state_in, i) {
        const arch_register_t *reg;

        node = be_lv_get_irn(lv, block, i);
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* if the node already has a register assigned, use it */
        reg = arch_get_irn_register(node);

        /* TODO: consult pred-block infos here. The value could be copied
           away in some/all predecessor blocks. We need to construct
           phi-nodes in this case.
           We even need to construct some Phi_0 like constructs in cases
           where the predecessor allocation is not determined yet. */

        /* if we don't know all predecessors, we have no idea which values
           are copied, so we have to pessimistically construct phi-nodes
           for all of them */
        if (!all_preds_processed) {
            ir_mode *mode = get_irn_mode(node);
            ir_node *phi  = new_r_Phi(block, 0, NULL, mode);
            const arch_register_req_t *req = get_default_req_current_cls();
            be_set_phi_reg_req(phi, req);

            /* TODO: if the node had a register assigned, use that as a strong
               preference */
            mark_as_copy_of(phi, node);
            sched_add_after(block, phi);
        }

        /* check whether the value is the same in all predecessors,
           if not construct a phi node */

        /* remember that this node is live at the beginning of the block */
        ir_nodeset_insert(&live_nodes, node);
    }
    /* handle phis... */
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        const arch_register_t *reg;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* fill in regs already assigned */
        reg = arch_get_irn_register(node);

        /* TODO: give bonus points for registers already assigned at the
           predecessors */
        assign_reg(block, node);
    }

    /* assign regs for live-in values */
    foreach_ir_nodeset(&live_nodes, node, iter) {
        const arch_register_t *reg = arch_get_irn_register(node);

        assign_reg(block, node);
    }

    /* permute values at the end of predecessor blocks in case of phi-nodes */
    for (p = 0; p < n_preds; ++p) {
        add_phi_permutations(block, p);
    }
    /* assign instructions in the block */
    for (node = start; !sched_is_end(node); node = sched_next(node)) {
        int arity = get_irn_arity(node);

        /* enforce use constraints */
        enforce_constraints(&live_nodes, node);

        /* exchange operands for their copied values where needed */
        for (i = 0; i < arity; ++i) {
            ir_node           *op = get_irn_n(node, i);
            allocation_info_t *info;

            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            info = get_allocation_info(op);
            if (info->current_value != op) {
                set_irn_n(node, i, info->current_value);
            }
        }

        /* free registers of values last used at this instruction */
        free_last_uses(&live_nodes, node);

        /* assign output registers */
        /* TODO: 2 phases: first the pre-assigned ones, then the real regs */
        if (get_irn_mode(node) == mode_T) {
            const ir_edge_t *edge;
            foreach_out_edge(node, edge) {
                ir_node *proj = get_edge_src_irn(edge);
                if (!arch_irn_consider_in_reg_alloc(cls, proj))
                    continue;
                assign_reg(block, proj);
            }
        } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
            assign_reg(block, node);
        }
    }

    ir_nodeset_destroy(&live_nodes);

    block_info->processed = true;

    /* if we have exactly 1 successor then we might be able to produce the
       phi permutations for it now */
    if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) == 1) {
        const ir_edge_t *edge
            = get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
        ir_node      *succ      = get_edge_src_irn(edge);
        int           p         = get_edge_src_pos(edge);
        block_info_t *succ_info = get_block_info(succ);

        if (succ_info->processed) {
            add_phi_permutations(succ, p);
        }
    }
/**
 * Run the register allocator for the current register class.
 */
static void be_straight_alloc_cls(void)
{
    lv = be_assure_liveness(birg);
    be_liveness_assure_sets(lv);
    be_liveness_assure_chk(lv);

    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
    inc_irg_visited(irg);

    DB((dbg, LEVEL_2, "=== Allocating registers of %s ===\n", cls->name));

    irg_block_walk_graph(irg, NULL, analyze_block, NULL);
    irg_block_walk_graph(irg, NULL, allocate_coalesce_block, NULL);

    ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
static void dump(int mask, ir_graph *irg, const char *suffix,
                 void (*dumper)(ir_graph *, const char *))
{
    if (birg->main_env->options->dump_flags & mask)
        be_dump(irg, suffix, dumper);
/**
 * Run the spiller on the current graph.
 */
static void spill(void)
{
    /* make sure all nodes show their real register pressure */
    BE_TIMER_PUSH(t_ra_constr);
    be_pre_spill_prepare_constr(birg, cls);
    BE_TIMER_POP(t_ra_constr);

    dump(DUMP_RA, irg, "-spillprepare", dump_ir_block_graph_sched);

    BE_TIMER_PUSH(t_ra_spill);
    be_do_spill(birg, cls);
    BE_TIMER_POP(t_ra_spill);

    BE_TIMER_PUSH(t_ra_spill_apply);
    check_for_memory_operands(irg);
    BE_TIMER_POP(t_ra_spill_apply);

    dump(DUMP_RA, irg, "-spill", dump_ir_block_graph_sched);
/**
 * The straight register allocator for a whole procedure.
 */
static void be_straight_alloc(be_irg_t *new_birg)
{
    const arch_env_t *arch_env = new_birg->main_env->arch_env;
    int               n_cls    = arch_env_get_n_reg_class(arch_env);

    obstack_init(&obst);

    birg      = new_birg;
    irg       = be_get_birg_irg(birg);
    execfreqs = birg->exec_freq;

    /* TODO: extract some of the stuff from the bechordal allocator, like
     * statistics, time measurements, etc. and use them here too */

    for (c = 0; c < n_cls; ++c) {
        cls             = arch_env_get_reg_class(arch_env, c);
        default_cls_req = NULL;
        if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
            continue;

        stat_ev_ctx_push_str("bestraight_cls", cls->name);

        n_regs      = arch_register_class_n_regs(cls);
        ignore_regs = bitset_malloc(n_regs);
        be_put_ignore_regs(birg, cls, ignore_regs);

        /* verify schedule and register pressure */
        BE_TIMER_PUSH(t_verify);
        if (birg->main_env->options->vrfy_option == BE_CH_VRFY_WARN) {
            be_verify_schedule(birg);
            be_verify_register_pressure(birg, cls, irg);
        } else if (birg->main_env->options->vrfy_option == BE_CH_VRFY_ASSERT) {
            assert(be_verify_schedule(birg) && "Schedule verification failed");
            assert(be_verify_register_pressure(birg, cls, irg)
                   && "Register pressure verification failed");
        }
        BE_TIMER_POP(t_verify);

        BE_TIMER_PUSH(t_ra_color);
        be_straight_alloc_cls();
        BE_TIMER_POP(t_ra_color);

        bitset_free(ignore_regs);

        stat_ev_ctx_pop("bestraight_cls");
    }

    BE_TIMER_PUSH(t_verify);
    if (birg->main_env->options->vrfy_option == BE_CH_VRFY_WARN) {
        be_verify_register_allocation(birg);
    } else if (birg->main_env->options->vrfy_option == BE_CH_VRFY_ASSERT) {
        assert(be_verify_register_allocation(birg)
               && "Register allocation invalid");
    }
    BE_TIMER_POP(t_verify);

    obstack_free(&obst, NULL);
/**
 * Initializes this module.
 */
void be_init_straight_alloc(void)
{
    static be_ra_t be_ra_straight = {
        be_straight_alloc,
    };

    FIRM_DBG_REGISTER(dbg, "firm.be.straightalloc");

    be_register_allocator("straight", &be_ra_straight);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_straight_alloc);