/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief   New approach to allocation and copy coalescing
 * @author  Matthias Braun
 *
 * ... WE NEED A NAME FOR THIS ...
 *
 * Only a proof of concept at this moment...
 *
 * The idea is to allocate registers in 2 passes:
 * 1. A first pass to determine "preferred" registers for live-ranges. This
 *    calculates for each register and each live-range a value indicating
 *    its usefulness. (You can roughly think of the value as the negative
 *    costs needed for copies when the value is in that specific register.)
 * 2. Walk the blocks and assign registers in a greedy fashion, preferring
 *    registers with higher preference values. When register constraints are
 *    not met, add copies and split live-ranges.
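 *
 * Example (hypothetical numbers): with 3 registers and a value whose only
 * use is limited to {R0} in a block with execfreq 1.0, pass 1 leaves
 * prefs = { R0: 0.0, R1: -1.0, R2: -1.0 } (the forbidden registers get a
 * weight * USE_FACTOR penalty), so pass 2 greedily picks R0 while it is
 * still free.
 *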
 * TODO:
 *  - make use of free registers in the permutate_values code
 *  - output constraints are not ensured: the algorithm fails to copy values
 *    away so that the registers for constrained outputs become free
 *  - the must_be_different constraint is not respected
 *  - we have to pessimistically construct Phi_0s when not all predecessors
 *    of a block are known
 *  - Phi color assignment should give bonus points towards registers already
 *    assigned at the predecessors
 *  - think about a smarter sequence of visiting the blocks. Sorting them by
 *    execfreq might be good, or walking the looptree from the inner to the
 *    outermost loops, going over the blocks in reverse postorder
 */

#include "irgraph_t.h"
#include "iredges_t.h"

#include "bechordal_t.h"
#include "bespillutil.h"
#include "bipartite.h"
#include "hungarian.h"

#define USE_FACTOR       1.0f
#define DEF_FACTOR       1.0f
#define NEIGHBOR_FACTOR  0.2f
#define SHOULD_BE_SAME   1.0f
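
/* Example of how the factors combine (hypothetical numbers): a use in a
 * block with execfreq 10.0 whose operand is limited to a single register
 * subtracts 10.0 * USE_FACTOR from the operand's preferences for all other
 * registers, and 10.0 * USE_FACTOR * NEIGHBOR_FACTOR = 2.0 from the allowed
 * register in the preferences of all neighboring live values. */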

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static struct obstack               obst;
static be_irg_t                    *birg;
static ir_graph                    *irg;
static const arch_register_class_t *cls;
static be_lv_t                     *lv;
static const ir_exec_freq          *execfreqs;
static unsigned                     n_regs;
static bitset_t                    *ignore_regs;

/** info about the current assignment for a register */
struct assignment_t {
    ir_node *value; /**< currently assigned value */
};
typedef struct assignment_t assignment_t;

/** currently active assignments (while processing a basic block) */
static assignment_t *assignments;

/**
 * Allocation information: last uses and register preferences.
 * The information is kept per firm-node.
 */
struct allocation_info_t {
    unsigned      last_uses;          /**< bitset indicating last uses (input pos) */
    assignment_t *current_assignment;
    float         prefs[0];           /**< register preferences */
};
typedef struct allocation_info_t allocation_info_t;
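
/* Note: prefs[0] is the old-style flexible array member idiom: each struct
 * is over-allocated to sizeof(allocation_info_t) + n_regs * sizeof(float),
 * see get_allocation_info(). block_info_t below uses the same trick for its
 * assignments array. */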

/** helper data structure used when sorting register preferences */
struct reg_pref_t {
    unsigned num;  /**< register index */
    float    pref; /**< preference value */
};
typedef struct reg_pref_t reg_pref_t;

/** per basic-block information */
struct block_info_t {
    int          processed;       /**< indicates whether block is processed */
    assignment_t assignments[0];  /**< register assignments at end of block */
};
typedef struct block_info_t block_info_t;

/**
 * Get the allocation info for a node.
 * The info is allocated on the first visit of a node.
 */
static allocation_info_t *get_allocation_info(ir_node *node)
{
    allocation_info_t *info;
    if (!irn_visited_else_mark(node)) {
        size_t size = sizeof(info[0]) + n_regs * sizeof(info->prefs[0]);
        info = obstack_alloc(&obst, size);
        memset(info, 0, size);
        set_irn_link(node, info);
    } else {
        info = get_irn_link(node);
    }

    return info;
}

/**
 * Get the allocation information for a basic block.
 */
static block_info_t *get_block_info(ir_node *block)
{
    block_info_t *info;

    assert(is_Block(block));
    if (!irn_visited_else_mark(block)) {
        size_t size = sizeof(info[0]) + n_regs * sizeof(info->assignments[0]);
        info = obstack_alloc(&obst, size);
        memset(info, 0, size);
        set_irn_link(block, info);
    } else {
        info = get_irn_link(block);
    }

    return info;
}

/**
 * Link the allocation info of a node to a copy.
 * Afterwards, both nodes use the same allocation info.
 * The copy must not have allocation info assigned yet.
 *
 * @param copy   the node that gets the allocation info assigned
 * @param value  the original node
 */
static void link_to(ir_node *copy, ir_node *value)
{
    allocation_info_t *info = get_allocation_info(value);
    assert(!irn_visited(copy));
    set_irn_link(copy, info);
    mark_irn_visited(copy);
}

/**
 * Calculate the penalties for every register on a node and its live
 * neighbors.
 *
 * @param live_nodes  the set of live nodes at the current position, may be
 *                    NULL
 * @param penalty     the penalty to subtract from the preferences
 * @param limited     a raw bitset containing the limited set for the node
 * @param node        the node
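 *
 * Example (hypothetical numbers): penalty 1.0 with limited = {R2} subtracts
 * 1.0 from the node's preferences for every register except R2, and
 * 1.0 * NEIGHBOR_FACTOR = 0.2 from the R2 preference of every other live
 * value, so the neighbors learn to avoid R2.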
 */
static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
                                      float penalty, const unsigned* limited,
                                      ir_node *node)
{
    ir_nodeset_iterator_t  iter;
    unsigned               r;
    allocation_info_t     *info = get_allocation_info(node);
    ir_node               *neighbor;

    /* give penalty for all forbidden regs */
    for (r = 0; r < n_regs; ++r) {
        if (rbitset_is_set(limited, r))
            continue;

        info->prefs[r] -= penalty;
    }

    /* all other live values should get a penalty for the allowed regs */
    if (live_nodes == NULL)
        return;

    /* TODO: reduce penalty if there are multiple allowed registers... */
    penalty *= NEIGHBOR_FACTOR;
    foreach_ir_nodeset(live_nodes, neighbor, iter) {
        allocation_info_t *neighbor_info;

        /* TODO: if op is used on multiple inputs we might not do a
         * continue here */
        if (neighbor == node)
            continue;

        neighbor_info = get_allocation_info(neighbor);
        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(limited, r))
                continue;

            neighbor_info->prefs[r] -= penalty;
        }
    }
}

/**
 * Calculate the preferences of a definition for the current register class.
 * If the definition uses a limited set of registers, reduce the preferences
 * for the limited registers on the node and its neighbors.
 *
 * @param live_nodes  the set of live nodes at the current node
 * @param weight      the weight
 * @param node        the current node
 */
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
                       ir_node *node)
{
    const arch_register_req_t *req;

    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;
        foreach_out_edge(node, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            check_defs(live_nodes, weight, proj);
        }
        return;
    }

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    req = arch_get_register_req_out(node);
    if (req->type & arch_register_req_type_limited) {
        const unsigned *limited = req->limited;
        float           penalty = weight * DEF_FACTOR;
        give_penalties_for_limits(live_nodes, penalty, limited, node);
    }

    if (req->type & arch_register_req_type_should_be_same) {
        ir_node           *insn  = skip_Proj(node);
        allocation_info_t *info  = get_allocation_info(node);
        int                arity = get_irn_arity(insn);
        int                i;

        float factor = 1.0f / rbitset_popcnt(&req->other_same, arity);
        for (i = 0; i < arity; ++i) {
            ir_node           *op;
            unsigned           r;
            allocation_info_t *op_info;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op      = get_irn_n(insn, i);
            op_info = get_allocation_info(op);
            for (r = 0; r < n_regs; ++r) {
                if (bitset_is_set(ignore_regs, r))
                    continue;

                op_info->prefs[r] += info->prefs[r] * factor;
            }
        }
    }
}
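
/* Example for the should_be_same handling above (hypothetical): for a
 * two-address instruction whose output should be the same as input 0, the
 * output's preference vector is added onto input 0's preferences (scaled by
 * 1/popcount(other_same)), nudging both live-ranges towards one register. */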

/**
 * Walker: runs over a block and calculates the preferences for every
 * node and every register from the considered register class.
 */
static void analyze_block(ir_node *block, void *data)
{
    float         weight = get_block_execfreq(execfreqs, block);
    ir_nodeset_t  live_nodes;
    ir_node      *node;
    (void) data;

    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);
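
    /* walk the schedule bottom-up, so live_nodes always holds the values
     * live after the current node (backwards liveness transfer) */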
    sched_foreach_reverse(block, node) {
        allocation_info_t *info;
        int                i;
        int                arity;

        if (is_Phi(node)) {
            /* TODO: handle constrained phi-nodes */
            break;
        }

        /* TODO: give/take penalties for should_be_same/different */
        check_defs(&live_nodes, weight, node);

        arity = get_irn_arity(node);
        /* I was lazy, and only allocated 1 unsigned
         * => maximum of 32 uses per node (rewrite if necessary) */
        assert(arity <= (int) sizeof(unsigned) * 8);

        info = get_allocation_info(node);
        for (i = 0; i < arity; ++i) {
            ir_node *op = get_irn_n(node, i);
            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            /* last usage of a value? */
            if (!ir_nodeset_contains(&live_nodes, op)) {
                rbitset_set(&info->last_uses, i);
            }
        }

        be_liveness_transfer(cls, node, &live_nodes);

        /* update weights based on usage constraints */
        for (i = 0; i < arity; ++i) {
            const arch_register_req_t *req;
            const unsigned            *limited;
            ir_node                   *op = get_irn_n(node, i);

            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            req = arch_get_register_req(node, i);
            if (!(req->type & arch_register_req_type_limited))
                continue;

            /* TODO: give penalties to neighbors for precolored nodes! */
            limited = req->limited;
            give_penalties_for_limits(&live_nodes, weight * USE_FACTOR,
                                      limited, op);
        }
    }

    ir_nodeset_destroy(&live_nodes);
}

/**
 * Assign register reg to the given node.
 *
 * @param node  the node
 * @param reg   the register
 */
static void use_reg(ir_node *node, const arch_register_t *reg)
{
    unsigned           r          = arch_register_get_index(reg);
    assignment_t      *assignment = &assignments[r];
    allocation_info_t *info;

    assert(assignment->value == NULL);
    assignment->value = node;

    info = get_allocation_info(node);
    info->current_assignment = assignment;

    arch_set_irn_register(node, reg);
}

/**
 * Compare two register preferences in decreasing order.
 */
static int compare_reg_pref(const void *e1, const void *e2)
{
    const reg_pref_t *rp1 = (const reg_pref_t*) e1;
    const reg_pref_t *rp2 = (const reg_pref_t*) e2;
    if (rp1->pref < rp2->pref)
        return 1;
    if (rp1->pref > rp2->pref)
        return -1;
    return 0;
}

static void fill_sort_candidates(reg_pref_t *regprefs,
                                 const allocation_info_t *info)
{
    unsigned r;

    for (r = 0; r < n_regs; ++r) {
        float pref = info->prefs[r];
        if (bitset_is_set(ignore_regs, r)) {
            pref = -10000;
        }
        regprefs[r].num  = r;
        regprefs[r].pref = pref;
    }
    /* TODO: use a stable sort here to avoid unnecessary register jumping */
    qsort(regprefs, n_regs, sizeof(regprefs[0]), compare_reg_pref);
}

/**
 * Determine and assign a register for node @p node.
 */
static void assign_reg(const ir_node *block, ir_node *node)
{
    const arch_register_t     *reg;
    allocation_info_t         *info;
    const arch_register_req_t *req;
    reg_pref_t                *reg_prefs;
    ir_node                   *in_node;
    unsigned                   i;

    assert(arch_irn_consider_in_reg_alloc(cls, node));

    /* preassigned register? */
    reg = arch_get_irn_register(node);
    if (reg != NULL) {
        DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, reg->name));
        use_reg(node, reg);
        return;
    }

    /* give should_be_same bonus points */
    info = get_allocation_info(node);
    req  = arch_get_register_req_out(node);

    in_node = skip_Proj(node);
    if (req->type & arch_register_req_type_should_be_same) {
        float weight = get_block_execfreq(execfreqs, block);
        int   arity  = get_irn_arity(in_node);
        int   i2;

        assert(arity <= (int) sizeof(req->other_same) * 8);
        for (i2 = 0; i2 < arity; ++i2) {
            ir_node               *in;
            const arch_register_t *reg;
            unsigned               r;

            if (!rbitset_is_set(&req->other_same, i2))
                continue;

            in  = get_irn_n(in_node, i2);
            reg = arch_get_irn_register(in);
            r   = arch_register_get_index(reg);
            if (bitset_is_set(ignore_regs, r))
                continue;
            info->prefs[r] += weight * SHOULD_BE_SAME;
        }
    }

    /* TODO: handle must_be_different */

    DB((dbg, LEVEL_2, "Candidates for %+F:", node));
    reg_prefs = alloca(n_regs * sizeof(reg_prefs[0]));
    fill_sort_candidates(reg_prefs, info);
    for (i = 0; i < n_regs; ++i) {
        unsigned               num = reg_prefs[i].num;
        const arch_register_t *reg = arch_register_for_index(cls, num);
        DB((dbg, LEVEL_2, " %s(%f)", reg->name, reg_prefs[i].pref));
    }
    DB((dbg, LEVEL_2, "\n"));

    for (i = 0; i < n_regs; ++i) {
        unsigned r = reg_prefs[i].num;
        /* ignores should be last and we should have a non-ignore left */
        assert(!bitset_is_set(ignore_regs, r));
        /* TODO: It might be better to copy the value occupying the register
         * around here, find out when... */
        if (assignments[r].value != NULL)
            continue;
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
        use_reg(node, reg);
        break;
    }
}

static void free_reg_of_value(ir_node *node)
{
    allocation_info_t *info;
    assignment_t      *assignment;
    unsigned           r;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    info       = get_allocation_info(node);
    assignment = info->current_assignment;

    assert(assignment != NULL);

    r = assignment - assignments;
    DB((dbg, LEVEL_2, "Value %+F ended, freeing %s\n",
        node, arch_register_for_index(cls, r)->name));
    assignment->value        = NULL;
    info->current_assignment = NULL;
}

/**
 * Return the index of the currently assigned register of a node.
 */
static unsigned get_current_reg(ir_node *node)
{
    allocation_info_t *info       = get_allocation_info(node);
    assignment_t      *assignment = info->current_assignment;
    return assignment - assignments;
}

/**
 * Return the current assignment of a node.
 */
static assignment_t *get_current_assignment(ir_node *node)
{
    allocation_info_t *info = get_allocation_info(node);
    return info->current_assignment;
}

/**
 * Add a permutation in front of a node and change the assignments
 * due to this permutation.
 *
 * To understand this, imagine a permutation like this (illustrative values;
 * on the left a source register, on the right the destination registers its
 * value has to be moved to):
 *
 *   1 -> 2
 *   2 -> 3
 *   3 -> 1, 4
 *   4 -> 5
 *   7 -> 7
 *
 * First we count how many destinations a single value has. At the same time
 * we can be sure that each destination register has at most 1 source register
 * (it can have 0, which means we don't care what value is in it).
 * We ignore all fulfilled permutations (like 7 -> 7).
 * In a first pass we create as many copy instructions as possible, as they
 * are generally cheaper than exchanges. We do this by counting into how many
 * destinations a register has to be copied (in the example it's 2 for
 * register 3, and 1 for the registers 1, 2, 4 and 7).
 * We can then create a copy into every destination register whose use count
 * is 0 (= no one else needs the value in that register anymore).
 *
 * After this step only cycles are left. We implement a cyclic permutation
 * of n registers with n-1 transpositions.
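 *
 * Illustrative walk-through of the example above: register 5 is free, so
 * "4 -> 5" becomes a copy; that drops the use count of register 4 to 0, so
 * "3 -> 4" becomes a copy as well. "7 -> 7" is already fulfilled, and the
 * remaining cycle 1 -> 2 -> 3 -> 1 is resolved with two transpositions
 * (Perm nodes).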
 *
 * @param live_nodes   the set of live nodes, updated due to live range
 *                     splits
 * @param before       the node in front of which the permutation is added
 * @param permutation  the permutation array: indices are the destination
 *                     registers, the values in the array are the source
 *                     registers
 */
static void permutate_values(ir_nodeset_t *live_nodes, ir_node *before,
                             unsigned *permutation)
{
    unsigned  r;
    ir_node **ins    = ALLOCANZ(ir_node*, n_regs);
    unsigned *n_used = ALLOCANZ(unsigned, n_regs);
    ir_node  *block;

    /* create a list of permutations. Leave out fix points. */
    for (r = 0; r < n_regs; ++r) {
        unsigned      old_reg = permutation[r];
        assignment_t *assignment;
        ir_node      *value;

        /* no need to do anything for a fixpoint */
        if (old_reg == r)
            continue;

        assignment = &assignments[old_reg];
        value      = assignment->value;
        if (value == NULL) {
            /* nothing to do here, reg is not live. Mark it as fixpoint
             * so we ignore it in the next steps */
            permutation[r] = r;
            continue;
        }

        ins[old_reg] = value;
        ++n_used[old_reg];

        /* free occupation infos, we'll add the values back later */
        if (live_nodes != NULL) {
            free_reg_of_value(value);
            ir_nodeset_remove(live_nodes, value);
        }
    }

    block = get_nodes_block(before);

    /* step 1: create copies where immediately possible */
    for (r = 0; r < n_regs; /* empty */) {
        ir_node               *copy;
        ir_node               *src;
        const arch_register_t *reg;
        unsigned               old_r = permutation[r];

        /* - no need to do anything for fixed points.
         * - we can't copy if the value in the dest reg is still needed */
        if (old_r == r || n_used[r] > 0) {
            ++r;
            continue;
        }

        src  = ins[old_r];
        copy = be_new_Copy(cls, block, src);
        reg  = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Copy %+F (from %+F) -> %s\n", copy, src, reg->name));
        link_to(copy, src);
        use_reg(copy, reg);
        sched_add_before(before, copy);

        /* old register has 1 user less, permutation is resolved */
        assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
        assert(n_used[old_r] > 0);
        --n_used[old_r];
        permutation[r] = r;

        /* advance or jump back (this copy could have enabled another copy) */
        if (old_r < r && n_used[old_r] == 0) {
            r = old_r;
        } else {
            ++r;
        }
    }

    /* at this point we only have "cycles" left which we have to resolve with
     * perm instructions
     * TODO: if we have free registers left, then we should really use copy
     * instructions for any cycle longer than 2 registers...
     * (this is probably architecture dependent, there might be archs where
     * copies are preferable even for 2-register cycles) */

    /* create perms with the rest */
    for (r = 0; r < n_regs; /* empty */) {
        const arch_register_t *reg;
        unsigned               old_r = permutation[r];
        unsigned               r2;
        ir_node               *in[2];
        ir_node               *perm;
        ir_node               *proj0;
        ir_node               *proj1;

        if (old_r == r) {
            ++r;
            continue;
        }

        /* we shouldn't have copies from 1 value to multiple destinations left */
        assert(n_used[old_r] == 1);

        /* exchange old_r and r2; after that old_r is a fixed point */
        r2 = permutation[old_r];

        in[0] = ins[r2];
        in[1] = ins[old_r];
        perm = be_new_Perm(cls, block, 2, in);
        sched_add_before(before, perm);

        proj0 = new_r_Proj(block, perm, get_irn_mode(in[0]), 0);
        link_to(proj0, in[0]);
        reg = arch_register_for_index(cls, old_r);
        use_reg(proj0, reg);

        proj1 = new_r_Proj(block, perm, get_irn_mode(in[1]), 1);

        /* 1 value is now in the correct register */
        permutation[old_r] = old_r;
        /* the source of r changed to r2 */
        permutation[r] = r2;
        ins[r2] = proj1;
        reg = arch_register_for_index(cls, r2);
        /* if we have reached a fixpoint update data structures */
        link_to(proj1, in[1]);
        if (r2 == r) {
            use_reg(proj1, reg);
        } else {
            arch_set_irn_register(proj1, reg);
        }
    }

    /* now we should only have fixpoints left */
    for (r = 0; r < n_regs; ++r) {
        assert(permutation[r] == r);
    }
}

/**
 * Free regs for values last used.
 *
 * @param live_nodes  set of live nodes, will be updated
 * @param node        the node to consider
 */
static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
{
    allocation_info_t *info  = get_allocation_info(node);
    int                arity = get_irn_arity(node);
    int                i;

    for (i = 0; i < arity; ++i) {
        ir_node *op;

        /* check if one operand is the last use */
        if (!rbitset_is_set(&info->last_uses, i))
            continue;

        op = get_irn_n(node, i);
        free_reg_of_value(op);
        ir_nodeset_remove(live_nodes, op);
    }
}

/**
 * Create a bitset of registers occupied by values living through an
 * instruction.
 */
static void determine_live_through_regs(unsigned *bitset, ir_node *node)
{
    const allocation_info_t *info = get_allocation_info(node);
    unsigned                 r;
    int                      i;
    int                      arity;

    /* mark all used registers as potentially live-through */
    for (r = 0; r < n_regs; ++r) {
        const assignment_t *assignment = &assignments[r];
        if (assignment->value == NULL)
            continue;

        rbitset_set(bitset, r);
    }

    /* remove registers of values dying at the instruction */
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        ir_node               *op;
        const arch_register_t *reg;

        if (!rbitset_is_set(&info->last_uses, i))
            continue;

        op  = get_irn_n(node, i);
        reg = arch_get_irn_register(op);
        rbitset_clear(bitset, arch_register_get_index(reg));
    }
}

/**
 * Enforce constraints at a node by live range splits.
 *
 * @param live_nodes  the set of live nodes, might be changed
 * @param node        the current node
 */
static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node)
{
    int                  arity = get_irn_arity(node);
    int                  i, dummy, res;
    hungarian_problem_t *bp;
    unsigned             l, r, p;
    unsigned            *assignment;
    int                  good = 1;

    /* see if any use constraints are not met */
    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   r;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        /* are there any limitations for the i'th operand? */
        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited = req->limited;
        r       = get_current_reg(op);
        if (!rbitset_is_set(limited, r)) {
            /* found an assignment outside the limited set */
            good = 0;
            break;
        }
    }

    /* construct a list of registers occupied by live-through values */
    unsigned *live_through_regs = NULL;
    unsigned *output_regs       = NULL;

    /* is any of the live-throughs using a constrained output register? */
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;

        foreach_out_edge(node, edge) {
            ir_node                   *proj = get_edge_src_irn(edge);
            const arch_register_req_t *req;

            if (!arch_irn_consider_in_reg_alloc(cls, proj))
                continue;

            req = arch_get_register_req_out(proj);
            if (!(req->type & arch_register_req_type_limited))
                continue;

            if (live_through_regs == NULL) {
                rbitset_alloca(live_through_regs, n_regs);
                determine_live_through_regs(live_through_regs, node);

                rbitset_alloca(output_regs, n_regs);
            }

            rbitset_or(output_regs, req->limited, n_regs);
            if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {
                good = 0;
            }
        }
    } else {
        if (arch_irn_consider_in_reg_alloc(cls, node)) {
            const arch_register_req_t *req = arch_get_register_req_out(node);
            if (req->type & arch_register_req_type_limited) {
                rbitset_alloca(live_through_regs, n_regs);
                determine_live_through_regs(live_through_regs, node);
                if (rbitsets_have_common(req->limited, live_through_regs,
                                         n_regs)) {
                    good = 0;

                    rbitset_alloca(output_regs, n_regs);
                    rbitset_or(output_regs, req->limited, n_regs);
                }
            }
        }
    }

    if (good)
        return;

    if (live_through_regs == NULL) {
        rbitset_alloca(live_through_regs, n_regs);
        rbitset_alloca(output_regs, n_regs);
    }

    /* swap values around */
    bp = hungarian_new(n_regs, n_regs, HUNGARIAN_MATCH_PERFECT);

    /* add all combinations, then remove not allowed ones */
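    /* (the weights: 90 on the diagonal slightly favors keeping a value in
     * its current register over moving it to another allowed one (89), so
     * the perfect matching moves as few values as possible) */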
    for (l = 0; l < n_regs; ++l) {
        if (bitset_is_set(ignore_regs, l)) {
            hungarian_add(bp, l, l, 90);
            continue;
        }

        for (r = 0; r < n_regs; ++r) {
            if (bitset_is_set(ignore_regs, r))
                continue;

            /* livethrough values may not use constrained output registers */
            if (rbitset_is_set(live_through_regs, l)
                    && rbitset_is_set(output_regs, r))
                continue;

            hungarian_add(bp, l, r, l == r ? 90 : 89);
        }
    }

    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   current_reg;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited     = req->limited;
        current_reg = get_current_reg(op);
        for (r = 0; r < n_regs; ++r) {
            if (rbitset_is_set(limited, r))
                continue;
            hungarian_remv(bp, current_reg, r);
        }
    }

    hungarian_print_costmatrix(bp, 1);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(unsigned, n_regs);
    res        = hungarian_solve(bp, (int*) assignment, &dummy, 0);
    assert(res == 0);
    hungarian_free(bp);

    printf("Swap result:");
    for (p = 0; p < n_regs; ++p) {
        printf(" %d", assignment[p]);
    }
    printf("\n");

    permutate_values(live_nodes, node, assignment);
}

/** tests whether a node @p n is a copy of the value of node @p of */
static int is_copy_of(ir_node *n, ir_node *of)
{
    allocation_info_t *of_info;

    if (n == of)
        return 1;
    if (n == NULL || !irn_visited(n))
        return 0;

    of_info = get_allocation_info(of);
    return of_info == get_irn_link(n);
}

/**
 * Find a value in the end-assignment of a basic block.
 * @returns the index into the assignment array if found,
 *          -1 otherwise
 */
static int find_value_in_block_info(block_info_t *info, ir_node *value)
{
    unsigned      r;
    assignment_t *assignments = info->assignments;
    for (r = 0; r < n_regs; ++r) {
        const assignment_t *assignment = &assignments[r];
        if (is_copy_of(assignment->value, value))
            return (int) r;
    }

    return -1;
}

/**
 * Create the necessary permutations at the end of a basic block to fulfill
 * the register assignment for phi-nodes in the next block.
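 *
 * Example (illustrative): if a Phi was assigned R0 but the value of its
 * operand for predecessor p sits in R2 at the end of p, we record
 * permutation[R0] = R2 and let permutate_values() add the necessary copies
 * and swaps at the end of p.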
 */
static void add_phi_permutations(ir_node *block, int p)
{
    unsigned      r;
    unsigned     *permutation;
    assignment_t *old_assignments;
    int           need_permutation;
    ir_node      *node;
    ir_node      *pred = get_Block_cfgpred_block(block, p);

    block_info_t *pred_info = get_block_info(pred);

    /* predecessor not processed yet? nothing to do */
    if (!pred_info->processed)
        return;

    permutation = ALLOCAN(unsigned, n_regs);
    for (r = 0; r < n_regs; ++r) {
        permutation[r] = r;
    }

    /* check phi nodes */
    need_permutation = 0;
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        const arch_register_t *reg;
        int                    regn;
        int                    a;
        ir_node               *op;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        op = get_Phi_pred(node, p);
        a  = find_value_in_block_info(pred_info, op);
        assert(a >= 0);

        reg  = arch_get_irn_register(node);
        regn = arch_register_get_index(reg);
        if (regn != a) {
            permutation[regn] = a;
            need_permutation  = 1;
        }
    }

    if (need_permutation) {
        old_assignments = assignments;
        assignments     = pred_info->assignments;
        permutate_values(NULL, be_get_end_of_block_insertion_point(pred),
                         permutation);
        assignments = old_assignments;
    }

    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        int      a;
        ir_node *op;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        op = get_Phi_pred(node, p);
        /* TODO: optimize */
        a = find_value_in_block_info(pred_info, op);
        assert(a >= 0);

        op = pred_info->assignments[a].value;
        set_Phi_pred(node, p, op);
    }
}

/**
 * Walker: assign registers to all nodes of a block that
 * need registers from the currently considered register class.
 */
static void allocate_coalesce_block(ir_node *block, void *data)
{
    int                     i;
    unsigned                r;
    ir_nodeset_t            live_nodes;
    ir_nodeset_iterator_t   iter;
    ir_node                *node, *start;
    int                     n_preds;
    block_info_t           *block_info;
    block_info_t          **pred_block_infos;

    (void) data;
    DB((dbg, LEVEL_2, "Allocating in block %+F\n", block));

    /* clear assignments */
    block_info  = get_block_info(block);
    assignments = block_info->assignments;

    for (r = 0; r < n_regs; ++r) {
        assignment_t      *assignment = &assignments[r];
        ir_node           *value      = assignment->value;
        allocation_info_t *info;

        if (value == NULL)
            continue;

        info = get_allocation_info(value);
        info->current_assignment = assignment;
    }

    ir_nodeset_init(&live_nodes);

    /* gather regalloc infos of predecessor blocks */
    n_preds = get_Block_n_cfgpreds(block);
    pred_block_infos = ALLOCAN(block_info_t*, n_preds);
    for (i = 0; i < n_preds; ++i) {
        ir_node *pred = get_Block_cfgpred_block(block, i);
        pred_block_infos[i] = get_block_info(pred);
    }

    /* collect live-in nodes and preassigned values */
    be_lv_foreach(lv, block, be_lv_state_in, i) {
        const arch_register_t *reg;

        node = be_lv_get_irn(lv, block, i);
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* remember that this node is live at the beginning of the block */
        ir_nodeset_insert(&live_nodes, node);

        /* if the node already has a register assigned, use it */
        reg = arch_get_irn_register(node);
        if (reg != NULL) {
            /* TODO: consult pred-block infos here. The value could be copied
             * away in some/all predecessor blocks. We need to construct
             * phi-nodes in this case.
             * We even need to construct some Phi_0 like constructs in cases
             * where the predecessor allocation is not determined yet. */
            use_reg(node, reg);
        }
    }

    /* handle phis... */
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        const arch_register_t *reg;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* fill in regs already assigned */
        reg = arch_get_irn_register(node);
        if (reg != NULL) {
            use_reg(node, reg);
        } else {
            /* TODO: give bonus points for registers already assigned at the
             * predecessors */
            assign_reg(block, node);
        }
    }
    start = node;

    /* assign regs for live-in values */
    foreach_ir_nodeset(&live_nodes, node, iter) {
        const arch_register_t *reg = arch_get_irn_register(node);
        if (reg != NULL)
            continue;

        assign_reg(block, node);
    }

    /* permutate values at end of predecessor blocks in case of phi-nodes */
    if (n_preds > 1) {
        int p;
        for (p = 0; p < n_preds; ++p) {
            add_phi_permutations(block, p);
        }
    }

    /* assign instructions in the block */
    for (node = start; !sched_is_end(node); node = sched_next(node)) {
        int arity = get_irn_arity(node);

        /* enforce use constraints */
        enforce_constraints(&live_nodes, node);

        /* exchange values to copied values where needed */
        for (i = 0; i < arity; ++i) {
            ir_node      *op = get_irn_n(node, i);
            assignment_t *assignment;

            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;
            assignment = get_current_assignment(op);
            assert(assignment != NULL);
            if (op != assignment->value) {
                set_irn_n(node, i, assignment->value);
            }
        }

        /* free registers of values last used at this instruction */
        free_last_uses(&live_nodes, node);

        /* assign output registers */
        /* TODO: 2 phases: first the pre-assigned ones, then the real regs */
        if (get_irn_mode(node) == mode_T) {
            const ir_edge_t *edge;
            foreach_out_edge(node, edge) {
                ir_node *proj = get_edge_src_irn(edge);
                if (!arch_irn_consider_in_reg_alloc(cls, proj))
                    continue;
                assign_reg(block, proj);
            }
        } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
            assign_reg(block, node);
        }
    }

    ir_nodeset_destroy(&live_nodes);

    block_info->processed = 1;

    /* if we have exactly 1 successor, then we might be able to produce phi
     * permutations now */
    if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) == 1) {
        const ir_edge_t *edge
            = get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
        ir_node      *succ      = get_edge_src_irn(edge);
        int           p         = get_edge_src_pos(edge);
        block_info_t *succ_info = get_block_info(succ);

        if (succ_info->processed) {
            add_phi_permutations(succ, p);
        }
    }
}

/**
 * Run the register allocator for the current register class.
 */
static void be_straight_alloc_cls(void)
{
    lv = be_assure_liveness(birg);
    be_liveness_assure_sets(lv);
    be_liveness_assure_chk(lv);

    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
    inc_irg_visited(irg);

    DB((dbg, LEVEL_2, "=== Allocating registers of %s ===\n", cls->name));

    irg_block_walk_graph(irg, NULL, analyze_block, NULL);
    irg_block_walk_graph(irg, NULL, allocate_coalesce_block, NULL);

    ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
}

/**
 * Run the spiller on the current graph.
 */
static void spill(void)
{
    /* make sure all nodes show their real register pressure */
    BE_TIMER_PUSH(t_ra_constr);
    be_pre_spill_prepare_constr(birg, cls);
    BE_TIMER_POP(t_ra_constr);

    BE_TIMER_PUSH(t_ra_spill);
    be_do_spill(birg, cls);
    BE_TIMER_POP(t_ra_spill);

    BE_TIMER_PUSH(t_ra_spill_apply);
    check_for_memory_operands(irg);
    BE_TIMER_POP(t_ra_spill_apply);
}

/**
 * The straight register allocator for a whole procedure.
 */
static void be_straight_alloc(be_irg_t *new_birg)
{
    const arch_env_t *arch_env = new_birg->main_env->arch_env;
    int               n_cls    = arch_env_get_n_reg_class(arch_env);
    int               c;

    obstack_init(&obst);

    birg      = new_birg;
    irg       = be_get_birg_irg(birg);
    execfreqs = birg->exec_freq;

    /* TODO: extract some of the stuff from the bechordal allocator, like
     * statistics, time measurements, etc. and use them here too */

    for (c = 0; c < n_cls; ++c) {
        cls = arch_env_get_reg_class(arch_env, c);
        if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
            continue;

        stat_ev_ctx_push_str("bestraight_cls", cls->name);

        n_regs      = arch_register_class_n_regs(cls);
        ignore_regs = bitset_malloc(n_regs);
        be_put_ignore_regs(birg, cls, ignore_regs);

        spill();

        /* verify schedule and register pressure */
        BE_TIMER_PUSH(t_verify);
        if (birg->main_env->options->vrfy_option == BE_CH_VRFY_WARN) {
            be_verify_schedule(birg);
            be_verify_register_pressure(birg, cls, irg);
        } else if (birg->main_env->options->vrfy_option == BE_CH_VRFY_ASSERT) {
            assert(be_verify_schedule(birg) && "Schedule verification failed");
            assert(be_verify_register_pressure(birg, cls, irg)
                   && "Register pressure verification failed");
        }
        BE_TIMER_POP(t_verify);

        BE_TIMER_PUSH(t_ra_color);
        be_straight_alloc_cls();
        BE_TIMER_POP(t_ra_color);

        bitset_free(ignore_regs);

        stat_ev_ctx_pop("bestraight_cls");
    }

    BE_TIMER_PUSH(t_verify);
    if (birg->main_env->options->vrfy_option == BE_CH_VRFY_WARN) {
        be_verify_register_allocation(birg);
    } else if (birg->main_env->options->vrfy_option == BE_CH_VRFY_ASSERT) {
        assert(be_verify_register_allocation(birg)
               && "Register allocation invalid");
    }
    BE_TIMER_POP(t_verify);

    obstack_free(&obst, NULL);
}

/**
 * Initializes this module.
 */
void be_init_straight_alloc(void)
{
    static be_ra_t be_ra_straight = {
        be_straight_alloc,
    };

    FIRM_DBG_REGISTER(dbg, "firm.be.straightalloc");

    be_register_allocator("straight", &be_ra_straight);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_straight_alloc);