/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @file
 * @brief   New approach to allocation and copy coalescing
 * @author  Matthias Braun
 *
 * ... WE NEED A NAME FOR THIS ...
 *
 * Only a proof of concept at the moment...
 *
 * The idea is to allocate registers in 2 passes:
 * 1. A first pass to determine "preferred" registers for live-ranges. For
 *    each live-range and each register this calculates a value indicating
 *    how useful the register is. (You can roughly think of the value as the
 *    negated costs of the copies needed when the value sits in that specific
 *    register...)
 * 2. Walk the blocks and assign registers in a greedy fashion, preferring
 *    registers with high preference values. When register constraints are
 *    not met, insert copies and split live-ranges.
 *
 * TODO:
 * - Output constraints are not enforced: the algorithm fails to copy values
 *   away so that the registers needed by constrained outputs become free.
 * - The must_be_different constraint is not respected.
 * - No parallel copies are created at basic block borders, and no additional
 *   phis are created after copies have been inserted.
 * - Phi color assignment should give bonus points towards registers already
 *   assigned at the predecessors.
 * - Think about a smarter sequence for visiting the blocks: sorting by
 *   execfreq might be good, or walking the looptree from the innermost to
 *   the outermost loops, visiting the blocks of each loop in reverse
 *   postorder.
 */
#include "irgraph_t.h"
#include "iredges_t.h"

#include "bechordal_t.h"
#include "besched_t.h"
#include "bespilloptions.h"
#include "bipartite.h"
#include "hungarian.h"
#define USE_FACTOR       1.0f
#define DEF_FACTOR       1.0f
#define NEIGHBOR_FACTOR  0.2f
#define SHOULD_BE_SAME   1.0f
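
/* Worked example of the preference pass (hypothetical 3-register class
 * r0-r2): a use of a value v that is limited to {r0}, sitting in a block
 * with execfreq 2.0, subtracts 2.0 * USE_FACTOR from v's preference for the
 * forbidden r1 and r2, and 2.0 * USE_FACTOR * NEIGHBOR_FACTOR = 0.4 from
 * the r0 preference of every other value live at that point. The greedy
 * second pass then simply takes the free register with the highest
 * remaining preference. */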
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static struct obstack               obst;
static be_irg_t                    *birg;
static ir_graph                    *irg;
static const arch_register_class_t *cls;
static be_lv_t                     *lv;
static const ir_exec_freq          *execfreqs;
static unsigned                     n_regs;
static bitset_t                    *ignore_regs;
/** info about the current assignment for a register */
struct assignment_t {
	ir_node *value; /**< currently assigned value */
};
typedef struct assignment_t assignment_t;

/** currently active assignments (while processing a basic block) */
static assignment_t *assignments;
/**
 * allocation information: last_uses, register preferences.
 * The information is per firm-node.
 */
struct allocation_info_t {
	unsigned      last_uses;          /**< bitset indicating last uses (input pos) */
	assignment_t *current_assignment; /**< currently assigned register (if any) */
	float         prefs[0];           /**< register preferences */
};
typedef struct allocation_info_t allocation_info_t;
/** helper datastructure used when sorting register preferences */
struct reg_pref_t {
	unsigned num;  /**< register index */
	float    pref; /**< preference value */
};
typedef struct reg_pref_t reg_pref_t;
/** per basic-block information */
struct block_info_t {
	int          processed;      /**< indicates whether block is processed */
	assignment_t assignments[0]; /**< register assignments at end of block */
};
typedef struct block_info_t block_info_t;
/**
 * Get the allocation info for a node.
 * The info is allocated on the first visit of a node.
 */
static allocation_info_t *get_allocation_info(ir_node *node)
{
	allocation_info_t *info;
	if (!irn_visited(node)) {
		size_t size = sizeof(info[0]) + n_regs * sizeof(info->prefs[0]);
		info = obstack_alloc(&obst, size);
		memset(info, 0, size);
		set_irn_link(node, info);
		mark_irn_visited(node);
	} else {
		info = get_irn_link(node);
	}
	return info;
}
/**
 * Get allocation information for a basic block
 */
static block_info_t *get_block_info(ir_node *block)
{
	block_info_t *info;

	assert(is_Block(block));
	if (!irn_visited(block)) {
		size_t size = sizeof(info[0]) + n_regs * sizeof(info->assignments[0]);
		info = obstack_alloc(&obst, size);
		memset(info, 0, size);
		set_irn_link(block, info);
		mark_irn_visited(block);
	} else {
		info = get_irn_link(block);
	}
	return info;
}
/**
 * Link the allocation info of a node to a copy.
 * Afterwards, both nodes use the same allocation info.
 * The copy must not have an allocation info assigned yet.
 *
 * @param copy   the node that gets the allocation info assigned
 * @param value  the original node
 */
static void link_to(ir_node *copy, ir_node *value)
{
	allocation_info_t *info = get_allocation_info(value);
	assert(!irn_visited(copy));
	set_irn_link(copy, info);
	mark_irn_visited(copy);
}
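
/* Because a copy shares its allocation_info_t with the original value, all
 * preferences gathered for one of them automatically apply to the other.
 * is_copy_of() below relies on this sharing when it searches end-of-block
 * assignments for (copies of) phi arguments. */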
/**
 * Calculate the penalties for every register on a node and its live neighbors.
 *
 * @param live_nodes  the set of live nodes at the current position, may be NULL
 * @param penalty     the penalty to subtract from the register preferences
 * @param limited     a raw bitset containing the limited set for the node
 * @param node        the node
 */
static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
                                      float penalty, const unsigned* limited,
                                      ir_node *node)
{
	ir_nodeset_iterator_t iter;
	unsigned              r;
	ir_node              *neighbor;
	allocation_info_t    *info = get_allocation_info(node);

	/* give penalty for all forbidden regs */
	for (r = 0; r < n_regs; ++r) {
		if (rbitset_is_set(limited, r))
			continue;

		info->prefs[r] -= penalty;
	}

	/* all other live values should get a penalty for allowed regs */
	if (live_nodes == NULL)
		return;

	/* TODO: reduce penalty if there are multiple allowed registers... */
	penalty *= NEIGHBOR_FACTOR;
	foreach_ir_nodeset(live_nodes, neighbor, iter) {
		allocation_info_t *neighbor_info;

		/* TODO: if op is used on multiple inputs we might not do a
		 * continue here */
		if (neighbor == node)
			continue;

		neighbor_info = get_allocation_info(neighbor);
		for (r = 0; r < n_regs; ++r) {
			if (!rbitset_is_set(limited, r))
				continue;

			neighbor_info->prefs[r] -= penalty;
		}
	}
}
/**
 * Calculate the preferences of a definition for the current register class.
 * If the definition uses a limited set of registers, reduce the preferences
 * of these registers on the node and its live neighbors accordingly.
 *
 * @param live_nodes  the set of live nodes at the current node
 * @param weight      the weight
 * @param node        the current node
 */
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
                       ir_node *node)
{
	const arch_register_req_t *req;

	if (get_irn_mode(node) == mode_T) {
		const ir_edge_t *edge;
		foreach_out_edge(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			check_defs(live_nodes, weight, proj);
		}
		return;
	}

	if (!arch_irn_consider_in_reg_alloc(cls, node))
		return;

	req = arch_get_register_req_out(node);
	if (req->type & arch_register_req_type_limited) {
		const unsigned *limited = req->limited;
		float           penalty = weight * DEF_FACTOR;
		give_penalties_for_limits(live_nodes, penalty, limited, node);
	}

	if (req->type & arch_register_req_type_should_be_same) {
		ir_node           *insn  = skip_Proj(node);
		allocation_info_t *info  = get_allocation_info(node);
		int                arity = get_irn_arity(insn);
		int                i;

		/* split the preference vector onto the should_be_same operands */
		float factor = 1.0f / rbitset_popcnt(&req->other_same, arity);
		for (i = 0; i < arity; ++i) {
			ir_node           *op;
			unsigned           r;
			allocation_info_t *op_info;

			if (!rbitset_is_set(&req->other_same, i))
				continue;

			op      = get_irn_n(insn, i);
			op_info = get_allocation_info(op);
			for (r = 0; r < n_regs; ++r) {
				if (bitset_is_set(ignore_regs, r))
					continue;
				op_info->prefs[r] += info->prefs[r] * factor;
			}
		}
	}
}
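
/* Example: for an instruction with should_be_same on two of its inputs,
 * rbitset_popcnt() yields 2, so factor is 0.5 and each of the two operands
 * inherits half of the result's preference vector. */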
/**
 * Walker: Runs on a block and calculates the preferences for every
 * node and every register from the considered register class.
 */
static void analyze_block(ir_node *block, void *data)
{
	float         weight = get_block_execfreq(execfreqs, block);
	ir_nodeset_t  live_nodes;
	ir_node      *node;
	(void) data;

	ir_nodeset_init(&live_nodes);
	be_liveness_end_of_block(lv, cls, block, &live_nodes);

	sched_foreach_reverse(block, node) {
		allocation_info_t *info;
		int                i;
		int                arity;

		if (is_Phi(node)) {
			/* TODO: handle constrained phi-nodes */
			break;
		}

		/* TODO: give/take penalties for should_be_same/different */
		check_defs(&live_nodes, weight, node);

		arity = get_irn_arity(node);

		/* I was lazy, and only allocated 1 unsigned
		   => maximum of 32 uses per node (rewrite if necessary) */
		assert(arity <= (int) sizeof(unsigned) * 8);

		info = get_allocation_info(node);
		for (i = 0; i < arity; ++i) {
			ir_node *op = get_irn_n(node, i);
			if (!arch_irn_consider_in_reg_alloc(cls, op))
				continue;

			/* last usage of a value? */
			if (!ir_nodeset_contains(&live_nodes, op)) {
				rbitset_set(&info->last_uses, i);
			}
		}

		be_liveness_transfer(cls, node, &live_nodes);

		/* update weights based on usage constraints */
		for (i = 0; i < arity; ++i) {
			const arch_register_req_t *req;
			const unsigned            *limited;
			ir_node                   *op = get_irn_n(node, i);

			if (!arch_irn_consider_in_reg_alloc(cls, op))
				continue;

			req = arch_get_register_req(node, i);
			if ((req->type & arch_register_req_type_limited) == 0)
				continue;

			/* TODO: give penalties to neighbors for precolored nodes! */

			limited = req->limited;
			give_penalties_for_limits(&live_nodes, weight * USE_FACTOR, limited,
			                          op);
		}
	}

	ir_nodeset_destroy(&live_nodes);
}
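
/* Note: if nodes with more than 32 inputs ever show up, last_uses could be
 * turned into a flexible-size rbitset allocated behind the struct, just like
 * prefs[] already is. */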
/**
 * Assign register reg to the given node.
 *
 * @param node  the node
 * @param reg   the register
 */
static void use_reg(ir_node *node, const arch_register_t *reg)
{
	unsigned           r          = arch_register_get_index(reg);
	assignment_t      *assignment = &assignments[r];
	allocation_info_t *info;

	assert(assignment->value == NULL);
	assignment->value = node;

	info = get_allocation_info(node);
	info->current_assignment = assignment;

	arch_set_irn_register(node, reg);
}
/**
 * Compare two register preferences in decreasing order.
 */
static int compare_reg_pref(const void *e1, const void *e2)
{
	const reg_pref_t *rp1 = (const reg_pref_t*) e1;
	const reg_pref_t *rp2 = (const reg_pref_t*) e2;
	if (rp1->pref < rp2->pref)
		return 1;
	if (rp1->pref > rp2->pref)
		return -1;
	return 0;
}
static void fill_sort_candidates(reg_pref_t *regprefs,
                                 const allocation_info_t *info)
{
	unsigned r;

	for (r = 0; r < n_regs; ++r) {
		float pref = info->prefs[r];
		if (bitset_is_set(ignore_regs, r)) {
			/* sort ignored registers to the end */
			pref = -10000;
		}
		regprefs[r].num  = r;
		regprefs[r].pref = pref;
	}
	/* TODO: use a stable sort here to avoid unnecessary register jumping */
	qsort(regprefs, n_regs, sizeof(regprefs[0]), compare_reg_pref);
}
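
/* A deterministic order without a stable qsort could be had by breaking ties
 * on the register index (inactive sketch; compare_reg_pref_stable is a
 * hypothetical replacement comparator, not part of the allocator): */
#if 0
static int compare_reg_pref_stable(const void *e1, const void *e2)
{
	const reg_pref_t *rp1 = (const reg_pref_t*) e1;
	const reg_pref_t *rp2 = (const reg_pref_t*) e2;
	if (rp1->pref < rp2->pref)
		return 1;
	if (rp1->pref > rp2->pref)
		return -1;
	/* equal preferences: lower register index first */
	if (rp1->num < rp2->num)
		return -1;
	if (rp1->num > rp2->num)
		return 1;
	return 0;
}
#endif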
/**
 * Determine and assign a register for node @p node
 */
static void assign_reg(const ir_node *block, ir_node *node)
{
	const arch_register_t     *reg;
	allocation_info_t         *info;
	const arch_register_req_t *req;
	reg_pref_t                *reg_prefs;
	ir_node                   *in_node;
	unsigned                   i;

	assert(arch_irn_consider_in_reg_alloc(cls, node));

	/* preassigned register? */
	reg = arch_get_irn_register(node);
	if (reg != NULL) {
		DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, reg->name));
		use_reg(node, reg);
		return;
	}

	/* give should_be_same boni */
	info = get_allocation_info(node);
	req  = arch_get_register_req_out(node);

	in_node = skip_Proj(node);
	if (req->type & arch_register_req_type_should_be_same) {
		float weight = get_block_execfreq(execfreqs, block);
		int   arity  = get_irn_arity(in_node);
		int   i2;

		assert(arity <= (int) sizeof(req->other_same) * 8);
		for (i2 = 0; i2 < arity; ++i2) {
			ir_node               *in;
			const arch_register_t *reg;
			unsigned               r;

			if (!rbitset_is_set(&req->other_same, i2))
				continue;

			in  = get_irn_n(in_node, i2);
			reg = arch_get_irn_register(in);
			assert(reg != NULL);
			r = arch_register_get_index(reg);
			if (bitset_is_set(ignore_regs, r))
				continue;
			info->prefs[r] += weight * SHOULD_BE_SAME;
		}
	}

	/* TODO: handle must_be_different */

	/* sort the candidate registers by decreasing preference */
	DB((dbg, LEVEL_2, "Candidates for %+F:", node));
	reg_prefs = alloca(n_regs * sizeof(reg_prefs[0]));
	fill_sort_candidates(reg_prefs, info);
	for (i = 0; i < n_regs; ++i) {
		unsigned num = reg_prefs[i].num;
		const arch_register_t *reg = arch_register_for_index(cls, num);
		DB((dbg, LEVEL_2, " %s(%f)", reg->name, reg_prefs[i].pref));
	}
	DB((dbg, LEVEL_2, "\n"));

	/* take the most preferred register that is still free */
	for (i = 0; i < n_regs; ++i) {
		unsigned r = reg_prefs[i].num;
		/* ignores should be last and we should have a non-ignore left */
		assert(!bitset_is_set(ignore_regs, r));
		/* already used?
		   TODO: It might be better to copy the value occupying the register
		   around here, find out when... */
		if (assignments[r].value != NULL)
			continue;
		reg = arch_register_for_index(cls, r);
		DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
		use_reg(node, reg);
		break;
	}
}
static void free_reg_of_value(ir_node *node)
{
	allocation_info_t *info;
	assignment_t      *assignment;
	unsigned           r;

	if (!arch_irn_consider_in_reg_alloc(cls, node))
		return;

	info       = get_allocation_info(node);
	assignment = info->current_assignment;

	assert(assignment != NULL);

	r = assignment - assignments;
	DB((dbg, LEVEL_2, "Value %+F ended, freeing %s\n",
	    node, arch_register_for_index(cls, r)->name));
	assignment->value        = NULL;
	info->current_assignment = NULL;
}
/**
 * Return the index of the currently assigned register of a node.
 */
static unsigned get_current_reg(ir_node *node)
{
	allocation_info_t *info       = get_allocation_info(node);
	assignment_t      *assignment = info->current_assignment;
	return assignment - assignments;
}

/**
 * Return the current assignment of a node.
 */
static assignment_t *get_current_assignment(ir_node *node)
{
	allocation_info_t *info = get_allocation_info(node);
	return info->current_assignment;
}
/**
 * Add a permutation in front of a node and change the assignments
 * due to this permutation.
 *
 * To understand this, imagine a permutation where, for example, the value
 * in register 3 has to end up in two destination registers while the values
 * in registers 1, 2, 4 and 7 each have a single destination.
 *
 * First we count how many destinations a single value has. At the same time
 * we can be sure that each destination register has at most 1 source register
 * (it can have 0, which means we don't care what value is in it).
 * We ignore all fulfilled permutations (like 7->7).
 * In a first pass we create as many copy instructions as possible, as they
 * are generally cheaper than exchanges. We do this by counting into how many
 * destinations a register has to be copied (in the example it's 2 for register
 * 3, or 1 for the registers 1, 2, 4 and 7).
 * We can then create a copy into a destination register whenever the use count
 * of that register is 0 (= no one else needs the value in the register).
 *
 * After this step only cycles are left. We implement a cyclic permutation
 * of n registers with n-1 transpositions.
 *
 * @param live_nodes   the set of live nodes, updated due to live range splits
 * @param before       the node before which we add the permutation
 * @param permutation  the permutation array: indices are the destination
 *                     registers, the values in the array are the source
 *                     registers.
 */
static void permutate_values(ir_nodeset_t *live_nodes, ir_node *before,
                             unsigned *permutation)
{
	ir_node **ins    = ALLOCANZ(ir_node*, n_regs);
	unsigned *n_used = ALLOCANZ(unsigned, n_regs);
	ir_node  *block;
	unsigned  r;

	/* create a list of permutations. Leave out fixpoints. */
	for (r = 0; r < n_regs; ++r) {
		unsigned      old_reg = permutation[r];
		assignment_t *assignment;
		ir_node      *value;

		/* no need to do anything for a fixpoint */
		if (old_reg == r)
			continue;

		assignment = &assignments[old_reg];
		value      = assignment->value;
		if (value == NULL) {
			/* nothing to do here, reg is not live. Mark it as fixpoint
			 * so we ignore it in the next steps */
			permutation[r] = r;
			continue;
		}

		ins[old_reg] = value;
		++n_used[old_reg];

		/* free occupation infos, we'll add the values back later */
		if (live_nodes != NULL) {
			free_reg_of_value(value);
			ir_nodeset_remove(live_nodes, value);
		} else {
			assignment->value = NULL;
		}
	}

	block = get_nodes_block(before);

	/* step1: create copies where immediately possible */
	for (r = 0; r < n_regs; /* empty */) {
		ir_node               *copy;
		ir_node               *src;
		const arch_register_t *reg;
		unsigned               old_r = permutation[r];

		/* - no need to do anything for fixed points.
		   - we can't copy if the value in the dest reg is still needed */
		if (old_r == r || n_used[r] > 0) {
			++r;
			continue;
		}

		/* create a copy */
		src  = ins[old_r];
		copy = be_new_Copy(cls, block, src);
		reg  = arch_register_for_index(cls, r);
		DB((dbg, LEVEL_2, "Copy %+F (from %+F) -> %s\n", copy, src, reg->name));
		link_to(copy, src);
		use_reg(copy, reg);
		sched_add_before(before, copy);
		if (live_nodes != NULL)
			ir_nodeset_insert(live_nodes, copy);

		/* old register has 1 user less, permutation is resolved */
		assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
		assert(n_used[old_r] > 0);
		--n_used[old_r];
		permutation[r] = r;

		/* advance or jump back (this copy could have enabled another copy) */
		if (old_r < r && n_used[old_r] == 0) {
			r = old_r;
		} else {
			++r;
		}
	}

	/* at this point we only have "cycles" left which we have to resolve with
	 * perm instructions
	 * TODO: if we have free registers left, then we should really use copy
	 * instructions for any cycle longer than 2 registers...
	 * (this is probably architecture dependent, there might be archs where
	 * copies are preferable even for 2 cycles)
	 */

	/* create perms with the rest */
	for (r = 0; r < n_regs; /* empty */) {
		const arch_register_t *reg;
		unsigned  old_r = permutation[r];
		unsigned  r2;
		ir_node  *in[2];
		ir_node  *perm;
		ir_node  *proj0;
		ir_node  *proj1;

		if (old_r == r) {
			++r;
			continue;
		}

		/* we shouldn't have copies from 1 value to multiple destinations left */
		assert(n_used[old_r] == 1);

		/* exchange old_r and r2; after that old_r is a fixed point */
		r2 = permutation[old_r];

		in[0] = ins[r2];
		in[1] = ins[old_r];
		perm  = be_new_Perm(cls, block, 2, in);
		sched_add_before(before, perm);

		proj0 = new_r_Proj(block, perm, get_irn_mode(in[0]), 0);
		link_to(proj0, in[0]);
		reg = arch_register_for_index(cls, old_r);
		use_reg(proj0, reg);

		proj1 = new_r_Proj(block, perm, get_irn_mode(in[1]), 1);

		/* 1 value is now in the correct register */
		permutation[old_r] = old_r;
		/* the source of r changed to r2 */
		permutation[r] = r2;
		ins[r2] = proj1;
		reg = arch_register_for_index(cls, r2);
		link_to(proj1, in[1]);
		if (r == r2) {
			/* if we have reached a fixpoint update data structures */
			use_reg(proj1, reg);
		} else {
			arch_set_irn_register(proj1, reg);
		}
	}

	/* now we should only have fixpoints left */
	for (r = 0; r < n_regs; ++r) {
		assert(permutation[r] == r);
	}
}
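
/* Standalone sketch of the same resolution strategy on plain arrays
 * (hypothetical demo, kept inactive; printf stands in for the Copy/Perm
 * nodes created above): */
#if 0
static void resolve_permutation_demo(unsigned *permutation, unsigned n)
{
	unsigned *n_used = ALLOCANZ(unsigned, n);
	unsigned  r;

	/* count how often each register is needed as a source */
	for (r = 0; r < n; ++r) {
		if (permutation[r] != r)
			++n_used[permutation[r]];
	}
	/* step 1: copy into every destination whose old value is dead */
	for (r = 0; r < n; /* empty */) {
		unsigned old_r = permutation[r];
		if (old_r == r || n_used[r] > 0) {
			++r;
			continue;
		}
		printf("copy r%u -> r%u\n", old_r, r);
		permutation[r] = r;
		--n_used[old_r];
		/* the copy may have enabled a copy into old_r */
		r = (old_r < r && n_used[old_r] == 0) ? old_r : r + 1;
	}
	/* step 2: the remaining cycles, n-1 transpositions per n-cycle */
	for (r = 0; r < n; /* empty */) {
		unsigned old_r = permutation[r];
		unsigned r2;
		if (old_r == r) {
			++r;
			continue;
		}
		r2 = permutation[old_r];
		printf("swap r%u <-> r%u\n", old_r, r2);
		permutation[old_r] = old_r;
		permutation[r]     = r2;
	}
}
#endif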
/**
 * Free the registers of values which have their last use at node @p node.
 *
 * @param live_nodes  set of live nodes, will be updated
 * @param node        the node to consider
 */
static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
{
	allocation_info_t *info  = get_allocation_info(node);
	int                arity = get_irn_arity(node);
	int                i;

	for (i = 0; i < arity; ++i) {
		ir_node *op;

		/* check whether this is a last use of the operand */
		if (!rbitset_is_set(&info->last_uses, i))
			continue;

		op = get_irn_n(node, i);
		free_reg_of_value(op);
		ir_nodeset_remove(live_nodes, op);
	}
}
/**
 * Enforce constraints at a node by live range splits.
 *
 * @param live_nodes  the set of live nodes, might be changed
 * @param node        the current node
 */
static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node)
{
	int                  arity = get_irn_arity(node);
	int                  i, dummy, res;
	int                  good = 1;
	hungarian_problem_t *bp;
	unsigned             l, r, p;
	unsigned            *assignment;

	/* see if any use constraints are not met */
	for (i = 0; i < arity; ++i) {
		ir_node                   *op = get_irn_n(node, i);
		const arch_register_req_t *req;
		const unsigned            *limited;

		if (!arch_irn_consider_in_reg_alloc(cls, op))
			continue;

		/* are there any limitations for the i'th operand? */
		req = arch_get_register_req(node, i);
		if ((req->type & arch_register_req_type_limited) == 0)
			continue;

		limited = req->limited;
		r       = get_current_reg(op);
		if (!rbitset_is_set(limited, r)) {
			/* found an assignment outside the limited set */
			good = 0;
			break;
		}
	}

	if (good)
		return;

	/* swap values around */
	bp = hungarian_new(n_regs, n_regs, HUNGARIAN_MATCH_PERFECT);

	/* add all combinations, then remove not allowed ones */
	for (l = 0; l < n_regs; ++l) {
		if (bitset_is_set(ignore_regs, l)) {
			hungarian_add(bp, l, l, 90);
			continue;
		}

		for (r = 0; r < n_regs; ++r) {
			if (bitset_is_set(ignore_regs, r))
				continue;

			/* staying in the same register is slightly preferred (90 vs. 89) */
			hungarian_add(bp, l, r, l == r ? 90 : 89);
		}
	}

	for (i = 0; i < arity; ++i) {
		ir_node                   *op = get_irn_n(node, i);
		const arch_register_req_t *req;
		const unsigned            *limited;
		unsigned                   current_reg;

		if (!arch_irn_consider_in_reg_alloc(cls, op))
			continue;

		req = arch_get_register_req(node, i);
		if ((req->type & arch_register_req_type_limited) == 0)
			continue;

		limited     = req->limited;
		current_reg = get_current_reg(op);
		for (r = 0; r < n_regs; ++r) {
			if (rbitset_is_set(limited, r))
				continue;
			hungarian_remv(bp, current_reg, r);
		}
	}

	hungarian_print_costmatrix(bp, 1);
	hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

	assignment = ALLOCAN(unsigned, n_regs);
	res = hungarian_solve(bp, (int*) assignment, &dummy, 0);
	assert(res == 0);

	printf("Swap result:");
	for (p = 0; p < n_regs; ++p) {
		printf(" %d", assignment[p]);
	}
	printf("\n");

	hungarian_free(bp);

	permutate_values(live_nodes, node, assignment);
}
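
/* The matching above maps every current register to a new one such that all
 * limited uses are satisfied. The cost scheme (90 for staying put, 89 for
 * moving) makes the solver prefer solutions that leave values where they are
 * whenever the constraints allow it. */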
/** test whether a node @p n is a copy of the value of node @p of */
static int is_copy_of(ir_node *n, ir_node *of)
{
	allocation_info_t *of_info;

	if (n == NULL)
		return 0;

	if (n == of)
		return 1;

	of_info = get_allocation_info(of);
	if (!irn_visited(n))
		return 0;

	return of_info == get_irn_link(n);
}
/** find a value in the end-assignment of a basic block
 * @returns the index into the assignment array if found
 *          -1 otherwise
 */
static int find_value_in_block_info(block_info_t *info, ir_node *value)
{
	unsigned      r;
	assignment_t *assignments = info->assignments;
	for (r = 0; r < n_regs; ++r) {
		const assignment_t *assignment = &assignments[r];
		if (is_copy_of(assignment->value, value))
			return r;
	}

	return -1;
}
/**
 * Create the necessary permutations at the end of a basic block to fulfill
 * the register assignment for phi-nodes in the next block.
 */
static void add_phi_permutations(ir_node *block, int p)
{
	unsigned      r;
	unsigned     *permutation;
	assignment_t *old_assignments;
	int           need_permutation;
	ir_node      *node;
	ir_node      *pred = get_Block_cfgpred_block(block, p);

	block_info_t *pred_info = get_block_info(pred);

	/* predecessor not processed yet? nothing to do */
	if (!pred_info->processed)
		return;

	permutation = ALLOCAN(unsigned, n_regs);
	for (r = 0; r < n_regs; ++r) {
		permutation[r] = r;
	}

	/* check phi nodes */
	need_permutation = 0;
	node = sched_first(block);
	for ( ; is_Phi(node); node = sched_next(node)) {
		const arch_register_t *reg;
		int                    regn;
		int                    a;
		ir_node               *op;

		if (!arch_irn_consider_in_reg_alloc(cls, node))
			continue;

		op = get_Phi_pred(node, p);
		a  = find_value_in_block_info(pred_info, op);
		assert(a >= 0);

		reg  = arch_get_irn_register(node);
		regn = arch_register_get_index(reg);
		if (regn != a) {
			permutation[regn] = a;
			need_permutation  = 1;
		}
	}

	if (!need_permutation)
		return;

	/* permutate the end-assignments of the predecessor block */
	old_assignments = assignments;
	assignments     = pred_info->assignments;
	permutate_values(NULL, be_get_end_of_block_insertion_point(pred),
	                 permutation);
	assignments = old_assignments;

	/* change the phi inputs to the copied values */
	node = sched_first(block);
	for ( ; is_Phi(node); node = sched_next(node)) {
		int      a;
		ir_node *op;

		if (!arch_irn_consider_in_reg_alloc(cls, node))
			continue;

		op = get_Phi_pred(node, p);
		a  = find_value_in_block_info(pred_info, op);
		assert(a >= 0);

		op = pred_info->assignments[a].value;
		set_Phi_pred(node, p, op);
	}
}
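
/* Example: if a phi was assigned r2 but its argument for predecessor p ended
 * up in r5 at the end of p (possibly as a copy), the loop above records
 * permutation[2] = 5, and permutate_values() inserts the required copy or
 * swap at the end of the predecessor block. */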
/**
 * Walker: assign registers to all nodes of a block that
 * need registers from the currently considered register class.
 */
static void allocate_coalesce_block(ir_node *block, void *data)
{
	int                    i, p;
	unsigned               r;
	ir_nodeset_t           live_nodes;
	ir_nodeset_iterator_t  iter;
	ir_node               *node, *start;
	int                    n_preds;
	block_info_t          *block_info;
	block_info_t         **pred_block_infos;
	(void) data;

	DB((dbg, LEVEL_2, "Allocating in block %+F\n", block));

	/* clear assignments */
	block_info  = get_block_info(block);
	assignments = block_info->assignments;

	for (r = 0; r < n_regs; ++r) {
		assignment_t      *assignment = &assignments[r];
		ir_node           *value      = assignment->value;
		allocation_info_t *info;

		if (value == NULL)
			continue;

		info = get_allocation_info(value);
		info->current_assignment = assignment;
	}

	ir_nodeset_init(&live_nodes);

	/* gather regalloc infos of predecessor blocks */
	n_preds          = get_Block_n_cfgpreds(block);
	pred_block_infos = ALLOCAN(block_info_t*, n_preds);
	for (i = 0; i < n_preds; ++i) {
		ir_node *pred = get_Block_cfgpred_block(block, i);
		pred_block_infos[i] = get_block_info(pred);
	}

	/* collect live-in nodes and preassigned values */
	be_lv_foreach(lv, block, be_lv_state_in, i) {
		const arch_register_t *reg;

		node = be_lv_get_irn(lv, block, i);
		if (!arch_irn_consider_in_reg_alloc(cls, node))
			continue;

		/* remember that this node is live at the beginning of the block */
		ir_nodeset_insert(&live_nodes, node);

		/* if the node already has a register assigned use it */
		reg = arch_get_irn_register(node);
		if (reg != NULL) {
			/* TODO: consult pred-block infos here. The value could be copied
			   away in some/all predecessor blocks. We need to construct
			   phi-nodes in this case.
			   We even need to construct some Phi_0 like constructs in cases
			   where the predecessor allocation is not determined yet. */
			use_reg(node, reg);
		}
	}

	/* handle phis... */
	node = sched_first(block);
	for ( ; is_Phi(node); node = sched_next(node)) {
		const arch_register_t *reg;

		if (!arch_irn_consider_in_reg_alloc(cls, node))
			continue;

		/* fill in regs already assigned */
		reg = arch_get_irn_register(node);
		if (reg != NULL) {
			use_reg(node, reg);
		} else {
			/* TODO: give boni for registers already assigned at the
			   predecessors */
			assign_reg(block, node);
		}
	}
	start = node;

	/* assign regs for live-in values */
	foreach_ir_nodeset(&live_nodes, node, iter) {
		const arch_register_t *reg;
		reg = arch_get_irn_register(node);
		if (reg != NULL)
			continue;

		assign_reg(block, node);
	}

	/* permutate values at end of predecessor blocks in case of phi-nodes */
	for (p = 0; p < n_preds; ++p) {
		add_phi_permutations(block, p);
	}

	/* assign instructions in the block */
	for (node = start; !sched_is_end(node); node = sched_next(node)) {
		int arity = get_irn_arity(node);
		int i;

		/* enforce use constraints */
		enforce_constraints(&live_nodes, node);

		/* exchange values to copied values where needed */
		for (i = 0; i < arity; ++i) {
			ir_node      *op = get_irn_n(node, i);
			assignment_t *assignment;

			if (!arch_irn_consider_in_reg_alloc(cls, op))
				continue;
			assignment = get_current_assignment(op);
			assert(assignment != NULL);
			if (op != assignment->value) {
				set_irn_n(node, i, assignment->value);
			}
		}

		free_last_uses(&live_nodes, node);

		/* assign output registers */
		/* TODO: 2 phases: first: pre-assigned ones, 2nd real regs */
		if (get_irn_mode(node) == mode_T) {
			const ir_edge_t *edge;
			foreach_out_edge(node, edge) {
				ir_node *proj = get_edge_src_irn(edge);
				if (!arch_irn_consider_in_reg_alloc(cls, proj))
					continue;
				assign_reg(block, proj);
			}
		} else if (arch_irn_consider_in_reg_alloc(cls, node)) {
			assign_reg(block, node);
		}
	}

	ir_nodeset_destroy(&live_nodes);

	block_info->processed = 1;

	/* if we have exactly 1 successor then we might be able to produce phi
	   copies now */
	if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) == 1) {
		const ir_edge_t *edge
			= get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
		ir_node      *succ      = get_edge_src_irn(edge);
		int           p         = get_edge_src_pos(edge);
		block_info_t *succ_info = get_block_info(succ);

		if (succ_info->processed) {
			add_phi_permutations(succ, p);
		}
	}
}
/**
 * Run the register allocator for the current register class.
 */
static void be_straight_alloc_cls(void)
{
	n_regs = arch_register_class_n_regs(cls);
	lv     = be_assure_liveness(birg);
	be_liveness_assure_sets(lv);
	be_liveness_assure_chk(lv);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
	inc_irg_visited(irg);

	DB((dbg, LEVEL_2, "=== Allocating registers of %s ===\n", cls->name));

	irg_block_walk_graph(irg, NULL, analyze_block, NULL);
	irg_block_walk_graph(irg, NULL, allocate_coalesce_block, NULL);

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK | IR_RESOURCE_IRN_VISITED);
}
/**
 * Run the spiller on the current graph.
 */
static void spill(void)
{
	/* make sure all nodes show their real register pressure */
	BE_TIMER_PUSH(t_ra_constr);
	be_pre_spill_prepare_constr(birg, cls);
	BE_TIMER_POP(t_ra_constr);

	/* spill */
	BE_TIMER_PUSH(t_ra_spill);
	be_do_spill(birg, cls);
	BE_TIMER_POP(t_ra_spill);

	BE_TIMER_PUSH(t_ra_spill_apply);
	check_for_memory_operands(irg);
	BE_TIMER_POP(t_ra_spill_apply);
}
/**
 * The straight register allocator for a whole procedure.
 */
static void be_straight_alloc(be_irg_t *new_birg)
{
	const arch_env_t *arch_env = new_birg->main_env->arch_env;
	int               n_cls    = arch_env_get_n_reg_class(arch_env);
	int               c;

	obstack_init(&obst);

	birg      = new_birg;
	irg       = be_get_birg_irg(birg);
	execfreqs = birg->exec_freq;

	/* TODO: extract some of the stuff from bechordal allocator, like
	 * statistics, time measurements, etc. and use them here too */

	for (c = 0; c < n_cls; ++c) {
		cls = arch_env_get_reg_class(arch_env, c);
		if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
			continue;

		stat_ev_ctx_push_str("bestraight_cls", cls->name);

		n_regs      = cls->n_regs;
		ignore_regs = bitset_malloc(n_regs);
		be_put_ignore_regs(birg, cls, ignore_regs);

		spill();

		/* verify schedule and register pressure */
		BE_TIMER_PUSH(t_verify);
		if (birg->main_env->options->vrfy_option == BE_CH_VRFY_WARN) {
			be_verify_schedule(birg);
			be_verify_register_pressure(birg, cls, irg);
		} else if (birg->main_env->options->vrfy_option == BE_CH_VRFY_ASSERT) {
			assert(be_verify_schedule(birg) && "Schedule verification failed");
			assert(be_verify_register_pressure(birg, cls, irg)
			       && "Register pressure verification failed");
		}
		BE_TIMER_POP(t_verify);

		BE_TIMER_PUSH(t_ra_color);
		be_straight_alloc_cls();
		BE_TIMER_POP(t_ra_color);

		bitset_free(ignore_regs);

		/* TODO: dump intermediate results */

		stat_ev_ctx_pop("bestraight_cls");
	}

	obstack_free(&obst, NULL);
}
/**
 * Initializes this module.
 */
void be_init_straight_alloc(void)
{
	static be_ra_t be_ra_straight = {
		be_straight_alloc,
	};

	FIRM_DBG_REGISTER(dbg, "firm.be.straightalloc");

	be_register_allocator("straight", &be_ra_straight);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_straight_alloc);