/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Preference Guided Register Assignment
 * @author  Matthias Braun
 *
 * The idea is to allocate registers in 2 passes:
 * 1. A first pass to determine "preferred" registers for live-ranges. This
 *    calculates for each register and each live-range a value indicating
 *    its usefulness. (You can roughly think of the value as the negative
 *    cost of the copies needed when the value sits in that specific
 *    register...)
 * 2. Walk the blocks and assign registers in a greedy fashion, preferring
 *    registers with high preference values. When register constraints are
 *    not met, add copies and split live-ranges.
 *
 * TODO:
 *  - make use of free registers in the permute_values code
 */

#include "iredges_t.h"
#include "irgraph_t.h"
#include "raw_bitset.h"
#include "unionfind.h"
#include "hungarian.h"
#include "bechordal_t.h"
#include "bespillutil.h"

#define USE_FACTOR                     1.0f
#define DEF_FACTOR                     1.0f
#define NEIGHBOR_FACTOR                0.2f
#define AFF_SHOULD_BE_SAME             0.5f
#define SPLIT_DELTA                    1.0f
#define MAX_OPTIMISTIC_SPLIT_RECURSION 0
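
/* All of the factors above are applied scaled by block execution frequency:
 * a constraint or affinity seen in a block with frequency f changes the
 * affected preference values by f times the corresponding factor. */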

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static struct obstack               obst;
static ir_graph                    *irg;
static const arch_register_class_t *cls;
static be_lv_t                     *lv;
static const ir_exec_freq          *execfreqs;
static unsigned                     n_regs;
static unsigned                    *normal_regs;
static int                         *congruence_classes;
static ir_node                    **block_order;
static int                          n_block_order;
static int                          create_preferences        = true;
static int                          create_congruence_classes = true;
static int                          propagate_phi_registers   = true;

static const lc_opt_table_entry_t options[] = {
    LC_OPT_ENT_BOOL("prefs",       "use preference based coloring",
                    &create_preferences),
    LC_OPT_ENT_BOOL("congruences", "create congruence classes",
                    &create_congruence_classes),
    LC_OPT_ENT_BOOL("prop_phi",    "propagate phi registers",
                    &propagate_phi_registers),
    LC_OPT_LAST
};

/** currently active assignments (while processing a basic block)
 * maps registers to values (their current copies) */
static ir_node **assignments;

/**
 * allocation information: last_uses, register preferences
 * the information is per firm-node.
 */
struct allocation_info_t {
    unsigned  last_uses[2];   /**< bitset indicating last uses (input pos) */
    ir_node  *current_value;  /**< copy of the value that should be used */
    ir_node  *original_value; /**< for copies, points to the original value */
    float     prefs[0];       /**< register preferences */
};
typedef struct allocation_info_t allocation_info_t;

/** helper data structure used when sorting register preferences */
struct reg_pref_t {
    unsigned num;
    float    pref;
};
typedef struct reg_pref_t reg_pref_t;

/** per basic-block information */
struct block_info_t {
    bool     processed;      /**< indicate whether block is processed */
    ir_node *assignments[0]; /**< register assignments at end of block */
};
typedef struct block_info_t block_info_t;

/**
 * Get the allocation info for a node.
 * The info is allocated on the first visit of a node.
 */
static allocation_info_t *get_allocation_info(ir_node *node)
{
    allocation_info_t *info = get_irn_link(node);
    if (info == NULL) {
        info = OALLOCFZ(&obst, allocation_info_t, prefs, n_regs);
        info->current_value  = node;
        info->original_value = node;
        set_irn_link(node, info);
    }

    return info;
}

static allocation_info_t *try_get_allocation_info(const ir_node *node)
{
    return (allocation_info_t*) get_irn_link(node);
}

/**
 * Get allocation information for a basic block
 */
static block_info_t *get_block_info(ir_node *block)
{
    block_info_t *info = get_irn_link(block);

    assert(is_Block(block));
    if (info == NULL) {
        info = OALLOCFZ(&obst, block_info_t, assignments, n_regs);
        set_irn_link(block, info);
    }

    return info;
}

/**
 * Link the allocation info of a node to a copy.
 * Afterwards, both nodes use the same allocation info.
 * The copy must not have an allocation info assigned yet.
 *
 * @param copy   the node that gets the allocation info assigned
 * @param value  the original node
 */
static void mark_as_copy_of(ir_node *copy, ir_node *value)
{
    ir_node           *original;
    allocation_info_t *info      = get_allocation_info(value);
    allocation_info_t *copy_info = get_allocation_info(copy);

    /* find original value */
    original = info->original_value;
    if (original != value) {
        info = get_allocation_info(original);
    }

    assert(info->original_value == original);
    info->current_value = copy;

    /* the copy should not be linked to something else yet */
    assert(copy_info->original_value == copy);
    copy_info->original_value = original;

    /* copy over allocation preferences */
    memcpy(copy_info->prefs, info->prefs, n_regs * sizeof(copy_info->prefs[0]));
}
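
/* Example: if a live-range v is split twice (v -> c1 -> c2), then
 * get_allocation_info(c2)->original_value is still v, while the info of v
 * records c2 as current_value; rewire_inputs() uses this to redirect
 * remaining users of v (or c1) to c2. */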

/**
 * Calculate the penalties for every register on a node and its live neighbors.
 *
 * @param live_nodes  the set of live nodes at the current position, may be NULL
 * @param penalty     the penalty to subtract from the register preferences
 * @param limited     a raw bitset containing the limited set for the node
 * @param node        the node
 */
static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
                                      float penalty, const unsigned* limited,
                                      ir_node *node)
{
    ir_nodeset_iterator_t  iter;
    unsigned               r;
    unsigned               n_allowed;
    allocation_info_t     *info = get_allocation_info(node);
    ir_node               *neighbor;

    /* give penalty for all forbidden regs */
    for (r = 0; r < n_regs; ++r) {
        if (rbitset_is_set(limited, r))
            continue;

        info->prefs[r] -= penalty;
    }

    /* all other live values should get a penalty for allowed regs */
    if (live_nodes == NULL)
        return;

    penalty   *= NEIGHBOR_FACTOR;
    n_allowed  = rbitset_popcount(limited, n_regs);
    if (n_allowed > 1) {
        /* only create a very weak penalty if multiple regs are allowed */
        penalty = (penalty * 0.8f) / n_allowed;
    }
    foreach_ir_nodeset(live_nodes, neighbor, iter) {
        allocation_info_t *neighbor_info;

        /* TODO: if op is used on multiple inputs we might not do a
         * continue here */
        if (neighbor == node)
            continue;

        neighbor_info = get_allocation_info(neighbor);
        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(limited, r))
                continue;

            neighbor_info->prefs[r] -= penalty;
        }
    }
}
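
/* Worked example (illustrative numbers): with n_regs == 4 and a node limited
 * to {r1} at penalty 1.0, the node's preferences for r0, r2 and r3 each drop
 * by 1.0; every other live value gets 1.0 * NEIGHBOR_FACTOR == 0.2 subtracted
 * on r1, since occupying r1 would force a copy of the constrained node. With
 * two allowed registers {r1, r2} the neighbor penalty is weakened further to
 * (0.2 * 0.8) / 2 == 0.08 per register. */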

/**
 * Calculate the preferences of a definition for the current register class.
 * If the definition uses a limited set of registers, reduce the preferences
 * for the forbidden registers on the node and penalize its neighbors.
 *
 * @param live_nodes  the set of live nodes at the current node
 * @param weight      the weight (execution frequency of the current block)
 * @param node        the current node
 */
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
                       ir_node *node)
{
    const arch_register_req_t *req = arch_get_register_req_out(node);
    if (req->type & arch_register_req_type_limited) {
        const unsigned *limited = req->limited;
        float           penalty = weight * DEF_FACTOR;
        give_penalties_for_limits(live_nodes, penalty, limited, node);
    }

    if (req->type & arch_register_req_type_should_be_same) {
        ir_node           *insn  = skip_Proj(node);
        allocation_info_t *info  = get_allocation_info(node);
        int                arity = get_irn_arity(insn);
        int                i;

        float factor = 1.0f / rbitset_popcount(&req->other_same, arity);
        for (i = 0; i < arity; ++i) {
            ir_node           *op;
            unsigned           r;
            allocation_info_t *op_info;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op = get_irn_n(insn, i);

            /* if the value at the should_be_same input doesn't die at the
             * node, then it is no use to propagate the constraints (since a
             * copy will emerge anyway) */
            if (ir_nodeset_contains(live_nodes, op))
                continue;

            op_info = get_allocation_info(op);
            for (r = 0; r < n_regs; ++r) {
                op_info->prefs[r] += info->prefs[r] * factor;
            }
        }
    }
}

/**
 * Walker: runs on a block and calculates the preferences for every
 * node and every register from the considered register class.
 */
static void analyze_block(ir_node *block, void *data)
{
    float         weight = (float)get_block_execfreq(execfreqs, block);
    ir_nodeset_t  live_nodes;
    ir_node      *node;
    (void) data;

    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);

    sched_foreach_reverse(block, node) {
        allocation_info_t *info;
        int                i;
        int                arity;

        if (is_Phi(node))
            break;

        if (create_preferences) {
            ir_node *value;
            be_foreach_definition(node, cls, value,
                check_defs(&live_nodes, weight, value);
            );
        }

        /* mark last uses */
        arity = get_irn_arity(node);

        /* the allocation info currently uses a fixed-size bitset to mark the
         * last-used inputs, so we will fail for a node with more inputs than
         * bits in that set */
        if (arity >= (int) sizeof(info->last_uses) * 8) {
            panic("Node with more than %d inputs not supported yet",
                  (int) sizeof(info->last_uses) * 8);
        }

        info = get_allocation_info(node);
        for (i = 0; i < arity; ++i) {
            ir_node                   *op  = get_irn_n(node, i);
            const arch_register_req_t *req = arch_get_register_req_out(op);
            if (req->cls != cls)
                continue;

            /* last usage of a value? */
            if (!ir_nodeset_contains(&live_nodes, op)) {
                rbitset_set(info->last_uses, i);
            }
        }

        be_liveness_transfer(cls, node, &live_nodes);

        if (create_preferences) {
            /* update weights based on usage constraints */
            for (i = 0; i < arity; ++i) {
                const arch_register_req_t *req;
                const unsigned            *limited;
                ir_node                   *op = get_irn_n(node, i);

                if (!arch_irn_consider_in_reg_alloc(cls, op))
                    continue;

                req = arch_get_register_req(node, i);
                if (!(req->type & arch_register_req_type_limited))
                    continue;

                limited = req->limited;
                give_penalties_for_limits(&live_nodes, weight * USE_FACTOR,
                                          limited, op);
            }
        }
    }

    ir_nodeset_destroy(&live_nodes);
}

static void congruence_def(ir_nodeset_t *live_nodes, const ir_node *node)
{
    const arch_register_req_t *req = arch_get_register_req_out(node);

    /* should be same constraint? */
    if (req->type & arch_register_req_type_should_be_same) {
        const ir_node *insn     = skip_Proj_const(node);
        int            arity    = get_irn_arity(insn);
        int            i;
        unsigned       node_idx = get_irn_idx(node);
        node_idx = uf_find(congruence_classes, node_idx);

        for (i = 0; i < arity; ++i) {
            ir_node               *live;
            ir_node               *op;
            int                    op_idx;
            ir_nodeset_iterator_t  iter;
            bool                   interferes = false;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op     = get_irn_n(insn, i);
            op_idx = get_irn_idx(op);
            op_idx = uf_find(congruence_classes, op_idx);

            /* do we interfere with the value? */
            foreach_ir_nodeset(live_nodes, live, iter) {
                int lv_idx = get_irn_idx(live);
                lv_idx = uf_find(congruence_classes, lv_idx);
                if (lv_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            /* don't put in same affinity class if we interfere */
            if (interferes)
                continue;

            node_idx = uf_union(congruence_classes, node_idx, op_idx);
            DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
                node, op));
            /* one should_be_same is enough... */
            break;
        }
    }
}

static void create_congruence_class(ir_node *block, void *data)
{
    ir_nodeset_t live_nodes;
    ir_node     *node;

    (void) data;
    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);

    /* check should be same constraints */
    sched_foreach_reverse(block, node) {
        ir_node *value;
        if (is_Phi(node))
            break;

        be_foreach_definition(node, cls, value,
            congruence_def(&live_nodes, value);
        );
        be_liveness_transfer(cls, node, &live_nodes);
    }

    /* check phi congruence classes */
    sched_foreach_reverse_from(node, node) {
        int i;
        int arity;
        int node_idx;
        assert(is_Phi(node));

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        node_idx = get_irn_idx(node);
        node_idx = uf_find(congruence_classes, node_idx);

        arity = get_irn_arity(node);
        for (i = 0; i < arity; ++i) {
            bool                   interferes = false;
            ir_nodeset_iterator_t  iter;
            unsigned               r;
            int                    old_node_idx;
            ir_node               *live;
            ir_node               *phi;
            allocation_info_t     *head_info;
            allocation_info_t     *other_info;
            ir_node               *op     = get_Phi_pred(node, i);
            int                    op_idx = get_irn_idx(op);
            op_idx = uf_find(congruence_classes, op_idx);

            /* do we interfere with the value? */
            foreach_ir_nodeset(&live_nodes, live, iter) {
                int lv_idx = get_irn_idx(live);
                lv_idx = uf_find(congruence_classes, lv_idx);
                if (lv_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            /* don't put in same affinity class if we interfere */
            if (interferes)
                continue;
            /* any other phi has the same input? */
            sched_foreach(block, phi) {
                ir_node *oop;
                int      oop_idx;
                if (!is_Phi(phi))
                    break;
                if (!arch_irn_consider_in_reg_alloc(cls, phi))
                    continue;
                oop = get_Phi_pred(phi, i);
                if (oop == op)
                    continue;
                oop_idx = get_irn_idx(oop);
                oop_idx = uf_find(congruence_classes, oop_idx);
                if (oop_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            if (interferes)
                continue;

            /* merge the 2 congruence classes and sum up their preferences */
            old_node_idx = node_idx;
            node_idx     = uf_union(congruence_classes, node_idx, op_idx);
            DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
                node, op));

            old_node_idx = node_idx == old_node_idx ? op_idx : old_node_idx;
            head_info  = get_allocation_info(get_idx_irn(irg, node_idx));
            other_info = get_allocation_info(get_idx_irn(irg, old_node_idx));
            for (r = 0; r < n_regs; ++r) {
                head_info->prefs[r] += other_info->prefs[r];
            }
        }
    }

    ir_nodeset_destroy(&live_nodes);
}
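
/* Example: for a phi  x = Phi(a, b)  the walker tries to union x, a and b
 * into one congruence class, so that set_congruence_prefs() later gives all
 * of them identical register preferences; the union is skipped whenever an
 * operand interferes with a value already live (or with another phi's
 * operand in the same predecessor position). */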

static void set_congruence_prefs(ir_node *node, void *data)
{
    allocation_info_t *info;
    allocation_info_t *head_info;
    unsigned node_idx = get_irn_idx(node);
    unsigned node_set = uf_find(congruence_classes, node_idx);

    (void) data;

    /* head of congruence class or not in any class */
    if (node_set == node_idx)
        return;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    head_info = get_allocation_info(get_idx_irn(irg, node_set));
    info      = get_allocation_info(node);

    memcpy(info->prefs, head_info->prefs, n_regs * sizeof(info->prefs[0]));
}

static void combine_congruence_classes(void)
{
    size_t n = get_irg_last_idx(irg);
    congruence_classes = XMALLOCN(int, n);
    uf_init(congruence_classes, n);

    /* create congruence classes */
    irg_block_walk_graph(irg, create_congruence_class, NULL, NULL);
    /* merge preferences */
    irg_walk_graph(irg, set_congruence_prefs, NULL, NULL);
    free(congruence_classes);
}

/**
 * Assign register reg to the given node.
 *
 * @param node  the node
 * @param reg   the register
 */
static void use_reg(ir_node *node, const arch_register_t *reg)
{
    unsigned r = arch_register_get_index(reg);
    assignments[r] = node;
    arch_set_irn_register(node, reg);
}

static void free_reg_of_value(ir_node *node)
{
    const arch_register_t *reg;
    unsigned               r;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    reg = arch_get_irn_register(node);
    r   = arch_register_get_index(reg);
    /* assignments[r] may be NULL if a value is used at 2 inputs,
     * so it gets freed twice. */
    assert(assignments[r] == node || assignments[r] == NULL);
    assignments[r] = NULL;
}

/**
 * Compare two register preferences in decreasing order.
 */
static int compare_reg_pref(const void *e1, const void *e2)
{
    const reg_pref_t *rp1 = (const reg_pref_t*) e1;
    const reg_pref_t *rp2 = (const reg_pref_t*) e2;
    if (rp1->pref < rp2->pref)
        return 1;
    if (rp1->pref > rp2->pref)
        return -1;
    return 0;
}

static void fill_sort_candidates(reg_pref_t *regprefs,
                                 const allocation_info_t *info)
{
    unsigned r;

    for (r = 0; r < n_regs; ++r) {
        float pref = info->prefs[r];
        regprefs[r].num  = r;
        regprefs[r].pref = pref;
    }
    /* TODO: use a stable sort here to avoid unnecessary register jumping */
    qsort(regprefs, n_regs, sizeof(regprefs[0]), compare_reg_pref);
}

static bool try_optimistic_split(ir_node *to_split, ir_node *before,
                                 float pref, float pref_delta,
                                 unsigned *forbidden_regs, int recursion)
{
    const arch_register_t *from_reg;
    const arch_register_t *reg;
    ir_node               *original_insn;
    ir_node               *block;
    ir_node               *copy;
    unsigned               r = 0;
    unsigned               from_r;
    unsigned               i;
    allocation_info_t     *info = get_allocation_info(to_split);
    reg_pref_t            *prefs;
    float                  delta = 0;
    float                  split_threshold;

    (void) pref;

    /* stupid hack: don't optimistically split dont_spill nodes...
     * (so we don't split away the values produced because of
     *  must_be_different constraints) */
    original_insn = skip_Proj(info->original_value);
    if (arch_irn_get_flags(original_insn) & arch_irn_flags_dont_spill)
        return false;

    from_reg        = arch_get_irn_register(to_split);
    from_r          = arch_register_get_index(from_reg);
    block           = get_nodes_block(before);
    split_threshold = (float)get_block_execfreq(execfreqs, block) * SPLIT_DELTA;

    if (pref_delta < split_threshold*0.5)
        return false;

    /* find the best free position where we could move to */
    prefs = ALLOCAN(reg_pref_t, n_regs);
    fill_sort_candidates(prefs, info);
    for (i = 0; i < n_regs; ++i) {
        float apref;
        float apref_delta;
        bool  res;
        bool  old_source_state;

        /* we need a normal register which is not an output register
           and different from the current register of to_split */
        r = prefs[i].num;
        if (!rbitset_is_set(normal_regs, r))
            continue;
        if (rbitset_is_set(forbidden_regs, r))
            continue;
        if (r == from_r)
            continue;

        /* is the split worth it? */
        delta = pref_delta + prefs[i].pref;
        if (delta < split_threshold) {
            DB((dbg, LEVEL_3, "Not doing optimistical split of %+F (depth %d), win %f too low\n",
                to_split, recursion, delta));
            return false;
        }

        /* if the register is free then we can do the split */
        if (assignments[r] == NULL)
            break;

        /* otherwise we might try recursively calling optimistic_split */
        if (recursion+1 > MAX_OPTIMISTIC_SPLIT_RECURSION)
            continue;

        apref        = prefs[i].pref;
        apref_delta  = i+1 < n_regs ? apref - prefs[i+1].pref : 0;
        apref_delta += pref_delta - split_threshold;

        /* our source register isn't a useful destination for recursive
           splits */
        old_source_state = rbitset_is_set(forbidden_regs, from_r);
        rbitset_set(forbidden_regs, from_r);
        /* try recursive split */
        res = try_optimistic_split(assignments[r], before, apref,
                                   apref_delta, forbidden_regs, recursion+1);
        /* restore our destination */
        if (old_source_state) {
            rbitset_set(forbidden_regs, from_r);
        } else {
            rbitset_clear(forbidden_regs, from_r);
        }

        if (res)
            break;
    }
    if (i >= n_regs)
        return false;

    reg  = arch_register_for_index(cls, r);
    copy = be_new_Copy(cls, block, to_split);
    mark_as_copy_of(copy, to_split);
    /* hacky, but correct here */
    if (assignments[arch_register_get_index(from_reg)] == to_split)
        free_reg_of_value(to_split);
    use_reg(copy, reg);
    sched_add_before(before, copy);

    DB((dbg, LEVEL_3,
        "Optimistic live-range split %+F move %+F(%s) -> %s before %+F (win %f, depth %d)\n",
        copy, to_split, from_reg->name, reg->name, before, delta, recursion));

    return true;
}
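
/* Example of the split economics: assume to_split sits in r0 and the
 * candidate register r1 is free; moving is worth pref_delta + prefs[r1].
 * The copy is only inserted when this win exceeds the block's execution
 * frequency times SPLIT_DELTA, i.e. when the expected saving outweighs the
 * cost of executing one extra copy in this block. */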

/**
 * Determine and assign a register for node @p node
 */
static void assign_reg(const ir_node *block, ir_node *node,
                       unsigned *forbidden_regs)
{
    const arch_register_t     *reg;
    allocation_info_t         *info;
    const arch_register_req_t *req;
    reg_pref_t                *reg_prefs;
    ir_node                   *in_node;
    unsigned                   i;
    const unsigned            *allowed_regs;
    unsigned                   r;

    assert(!is_Phi(node));
    /* preassigned register? */
    reg = arch_get_irn_register(node);
    if (reg != NULL) {
        DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, reg->name));
        use_reg(node, reg);
        return;
    }

    req = arch_get_register_req_out(node);
    /* ignore reqs must be preassigned */
    assert(!(req->type & arch_register_req_type_ignore));

    /* give should_be_same bonus */
    info    = get_allocation_info(node);
    in_node = skip_Proj(node);
    if (req->type & arch_register_req_type_should_be_same) {
        float weight = (float)get_block_execfreq(execfreqs, block);
        int   arity  = get_irn_arity(in_node);
        int   i2;

        assert(arity <= (int) sizeof(req->other_same) * 8);
        for (i2 = 0; i2 < arity; ++i2) {
            ir_node               *in;
            const arch_register_t *reg;
            unsigned               r;
            if (!rbitset_is_set(&req->other_same, i2))
                continue;

            in  = get_irn_n(in_node, i2);
            reg = arch_get_irn_register(in);
            r   = arch_register_get_index(reg);

            /* if the value didn't die here then we should not propagate the
             * should_be_same info */
            if (assignments[r] == in)
                continue;

            info->prefs[r] += weight * AFF_SHOULD_BE_SAME;
        }
    }

    /* create list of register candidates and sort by their preference */
    DB((dbg, LEVEL_2, "Candidates for %+F:", node));
    reg_prefs = alloca(n_regs * sizeof(reg_prefs[0]));
    fill_sort_candidates(reg_prefs, info);
    for (i = 0; i < n_regs; ++i) {
        unsigned num = reg_prefs[i].num;
        const arch_register_t *reg;

        if (!rbitset_is_set(normal_regs, num))
            continue;

        reg = arch_register_for_index(cls, num);
        DB((dbg, LEVEL_2, " %s(%f)", reg->name, reg_prefs[i].pref));
    }
    DB((dbg, LEVEL_2, "\n"));

    allowed_regs = normal_regs;
    if (req->type & arch_register_req_type_limited) {
        allowed_regs = req->limited;
    }

    /* try to use the "best" available register */
    for (i = 0; i < n_regs; ++i) {
        float    pref, delta;
        ir_node *before;
        bool     res;

        r = reg_prefs[i].num;
        if (!rbitset_is_set(allowed_regs, r))
            continue;
        if (assignments[r] == NULL)
            break;
        pref   = reg_prefs[i].pref;
        delta  = i+1 < n_regs ? pref - reg_prefs[i+1].pref : 0;
        before = skip_Proj(node);
        res    = try_optimistic_split(assignments[r], before,
                                      pref, delta, forbidden_regs, 0);
        if (res)
            break;
    }
    if (i >= n_regs) {
        /* the common reason to hit this panic is when 1 of your nodes is not
         * register pressure faithful */
        panic("No register left for %+F\n", node);
    }

    reg = arch_register_for_index(cls, r);
    DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
    use_reg(node, reg);
}

/**
 * Add a permutation in front of a node and change the assignments
 * due to this permutation.
 *
 * To understand this imagine a permutation like this:
 *
 * 1 -> 2
 * 2 -> 3
 * 3 -> 1, 5
 * 4 -> 6
 * 7 -> 7
 *
 * First we count how many destinations a single value has. At the same time
 * we can be sure that each destination register has at most 1 source register
 * (it can have 0 which means we don't care what value is in it).
 * We ignore all fulfilled permutations (like 7->7).
 * In a first pass we create as many copy instructions as possible, as they
 * are generally cheaper than exchanges. We do this by counting into how many
 * destinations a register has to be copied (in the example it's 2 for register
 * 3, or 1 for the registers 1, 2, 4 and 7).
 * We can then create a copy into every destination register when the usecount
 * of that register is 0 (= no one else needs the value in the register).
 *
 * After this step we should only have cycles left. We implement a cyclic
 * permutation of n registers with n-1 transpositions.
 *
 * @param live_nodes   the set of live nodes, updated due to live range split
 * @param before       the node before we add the permutation
 * @param permutation  the permutation array indices are the destination
 *                     registers, the values in the array are the source
 *                     registers.
 */
static void permute_values(ir_nodeset_t *live_nodes, ir_node *before,
                           unsigned *permutation)
{
    unsigned  *n_used = ALLOCANZ(unsigned, n_regs);
    ir_node   *block;
    unsigned   r;

    /* determine how often each source register needs to be read */
    for (r = 0; r < n_regs; ++r) {
        unsigned  old_reg = permutation[r];
        ir_node  *value;

        value = assignments[old_reg];
        if (value == NULL) {
            /* nothing to do here, reg is not live. Mark it as fixpoint
             * so we ignore it in the next steps */
            permutation[r] = r;
            continue;
        }

        ++n_used[old_reg];
    }

    block = get_nodes_block(before);

    /* step 1: create copies where immediately possible */
    for (r = 0; r < n_regs; /* empty */) {
        ir_node *copy;
        ir_node *src;
        const arch_register_t *reg;
        unsigned old_r = permutation[r];

        /* - no need to do anything for fixed points.
           - we can't copy if the value in the dest reg is still needed */
        if (old_r == r || n_used[r] > 0) {
            ++r;
            continue;
        }

        /* create a copy */
        src  = assignments[old_r];
        copy = be_new_Copy(cls, block, src);
        sched_add_before(before, copy);
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Copy %+F (from %+F, before %+F) -> %s\n",
            copy, src, before, reg->name));
        mark_as_copy_of(copy, src);
        use_reg(copy, reg);

        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, copy);
        }

        /* old register has 1 user less, permutation is resolved */
        assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
        permutation[r] = r;

        assert(n_used[old_r] > 0);
        --n_used[old_r];
        if (n_used[old_r] == 0) {
            if (live_nodes != NULL) {
                ir_nodeset_remove(live_nodes, src);
            }
            free_reg_of_value(src);
        }

        /* advance or jump back (if this copy enabled another copy) */
        if (old_r < r && n_used[old_r] == 0) {
            r = old_r;
        } else {
            ++r;
        }
    }

    /* at this point we only have "cycles" left which we have to resolve with
     * perm instructions
     * TODO: if we have free registers left, then we should really use copy
     * instructions for any cycle longer than 2 registers...
     * (this is probably architecture dependent, there might be archs where
     *  copies are preferable even for 2-cycles) */

    /* create perms with the rest */
    for (r = 0; r < n_regs; /* empty */) {
        const arch_register_t *reg;
        unsigned  old_r = permutation[r];
        unsigned  r2;
        ir_node  *in[2];
        ir_node  *perm;
        ir_node  *proj0;
        ir_node  *proj1;

        if (old_r == r) {
            ++r;
            continue;
        }

        /* we shouldn't have copies from 1 value to multiple destinations left*/
        assert(n_used[old_r] == 1);

        /* exchange old_r and r2; after that old_r is a fixed point */
        r2 = permutation[old_r];

        in[0] = assignments[r2];
        in[1] = assignments[old_r];
        perm = be_new_Perm(cls, block, 2, in);
        sched_add_before(before, perm);
        DB((dbg, LEVEL_2, "Perm %+F (perm %+F,%+F, before %+F)\n",
            perm, in[0], in[1], before));

        proj0 = new_r_Proj(perm, get_irn_mode(in[0]), 0);
        mark_as_copy_of(proj0, in[0]);
        reg = arch_register_for_index(cls, old_r);
        use_reg(proj0, reg);

        proj1 = new_r_Proj(perm, get_irn_mode(in[1]), 1);
        mark_as_copy_of(proj1, in[1]);
        reg = arch_register_for_index(cls, r2);
        use_reg(proj1, reg);

        /* 1 value is now in the correct register */
        permutation[old_r] = old_r;
        /* the source of r changed to r2 */
        permutation[r] = r2;

        /* if we have reached a fixpoint update data structures */
        if (live_nodes != NULL) {
            ir_nodeset_remove(live_nodes, in[0]);
            ir_nodeset_remove(live_nodes, in[1]);
            ir_nodeset_remove(live_nodes, proj0);
            ir_nodeset_insert(live_nodes, proj1);
        }
    }

#ifdef DEBUG_libfirm
    /* now we should only have fixpoints left */
    for (r = 0; r < n_regs; ++r) {
        assert(permutation[r] == r);
    }
#endif
}
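
/* Tracing the example from the function comment: registers 5 and 6 are never
 * read, so the copies 3 -> 5 and 4 -> 6 can be created immediately; this
 * drops the use count of register 3 to 1 but cannot break the remaining
 * cycle 1 -> 2 -> 3 -> 1, which the second phase then resolves with two
 * Perm nodes (a cycle of n registers needs n-1 transpositions). */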

/**
 * Free regs for values last used.
 *
 * @param live_nodes  set of live nodes, will be updated
 * @param node        the node to consider
 */
static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
{
    allocation_info_t *info      = get_allocation_info(node);
    const unsigned    *last_uses = info->last_uses;
    int                arity     = get_irn_arity(node);
    int                i;

    for (i = 0; i < arity; ++i) {
        ir_node *op;

        /* check if one operand is the last use */
        if (!rbitset_is_set(last_uses, i))
            continue;

        op = get_irn_n(node, i);
        free_reg_of_value(op);
        ir_nodeset_remove(live_nodes, op);
    }
}

/**
 * change inputs of a node to the current value (copies/perms)
 */
static void rewire_inputs(ir_node *node)
{
    int i;
    int arity = get_irn_arity(node);

    for (i = 0; i < arity; ++i) {
        ir_node           *op   = get_irn_n(node, i);
        allocation_info_t *info = try_get_allocation_info(op);

        if (info == NULL)
            continue;

        info = get_allocation_info(info->original_value);
        if (info->current_value != op) {
            set_irn_n(node, i, info->current_value);
        }
    }
}

/**
 * Create a bitset of registers occupied by values living through an
 * instruction
 */
static void determine_live_through_regs(unsigned *bitset, ir_node *node)
{
    const allocation_info_t *info = get_allocation_info(node);
    unsigned r;
    int      i;
    int      arity;

    /* mark all used registers as potentially live-through */
    for (r = 0; r < n_regs; ++r) {
        if (assignments[r] == NULL)
            continue;
        if (!rbitset_is_set(normal_regs, r))
            continue;

        rbitset_set(bitset, r);
    }

    /* remove registers of values dying at the instruction */
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        ir_node               *op;
        const arch_register_t *reg;

        if (!rbitset_is_set(info->last_uses, i))
            continue;

        op  = get_irn_n(node, i);
        reg = arch_get_irn_register(op);
        rbitset_clear(bitset, arch_register_get_index(reg));
    }
}

/**
 * Enforce constraints at a node by live range splits.
 *
 * @param live_nodes  the set of live nodes, might be changed
 * @param node        the current node
 */
static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
                                unsigned *forbidden_regs)
{
    int                  arity = get_irn_arity(node);
    int                  i, res;
    hungarian_problem_t *bp;
    unsigned             l, r;
    unsigned            *assignment;
    ir_node             *value;
    bool                 good = true;

    /* construct a list of registers occupied by live-through values */
    unsigned *live_through_regs = NULL;

    /* see if any use constraints are not met */
    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        /* are there any limitations for the i'th operand? */
        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited = req->limited;
        reg     = arch_get_irn_register(op);
        r       = arch_register_get_index(reg);
        if (!rbitset_is_set(limited, r)) {
            /* found an assignment outside the limited set */
            good = false;
            break;
        }
    }

    /* is any of the live-throughs using a constrained output register? */
    be_foreach_definition(node, cls, value,
        if (! (req_->type & arch_register_req_type_limited))
            continue;
        if (live_through_regs == NULL) {
            rbitset_alloca(live_through_regs, n_regs);
            determine_live_through_regs(live_through_regs, node);
        }
        rbitset_or(forbidden_regs, req_->limited, n_regs);
        if (rbitsets_have_common(req_->limited, live_through_regs, n_regs))
            good = false;
    );

    if (good)
        return;

    /* create these arrays if we haven't yet */
    if (live_through_regs == NULL) {
        rbitset_alloca(live_through_regs, n_regs);
    }

    /* at this point we have to construct a bipartite matching problem to see
     * which values should go to which registers
     * Note: We're building the matrix in "reverse" - source registers are
     *       right, destinations left because this will produce the solution
     *       in the format required for permute_values.
     */
    bp = hungarian_new(n_regs, n_regs, HUNGARIAN_MATCH_PERFECT);

    /* add all combinations, then remove not allowed ones */
    for (l = 0; l < n_regs; ++l) {
        if (!rbitset_is_set(normal_regs, l)) {
            hungarian_add(bp, l, l, 1);
            continue;
        }

        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(normal_regs, r))
                continue;
            /* livethrough values may not use constrained output registers */
            if (rbitset_is_set(live_through_regs, l)
                && rbitset_is_set(forbidden_regs, r))
                continue;

            hungarian_add(bp, r, l, l == r ? 9 : 8);
        }
    }

    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   current_reg;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited     = req->limited;
        reg         = arch_get_irn_register(op);
        current_reg = arch_register_get_index(reg);
        for (r = 0; r < n_regs; ++r) {
            if (rbitset_is_set(limited, r))
                continue;
            hungarian_remove(bp, r, current_reg);
        }
    }

    //hungarian_print_cost_matrix(bp, 1);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(unsigned, n_regs);
    res = hungarian_solve(bp, assignment, NULL, 0);
    assert(res == 0);

#if 0
    fprintf(stderr, "Swap result:");
    for (i = 0; i < (int) n_regs; ++i) {
        fprintf(stderr, " %d", assignment[i]);
    }
    fprintf(stderr, "\n");
#endif

    hungarian_free(bp);

    permute_values(live_nodes, node, assignment);
}
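
/* Example: with registers r0..r2, an operand currently in r2 but limited to
 * {r0} keeps only the matrix entry (r0, r2), so any perfect matching must
 * route the value of r2 into r0; the solved assignment[] then reaches
 * permute_values() as a destination -> source permutation. */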

/** test whether node @p value shares its original value with node
 * @p test_value (i.e. one is a copy of the other) */
static bool is_copy_of(ir_node *value, ir_node *test_value)
{
    allocation_info_t *test_info;
    allocation_info_t *info;

    if (value == test_value)
        return true;

    info      = get_allocation_info(value);
    test_info = get_allocation_info(test_value);
    return test_info->original_value == info->original_value;
}

/**
 * find a value in the end-assignment of a basic block
 * @returns the index into the assignment array if found,
 *          -1 otherwise
 */
static int find_value_in_block_info(block_info_t *info, ir_node *value)
{
    unsigned   r;
    ir_node  **assignments = info->assignments;
    for (r = 0; r < n_regs; ++r) {
        ir_node *a_value = assignments[r];

        if (a_value == NULL)
            continue;
        if (is_copy_of(a_value, value))
            return (int) r;
    }

    return -1;
}

/**
 * Create the necessary permutations at the end of a basic block to fulfill
 * the register assignment for phi-nodes in the next block
 */
static void add_phi_permutations(ir_node *block, int p)
{
    unsigned   r;
    unsigned  *permutation;
    ir_node  **old_assignments;
    bool       need_permutation;
    ir_node   *phi;
    ir_node   *pred = get_Block_cfgpred_block(block, p);

    block_info_t *pred_info = get_block_info(pred);

    /* predecessor not processed yet? nothing to do */
    if (!pred_info->processed)
        return;

    permutation = ALLOCAN(unsigned, n_regs);
    for (r = 0; r < n_regs; ++r) {
        permutation[r] = r;
    }

    /* check phi nodes */
    need_permutation = false;
    phi = sched_first(block);
    for ( ; is_Phi(phi); phi = sched_next(phi)) {
        const arch_register_t *reg;
        const arch_register_t *op_reg;
        int                    regn;
        int                    a;
        ir_node               *op;

        if (!arch_irn_consider_in_reg_alloc(cls, phi))
            continue;

        op = get_Phi_pred(phi, p);
        a  = find_value_in_block_info(pred_info, op);
        assert(a >= 0);

        reg  = arch_get_irn_register(phi);
        regn = arch_register_get_index(reg);
        /* same register? nothing to do */
        if (regn == a)
            continue;

        op     = pred_info->assignments[a];
        op_reg = arch_get_irn_register(op);
        /* virtual or joker registers are ok too */
        if ((op_reg->type & arch_register_type_joker)
            || (op_reg->type & arch_register_type_virtual))
            continue;

        permutation[regn] = a;
        need_permutation  = true;
    }

    if (need_permutation) {
        /* permute values at end of predecessor */
        old_assignments = assignments;
        assignments     = pred_info->assignments;
        permute_values(NULL, be_get_end_of_block_insertion_point(pred),
                       permutation);
        assignments = old_assignments;
    }

    /* change phi nodes to use the copied values */
    phi = sched_first(block);
    for ( ; is_Phi(phi); phi = sched_next(phi)) {
        int      a;
        ir_node *op;

        if (!arch_irn_consider_in_reg_alloc(cls, phi))
            continue;

        op = get_Phi_pred(phi, p);

        /* we have permuted all values into the correct registers so we can
           simply query which value occupies the phi's register in the
           predecessor */
        a  = arch_register_get_index(arch_get_irn_register(phi));
        op = pred_info->assignments[a];
        set_Phi_pred(phi, p, op);
    }
}

/**
 * Set preferences for a phi's register based on the registers used on the
 * phi inputs.
 */
static void adapt_phi_prefs(ir_node *phi)
{
    int i;
    int arity = get_irn_arity(phi);
    ir_node           *block = get_nodes_block(phi);
    allocation_info_t *info  = get_allocation_info(phi);

    for (i = 0; i < arity; ++i) {
        ir_node               *op  = get_irn_n(phi, i);
        const arch_register_t *reg = arch_get_irn_register(op);
        ir_node               *pred_block;
        block_info_t          *pred_block_info;
        float                  weight;
        unsigned               r;

        if (reg == NULL)
            continue;

        /* we only give the bonus if the predecessor already has registers
         * assigned, otherwise we only see a dummy value
         * and any conclusions about its register are useless */
        pred_block = get_Block_cfgpred_block(block, i);
        pred_block_info = get_block_info(pred_block);
        if (!pred_block_info->processed)
            continue;

        /* give bonus for already assigned register */
        weight = (float)get_block_execfreq(execfreqs, pred_block);
        r      = arch_register_get_index(reg);
        info->prefs[r] += weight * AFF_PHI;
    }
}

/**
 * After a phi has been assigned a register, propagate the preference for
 * that register to the phi inputs.
 */
static void propagate_phi_register(ir_node *phi, unsigned assigned_r)
{
    int      i;
    ir_node *block = get_nodes_block(phi);
    int      arity = get_irn_arity(phi);

    for (i = 0; i < arity; ++i) {
        ir_node           *op         = get_Phi_pred(phi, i);
        allocation_info_t *info       = get_allocation_info(op);
        ir_node           *pred_block = get_Block_cfgpred_block(block, i);
        unsigned           r;
        float              weight
            = (float)get_block_execfreq(execfreqs, pred_block) * AFF_PHI;

        if (info->prefs[assigned_r] >= weight)
            continue;

        /* promote the preferred register */
        for (r = 0; r < n_regs; ++r) {
            if (info->prefs[r] > -weight) {
                info->prefs[r] = -weight;
            }
        }
        info->prefs[assigned_r] = weight;

        if (is_Phi(op))
            propagate_phi_register(op, assigned_r);
    }
}
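
/* Example: once a phi result gets r3, each input's preference for r3 is
 * raised to the predecessor's frequency * AFF_PHI while all of its other
 * preferences are capped below that value, strongly biasing the inputs
 * towards r3 as well; inputs that are themselves phis propagate the bias
 * recursively. */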

static void assign_phi_registers(ir_node *block)
{
    int                  n_phis = 0;
    int                  n;
    int                  res;
    unsigned            *assignment;
    ir_node             *node;
    hungarian_problem_t *bp;

    /* count phi nodes */
    sched_foreach(block, node) {
        if (!is_Phi(node))
            break;
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;
        ++n_phis;
    }

    if (n_phis == 0)
        return;

    /* build a bipartite matching problem for all phi nodes */
    bp = hungarian_new(n_phis, n_regs, HUNGARIAN_MATCH_PERFECT);
    n  = 0;
    sched_foreach(block, node) {
        unsigned           r;
        allocation_info_t *info;

        if (!is_Phi(node))
            break;
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* give bonus for predecessor colorings */
        adapt_phi_prefs(node);
        /* add stuff to bipartite problem */
        info = get_allocation_info(node);
        DB((dbg, LEVEL_3, "Prefs for %+F: ", node));
        for (r = 0; r < n_regs; ++r) {
            float costs;

            if (!rbitset_is_set(normal_regs, r))
                continue;

            costs = info->prefs[r];
            costs = costs < 0 ? -logf(-costs+1) : logf(costs+1);
            costs *= 100;
            costs += 10000;
            hungarian_add(bp, n, r, (int)costs);
            DB((dbg, LEVEL_3, " %s(%f)", arch_register_for_index(cls, r)->name,
                info->prefs[r]));
        }
        DB((dbg, LEVEL_3, "\n"));
        ++n;
    }

    //hungarian_print_cost_matrix(bp, 7);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(unsigned, n_regs);
    res        = hungarian_solve(bp, assignment, NULL, 0);
    assert(res == 0);

    /* apply results */
    n = 0;
    sched_foreach(block, node) {
        unsigned               r;
        const arch_register_t *reg;

        if (!is_Phi(node))
            break;
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        r = assignment[n++];
        assert(rbitset_is_set(normal_regs, r));
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
        use_reg(node, reg);

        /* adapt preferences for phi inputs */
        if (propagate_phi_registers)
            propagate_phi_register(node, r);
    }
}

/**
 * Walker: assign registers to all nodes of a block that
 * need registers from the currently considered register class.
 */
static void allocate_coalesce_block(ir_node *block, void *data)
{
    int            i;
    ir_nodeset_t   live_nodes;
    ir_node       *node;
    int            n_preds;
    block_info_t  *block_info;
    block_info_t **pred_block_infos;
    ir_node      **phi_ins;
    unsigned      *forbidden_regs; /**< collects registers which must
                                        not be used for optimistic splits */

    (void) data;
    DB((dbg, LEVEL_2, "* Block %+F\n", block));

    /* clear assignments */
    block_info  = get_block_info(block);
    assignments = block_info->assignments;

    ir_nodeset_init(&live_nodes);

    /* gather regalloc infos of predecessor blocks */
    n_preds = get_Block_n_cfgpreds(block);
    pred_block_infos = ALLOCAN(block_info_t*, n_preds);
    for (i = 0; i < n_preds; ++i) {
        ir_node      *pred      = get_Block_cfgpred_block(block, i);
        block_info_t *pred_info = get_block_info(pred);
        pred_block_infos[i] = pred_info;
    }

    phi_ins = ALLOCAN(ir_node*, n_preds);

    /* collect live-in nodes and preassigned values */
    be_lv_foreach(lv, block, be_lv_state_in, i) {
        bool                       need_phi = false;
        const arch_register_req_t *req;
        const arch_register_t     *reg;
        int                        p;

        node = be_lv_get_irn(lv, block, i);
        req  = arch_get_register_req_out(node);
        if (req->cls != cls)
            continue;

        if (req->type & arch_register_req_type_ignore) {
            allocation_info_t *info = get_allocation_info(node);
            info->current_value = node;

            reg = arch_get_irn_register(node);
            assert(reg != NULL); /* ignore values must be preassigned */
            use_reg(node, reg);
            continue;
        }

        /* check all predecessors for this value, if it is not everywhere the
           same or unknown then we have to construct a phi
           (we collect the potential phi inputs here) */
        for (p = 0; p < n_preds; ++p) {
            block_info_t *pred_info = pred_block_infos[p];

            if (!pred_info->processed) {
                /* use node for now, it will get fixed later */
                phi_ins[p] = node;
                need_phi   = true;
            } else {
                int a = find_value_in_block_info(pred_info, node);

                /* must live out of predecessor */
                assert(a >= 0);
                phi_ins[p] = pred_info->assignments[a];
                /* different value from last time? then we need a phi */
                if (p > 0 && phi_ins[p-1] != phi_ins[p]) {
                    need_phi = true;
                }
            }
        }

        if (need_phi) {
            ir_mode *mode = get_irn_mode(node);
            ir_node *phi  = be_new_Phi(block, n_preds, phi_ins, mode, cls);

            DB((dbg, LEVEL_3, "Create Phi %+F (for %+F) -", phi, node));
#ifdef DEBUG_libfirm
            {
                int i;
                for (i = 0; i < n_preds; ++i) {
                    DB((dbg, LEVEL_3, " %+F", phi_ins[i]));
                }
                DB((dbg, LEVEL_3, "\n"));
            }
#endif
            mark_as_copy_of(phi, node);
            sched_add_after(block, phi);

            node = phi;
        } else {
            allocation_info_t *info = get_allocation_info(node);
            info->current_value = phi_ins[0];

            /* Grab 1 of the inputs we constructed (might not be the same as
             * "node" as we could see the same copy of the value in all
             * predecessors) */
            node = phi_ins[0];
        }

        /* if the node already has a register assigned use it */
        reg = arch_get_irn_register(node);
        if (reg != NULL) {
            use_reg(node, reg);
        }

        /* remember that this node is live at the beginning of the block */
        ir_nodeset_insert(&live_nodes, node);
    }

    rbitset_alloca(forbidden_regs, n_regs);

    /* handle phis... */
    assign_phi_registers(block);

    /* all live-ins must have a register */
#ifdef DEBUG_libfirm
    {
        ir_nodeset_iterator_t iter;
        foreach_ir_nodeset(&live_nodes, node, iter) {
            const arch_register_t *reg = arch_get_irn_register(node);
            assert(reg != NULL);
        }
    }
#endif

    /* assign instructions in the block */
    sched_foreach(block, node) {
        int      arity;
        ir_node *value;

        /* phis are already assigned */
        if (is_Phi(node))
            continue;

        rewire_inputs(node);

        /* enforce use constraints */
        rbitset_clear_all(forbidden_regs, n_regs);
        enforce_constraints(&live_nodes, node, forbidden_regs);

        rewire_inputs(node);

        /* we may not use registers used for inputs for optimistic splits */
        arity = get_irn_arity(node);
        for (i = 0; i < arity; ++i) {
            ir_node *op = get_irn_n(node, i);
            const arch_register_t *reg;
            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            reg = arch_get_irn_register(op);
            rbitset_set(forbidden_regs, arch_register_get_index(reg));
        }

        /* free registers of values last used at this instruction */
        free_last_uses(&live_nodes, node);

        /* assign output registers */
        be_foreach_definition_(node, cls, value,
            assign_reg(block, value, forbidden_regs);
        );
    }

    ir_nodeset_destroy(&live_nodes);
    assignments = NULL;

    block_info->processed = true;

    /* permute values at end of predecessor blocks in case of phi-nodes */
    if (n_preds > 1) {
        int p;
        for (p = 0; p < n_preds; ++p) {
            add_phi_permutations(block, p);
        }
    }

    /* if we have exactly 1 successor then we might be able to produce phi
       copies now */
    if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) == 1) {
        const ir_edge_t *edge
            = get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
        ir_node      *succ      = get_edge_src_irn(edge);
        int           p         = get_edge_src_pos(edge);
        block_info_t *succ_info = get_block_info(succ);

        if (succ_info->processed) {
            add_phi_permutations(succ, p);
        }
    }
}

typedef struct block_costs_t block_costs_t;
struct block_costs_t {
    float costs;   /**< costs of the block */
    int   dfs_num; /**< depth first search number (to detect backedges) */
};

static int cmp_block_costs(const void *d1, const void *d2)
{
    const ir_node * const *block1 = d1;
    const ir_node * const *block2 = d2;
    const block_costs_t   *info1  = get_irn_link(*block1);
    const block_costs_t   *info2  = get_irn_link(*block2);
    return QSORT_CMP(info2->costs, info1->costs);
}

static void determine_block_order(void)
{
    int       i;
    ir_node **blocklist = be_get_cfgpostorder(irg);
    int       n_blocks  = ARR_LEN(blocklist);
    int       dfs_num   = 0;
    pdeq     *worklist  = new_pdeq();
    ir_node **order     = XMALLOCN(ir_node*, n_blocks);
    int       order_p   = 0;

    /* clear block links... */
    for (i = 0; i < n_blocks; ++i) {
        ir_node *block = blocklist[i];
        set_irn_link(block, NULL);
    }

    /* walk blocks in reverse postorder, the costs for each block are the
     * sum of the costs of its predecessors (excluding the costs on backedges
     * which we can't determine) */
    for (i = n_blocks-1; i >= 0; --i) {
        block_costs_t *cost_info;
        ir_node *block = blocklist[i];

        float execfreq   = (float)get_block_execfreq(execfreqs, block);
        float costs      = execfreq;
        int   n_cfgpreds = get_Block_n_cfgpreds(block);
        int   p;
        for (p = 0; p < n_cfgpreds; ++p) {
            ir_node       *pred_block = get_Block_cfgpred_block(block, p);
            block_costs_t *pred_costs = get_irn_link(pred_block);
            /* we don't have any info for backedges */
            if (pred_costs == NULL)
                continue;
            costs += pred_costs->costs;
        }

        cost_info          = OALLOCZ(&obst, block_costs_t);
        cost_info->costs   = costs;
        cost_info->dfs_num = dfs_num++;
        set_irn_link(block, cost_info);
    }

    /* sort array by block costs */
    qsort(blocklist, n_blocks, sizeof(blocklist[0]), cmp_block_costs);

    ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
    inc_irg_block_visited(irg);

    for (i = 0; i < n_blocks; ++i) {
        ir_node *block = blocklist[i];
        if (Block_block_visited(block))
            continue;

        /* continually add predecessors with highest costs to worklist
         * (without using backedges) */
        do {
            block_costs_t *info       = get_irn_link(block);
            ir_node       *best_pred  = NULL;
            float          best_costs = -1;
            int            n_cfgpred  = get_Block_n_cfgpreds(block);
            int            p;

            pdeq_putr(worklist, block);
            mark_Block_block_visited(block);
            for (p = 0; p < n_cfgpred; ++p) {
                ir_node       *pred_block = get_Block_cfgpred_block(block, p);
                block_costs_t *pred_info  = get_irn_link(pred_block);

                /* ignore backedges */
                if (pred_info->dfs_num > info->dfs_num)
                    continue;

                if (pred_info->costs > best_costs) {
                    best_costs = pred_info->costs;
                    best_pred  = pred_block;
                }
            }
            block = best_pred;
        } while (block != NULL && !Block_block_visited(block));

        /* now put all nodes in the worklist in our final order */
        while (!pdeq_empty(worklist)) {
            ir_node *pblock = pdeq_getr(worklist);
            assert(order_p < n_blocks);
            order[order_p++] = pblock;
        }
    }
    assert(order_p == n_blocks);
    del_pdeq(worklist);

    ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);

    DEL_ARR_F(blocklist);

    obstack_free(&obst, NULL);
    obstack_init(&obst);

    block_order   = order;
    n_block_order = n_blocks;
}
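
/* Example: in a diamond CFG  A -> {B, C} -> D  where B is hotter than C, the
 * accumulated costs (ignoring unknown backedge costs) make the final order
 * A, B, D, C: the hot path A-B-D is colored first and the cold block C has
 * to adapt to the register assignments chosen for it. */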

/**
 * Run the register allocator for the current register class.
 */
static void be_pref_alloc_cls(void)
{
    int i;

    lv = be_assure_liveness(irg);
    be_liveness_assure_sets(lv);

    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);

    DB((dbg, LEVEL_2, "=== Allocating registers of %s ===\n", cls->name));

    be_clear_links(irg);

    irg_block_walk_graph(irg, NULL, analyze_block, NULL);
    if (create_congruence_classes)
        combine_congruence_classes();

    for (i = 0; i < n_block_order; ++i) {
        ir_node *block = block_order[i];
        allocate_coalesce_block(block, NULL);
    }

    ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
}

static void dump(int mask, ir_graph *irg, const char *suffix)
{
    if (be_get_irg_options(irg)->dump_flags & mask)
        dump_ir_graph(irg, suffix);
}

/**
 * Run the spiller on the current graph.
 */
static void spill(void)
{
    /* make sure all nodes show their real register pressure */
    be_timer_push(T_RA_CONSTR);
    be_pre_spill_prepare_constr(irg, cls);
    be_timer_pop(T_RA_CONSTR);

    dump(DUMP_RA, irg, "-spillprepare");

    /* spill */
    be_timer_push(T_RA_SPILL);
    be_do_spill(irg, cls);
    be_timer_pop(T_RA_SPILL);

    be_timer_push(T_RA_SPILL_APPLY);
    check_for_memory_operands(irg);
    be_timer_pop(T_RA_SPILL_APPLY);

    dump(DUMP_RA, irg, "-spill");
}

/**
 * The pref register allocator for a whole procedure.
 */
static void be_pref_alloc(ir_graph *new_irg)
{
    const arch_env_t *arch_env = be_get_irg_arch_env(new_irg);
    int               n_cls    = arch_env_get_n_reg_class(arch_env);
    int               c;

    obstack_init(&obst);

    irg       = new_irg;
    execfreqs = be_get_irg_exec_freq(irg);

    /* determine a good coloring order */
    determine_block_order();

    for (c = 0; c < n_cls; ++c) {
        cls = arch_env_get_reg_class(arch_env, c);
        if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
            continue;

        stat_ev_ctx_push_str("regcls", cls->name);

        n_regs      = arch_register_class_n_regs(cls);
        normal_regs = rbitset_malloc(n_regs);
        be_abi_set_non_ignore_regs(be_get_irg_abi(irg), cls, normal_regs);

        spill();

        /* verify schedule and register pressure */
        be_timer_push(T_VERIFY);
        if (be_get_irg_options(irg)->verify_option == BE_VERIFY_WARN) {
            be_verify_schedule(irg);
            be_verify_register_pressure(irg, cls);
        } else if (be_get_irg_options(irg)->verify_option == BE_VERIFY_ASSERT) {
            assert(be_verify_schedule(irg) && "Schedule verification failed");
            assert(be_verify_register_pressure(irg, cls)
                   && "Register pressure verification failed");
        }
        be_timer_pop(T_VERIFY);

        be_timer_push(T_RA_COLOR);
        be_pref_alloc_cls();
        be_timer_pop(T_RA_COLOR);

        /* we most probably constructed new Phis so liveness info is invalid
         * now */
        /* TODO: test liveness_introduce */
        be_liveness_invalidate(lv);
        free(normal_regs);

        stat_ev_ctx_pop("regcls");
    }

    be_timer_push(T_RA_SPILL_APPLY);
    be_abi_fix_stack_nodes(irg);
    be_timer_pop(T_RA_SPILL_APPLY);

    be_timer_push(T_VERIFY);
    if (be_get_irg_options(irg)->verify_option == BE_VERIFY_WARN) {
        be_verify_register_allocation(irg);
    } else if (be_get_irg_options(irg)->verify_option == BE_VERIFY_ASSERT) {
        assert(be_verify_register_allocation(irg)
               && "Register allocation invalid");
    }
    be_timer_pop(T_VERIFY);

    obstack_free(&obst, NULL);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_pref_alloc);
void be_init_pref_alloc(void)
{
    static be_ra_t be_ra_pref = {
        be_pref_alloc,
    };
    lc_opt_entry_t *be_grp          = lc_opt_get_grp(firm_opt_get_root(), "be");
    lc_opt_entry_t *prefalloc_group = lc_opt_get_grp(be_grp, "prefalloc");
    lc_opt_add_table(prefalloc_group, options);

    be_register_allocator("pref", &be_ra_pref);
    FIRM_DBG_REGISTER(dbg, "firm.be.prefalloc");
}