/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief  New approach to allocation and copy coalescing
 * @author Matthias Braun
 *
 * ... WE NEED A NAME FOR THIS ...
 *
 * Only a proof of concept at this moment...
 *
 * The idea is to allocate registers in 2 passes:
 * 1. A first pass to determine "preferred" registers for live-ranges. This
 *    calculates for each register and each live-range a value indicating
 *    the usefulness. (You can roughly think of the value as the negative
 *    costs needed for copies when the value is in the specific register...)
 * 2. Walk blocks and assign registers in a greedy fashion, preferring
 *    registers with high preference values. When register constraints are
 *    not met, add copies and split live-ranges.
 *
 * TODO:
 *  - make use of free registers in the permute_values code
 *  - think about a smarter sequence of visiting the blocks. Sorted by
 *    execfreq might be good, or looptree from inner to outermost loops going
 *    over blocks in a reverse postorder
 *  - propagate preferences through Phis
 */
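/* A minimal sketch of how pass 2 consumes the preferences computed by
 * pass 1 (illustrative only, not part of the allocator; function and
 * parameter names here are hypothetical): pick the free register with the
 * highest preference value. */
#if 0
static unsigned sketch_pick_best_free_reg(const float *prefs, unsigned n_regs,
                                          ir_node **assignments)
{
    unsigned best = n_regs; /* sentinel: no free register found */
    unsigned r;
    for (r = 0; r < n_regs; ++r) {
        if (assignments[r] != NULL)
            continue; /* register already occupied */
        if (best == n_regs || prefs[r] > prefs[best])
            best = r;
    }
    return best;
}
#endif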
#include "iredges_t.h"
#include "irgraph_t.h"
#include "raw_bitset.h"
#include "unionfind.h"
#include "hungarian.h"
#include "bechordal_t.h"
#include "bespillutil.h"
#define USE_FACTOR         1.0f
#define DEF_FACTOR         1.0f
#define NEIGHBOR_FACTOR    0.2f
#define AFF_SHOULD_BE_SAME 0.5f
#define AFF_PHI            1.0f
#define SPLIT_DELTA        1.0f

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
static struct obstack               obst;
static be_irg_t                    *birg;
static ir_graph                    *irg;
static const arch_register_class_t *cls;
static const arch_register_req_t   *default_cls_req;
static be_lv_t                     *lv;
static const ir_exec_freq          *execfreqs;
static unsigned                     n_regs;
static unsigned                    *normal_regs;
static int                         *congruence_classes;
static ir_node                    **block_order;
static int                          n_block_order;
/** currently active assignments (while processing a basic block)
 * maps registers to values (their current copies) */
static ir_node **assignments;
/**
 * allocation information: last_uses, register preferences
 * the information is per firm-node.
 */
struct allocation_info_t {
    unsigned  last_uses;      /**< bitset indicating last uses (input pos) */
    ir_node  *current_value;  /**< copy of the value that should be used */
    ir_node  *original_value; /**< for copies point to original value */
    float     prefs[0];       /**< register preferences */
};
typedef struct allocation_info_t allocation_info_t;
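/* Note: prefs[0] is an old-style flexible array member. OALLOCFZ below
 * allocates sizeof(allocation_info_t) + n_regs * sizeof(float) in a single
 * zero-initialized obstack chunk, so every node carries one preference
 * value per register of the current class. */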
/** helper datastructure used when sorting register preferences */
struct reg_pref_t {
    unsigned num;
    float    pref;
};
typedef struct reg_pref_t reg_pref_t;
/** per basic-block information */
struct block_info_t {
    bool      processed;      /**< indicates whether the block is processed */
    ir_node  *assignments[0]; /**< register assignments at end of block */
};
typedef struct block_info_t block_info_t;
/**
 * Get the allocation info for a node.
 * The info is allocated on the first visit of a node.
 */
static allocation_info_t *get_allocation_info(ir_node *node)
{
    allocation_info_t *info = get_irn_link(node);
    if (info == NULL) {
        info = OALLOCFZ(&obst, allocation_info_t, prefs, n_regs);
        info->current_value  = node;
        info->original_value = node;
        set_irn_link(node, info);
    }
    return info;
}
/**
 * Get allocation information for a basic block
 */
static block_info_t *get_block_info(ir_node *block)
{
    block_info_t *info = get_irn_link(block);

    assert(is_Block(block));
    if (info == NULL) {
        info = OALLOCFZ(&obst, block_info_t, assignments, n_regs);
        set_irn_link(block, info);
    }
    return info;
}
/**
 * Get default register requirement for the current register class
 */
static const arch_register_req_t *get_default_req_current_cls(void)
{
    if (default_cls_req == NULL) {
        struct obstack      *obst = get_irg_obstack(irg);
        arch_register_req_t *req  = OALLOCZ(obst, arch_register_req_t);

        req->type = arch_register_req_type_normal;
        req->cls  = cls;

        default_cls_req = req;
    }
    return default_cls_req;
}
/**
 * Link the allocation info of a node to a copy.
 * Afterwards, both nodes use the same allocation info.
 * Copy must not have an allocation info assigned yet.
 *
 * @param copy   the node that gets the allocation info assigned
 * @param value  the original node
 */
static void mark_as_copy_of(ir_node *copy, ir_node *value)
{
    ir_node           *original;
    allocation_info_t *info      = get_allocation_info(value);
    allocation_info_t *copy_info = get_allocation_info(copy);

    /* find original value */
    original = info->original_value;
    if (original != value) {
        info = get_allocation_info(original);
    }

    assert(info->original_value == original);
    info->current_value = copy;

    /* the copy should not be linked to something else yet */
    assert(copy_info->original_value == copy);
    copy_info->original_value = original;

    /* copy over allocation preferences */
    memcpy(copy_info->prefs, info->prefs, n_regs * sizeof(copy_info->prefs[0]));
}
/**
 * Calculate the penalties for every register on a node and its live neighbors.
 *
 * @param live_nodes  the set of live nodes at the current position, may be NULL
 * @param penalty     the penalty to subtract from the nodes' preferences
 * @param limited     a raw bitset containing the limited set for the node
 * @param node        the node
 */
static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
                                      float penalty, const unsigned* limited,
                                      ir_node *node)
{
    ir_nodeset_iterator_t  iter;
    unsigned               r;
    unsigned               n_allowed;
    allocation_info_t     *info = get_allocation_info(node);
    ir_node               *neighbor;
    /* give penalty for all forbidden regs */
    for (r = 0; r < n_regs; ++r) {
        if (rbitset_is_set(limited, r))
            continue;

        info->prefs[r] -= penalty;
    }

    /* all other live values should get a penalty for allowed regs */
    if (live_nodes == NULL)
        return;

    penalty   *= NEIGHBOR_FACTOR;
    n_allowed  = rbitset_popcnt(limited, n_regs);
    if (n_allowed > 1) {
        /* only create a very weak penalty if multiple regs are allowed */
        penalty = (penalty * 0.8f) / n_allowed;
    }
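    /* Worked example (illustrative numbers): with penalty = 1.0,
     * NEIGHBOR_FACTOR = 0.2 and 4 allowed registers, each live neighbor
     * below loses (1.0 * 0.2 * 0.8) / 4 = 0.04 preference on every register
     * of the limited set - a weak nudge away from the constrained regs. */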
    foreach_ir_nodeset(live_nodes, neighbor, iter) {
        allocation_info_t *neighbor_info;

        /* TODO: if op is used on multiple inputs we might not do a ... */
        if (neighbor == node)
            continue;

        neighbor_info = get_allocation_info(neighbor);
        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(limited, r))
                continue;

            neighbor_info->prefs[r] -= penalty;
        }
    }
}
/**
 * Calculate the preferences of a definition for the current register class.
 * If the definition uses a limited set of registers, reduce the preferences
 * for the limited register on the node and its neighbors.
 *
 * @param live_nodes  the set of live nodes at the current node
 * @param weight      the weight
 * @param node        the current node
 */
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
                       ir_node *node)
{
    const arch_register_req_t *req;

    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;
        foreach_out_edge(node, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            check_defs(live_nodes, weight, proj);
        }
        return;
    }
    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    req = arch_get_register_req_out(node);
    if (req->type & arch_register_req_type_limited) {
        const unsigned *limited = req->limited;
        float           penalty = weight * DEF_FACTOR;
        give_penalties_for_limits(live_nodes, penalty, limited, node);
    }
    if (req->type & arch_register_req_type_should_be_same) {
        ir_node           *insn  = skip_Proj(node);
        allocation_info_t *info  = get_allocation_info(node);
        int                arity = get_irn_arity(insn);
        int                i;

        float factor = 1.0f / rbitset_popcnt(&req->other_same, arity);
        for (i = 0; i < arity; ++i) {
            ir_node           *op;
            unsigned           r;
            allocation_info_t *op_info;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op = get_irn_n(insn, i);

            /* if the value at the should_be_same input doesn't die at the
             * node, then it is no use to propagate the constraints (since a
             * copy will emerge anyway) */
            if (ir_nodeset_contains(live_nodes, op))
                continue;

            op_info = get_allocation_info(op);
            for (r = 0; r < n_regs; ++r) {
                op_info->prefs[r] += info->prefs[r] * factor;
            }
        }
    }
}
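/* Example (illustrative): for a two-address style instruction whose result
 * must reuse input 0 and which is that input's last use, the loop above adds
 * the result's preference vector (scaled by factor) onto the input's
 * preferences, so both tend to be assigned the same register and the
 * would-be copy disappears. */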
/**
 * Walker: Runs on a block and calculates the preferences for every
 * node and every register from the considered register class.
 */
static void analyze_block(ir_node *block, void *data)
{
    float         weight = get_block_execfreq(execfreqs, block);
    ir_nodeset_t  live_nodes;
    ir_node      *node;
    (void) data;

    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);

    sched_foreach_reverse(block, node) {
        allocation_info_t *info;
        int                i;
        int                arity;

        if (is_Phi(node))
            break;

        check_defs(&live_nodes, weight, node);
        arity = get_irn_arity(node);

        /* the allocation info node currently only uses 1 unsigned value
         * to mark last used inputs. So we will fail for a node with more
         * than 32 inputs. */
        if (arity >= (int) sizeof(unsigned) * 8) {
            panic("Node with more than %d inputs not supported yet",
                  (int) sizeof(unsigned) * 8);
        }
        info = get_allocation_info(node);
        for (i = 0; i < arity; ++i) {
            ir_node *op = get_irn_n(node, i);
            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            /* last usage of a value? */
            if (!ir_nodeset_contains(&live_nodes, op)) {
                rbitset_set(&info->last_uses, i);
            }
        }

        be_liveness_transfer(cls, node, &live_nodes);
        /* update weights based on usage constraints */
        for (i = 0; i < arity; ++i) {
            const arch_register_req_t *req;
            const unsigned            *limited;
            ir_node                   *op = get_irn_n(node, i);

            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            req = arch_get_register_req(node, i);
            if (!(req->type & arch_register_req_type_limited))
                continue;

            limited = req->limited;
            give_penalties_for_limits(&live_nodes, weight * USE_FACTOR, limited,
                                      op);
        }
    }

    ir_nodeset_destroy(&live_nodes);
}
static void congruence_def(ir_nodeset_t *live_nodes, ir_node *node)
{
    const arch_register_req_t *req;

    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;
        foreach_out_edge(node, edge) {
            ir_node *def = get_edge_src_irn(edge);
            congruence_def(live_nodes, def);
        }
        return;
    }
    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    /* should be same constraint? */
    req = arch_get_register_req_out(node);
    if (req->type & arch_register_req_type_should_be_same) {
        ir_node *insn  = skip_Proj(node);
        int      arity = get_irn_arity(insn);
        int      i;
        unsigned node_idx = get_irn_idx(node);
        node_idx = uf_find(congruence_classes, node_idx);

        for (i = 0; i < arity; ++i) {
            ir_node               *live;
            ir_node               *op;
            int                    op_idx;
            ir_nodeset_iterator_t  iter;
            bool                   interferes = false;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op     = get_irn_n(insn, i);
            op_idx = get_irn_idx(op);
            op_idx = uf_find(congruence_classes, op_idx);

            /* do we interfere with the value? */
            foreach_ir_nodeset(live_nodes, live, iter) {
                int lv_idx = get_irn_idx(live);
                lv_idx     = uf_find(congruence_classes, lv_idx);
                if (lv_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            /* don't put in same affinity class if we interfere */
            if (interferes)
                continue;

            node_idx = uf_union(congruence_classes, node_idx, op_idx);
            DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
                node, op));
            /* one should_be_same is enough... */
            break;
        }
    }
}
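/* Union-find refresher (illustrative): after uf_union(congruence_classes,
 * a, b), uf_find() yields one common representative for both indices, so
 * all nodes that find the same representative form one congruence class
 * whose preferences get merged below. */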
static void create_congruence_class(ir_node *block, void *data)
{
    ir_nodeset_t  live_nodes;
    ir_node      *node;
    (void) data;

    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);

    /* check should be same constraints */
    sched_foreach_reverse(block, node) {
        if (is_Phi(node))
            break;

        congruence_def(&live_nodes, node);
        be_liveness_transfer(cls, node, &live_nodes);
    }
    /* check phi congruence classes */
    sched_foreach_reverse_from(node, node) {
        int i;
        int arity;
        int node_idx;
        assert(is_Phi(node));

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        node_idx = get_irn_idx(node);
        node_idx = uf_find(congruence_classes, node_idx);

        arity = get_irn_arity(node);
        for (i = 0; i < arity; ++i) {
            bool                   interferes = false;
            ir_nodeset_iterator_t  iter;
            ir_node               *live;
            ir_node               *phi;
            ir_node               *op     = get_Phi_pred(node, i);
            int                    op_idx = get_irn_idx(op);
            op_idx = uf_find(congruence_classes, op_idx);

            /* do we interfere with the value? */
            foreach_ir_nodeset(&live_nodes, live, iter) {
                int lv_idx = get_irn_idx(live);
                lv_idx     = uf_find(congruence_classes, lv_idx);
                if (lv_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            /* don't put in same affinity class if we interfere */
            if (interferes)
                continue;
            /* any other phi has the same input? */
            sched_foreach(block, phi) {
                ir_node *oop;
                int      oop_idx;

                if (!is_Phi(phi))
                    break;
                if (phi == node)
                    continue;
                if (!arch_irn_consider_in_reg_alloc(cls, phi))
                    continue;

                oop = get_Phi_pred(phi, i);
                oop_idx = get_irn_idx(oop);
                oop_idx = uf_find(congruence_classes, oop_idx);
                if (oop_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            if (interferes)
                continue;

            node_idx = uf_union(congruence_classes, node_idx, op_idx);
            DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
                node, op));
        }
    }
    ir_nodeset_destroy(&live_nodes);
}
static void merge_congruence_prefs(ir_node *node, void *data)
{
    allocation_info_t *info;
    allocation_info_t *head_info;
    unsigned node_idx = get_irn_idx(node);
    unsigned node_set = uf_find(congruence_classes, node_idx);
    unsigned r;
    (void) data;

    /* head of congruence class or not in any class */
    if (node_set == node_idx)
        return;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    head_info = get_allocation_info(get_idx_irn(irg, node_set));
    info      = get_allocation_info(node);

    for (r = 0; r < n_regs; ++r) {
        head_info->prefs[r] += info->prefs[r];
    }
}
static void set_congruence_prefs(ir_node *node, void *data)
{
    allocation_info_t *info;
    allocation_info_t *head_info;
    unsigned node_idx = get_irn_idx(node);
    unsigned node_set = uf_find(congruence_classes, node_idx);
    (void) data;

    /* head of congruence class or not in any class */
    if (node_set == node_idx)
        return;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    head_info = get_allocation_info(get_idx_irn(irg, node_set));
    info      = get_allocation_info(node);

    memcpy(info->prefs, head_info->prefs, n_regs * sizeof(info->prefs[0]));
}
static void combine_congruence_classes(void)
{
    size_t n = get_irg_last_idx(irg);
    congruence_classes = XMALLOCN(int, n);
    uf_init(congruence_classes, n);

    /* create congruence classes */
    irg_block_walk_graph(irg, create_congruence_class, NULL, NULL);
    /* merge preferences */
    irg_walk_graph(irg, merge_congruence_prefs, NULL, NULL);
    irg_walk_graph(irg, set_congruence_prefs, NULL, NULL);
    free(congruence_classes);
}
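/* Worked example (illustrative): for a congruence class {a, b, c} whose
 * union-find representative is a, merge_congruence_prefs accumulates b's and
 * c's preferences into a, and set_congruence_prefs then copies the summed
 * vector back into b and c, so the whole class votes for the same register. */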
/**
 * Assign register reg to the given node.
 *
 * @param node  the node
 * @param reg   the register
 */
static void use_reg(ir_node *node, const arch_register_t *reg)
{
    unsigned r = arch_register_get_index(reg);
    assignments[r] = node;
    arch_set_irn_register(node, reg);
}
static void free_reg_of_value(ir_node *node)
{
    const arch_register_t *reg;
    unsigned               r;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    reg = arch_get_irn_register(node);
    r   = arch_register_get_index(reg);
    /* assignments[r] may be NULL if a value is used at 2 inputs,
     * so it gets freed twice. */
    assert(assignments[r] == node || assignments[r] == NULL);
    assignments[r] = NULL;
}
/**
 * Compare two register preferences in decreasing order.
 */
static int compare_reg_pref(const void *e1, const void *e2)
{
    const reg_pref_t *rp1 = (const reg_pref_t*) e1;
    const reg_pref_t *rp2 = (const reg_pref_t*) e2;
    if (rp1->pref < rp2->pref)
        return 1;
    if (rp1->pref > rp2->pref)
        return -1;
    return 0;
}
static void fill_sort_candidates(reg_pref_t *regprefs,
                                 const allocation_info_t *info)
{
    unsigned r;

    for (r = 0; r < n_regs; ++r) {
        float pref = info->prefs[r];
        regprefs[r].num  = r;
        regprefs[r].pref = pref;
    }
    /* TODO: use a stable sort here to avoid unnecessary register jumping */
    qsort(regprefs, n_regs, sizeof(regprefs[0]), compare_reg_pref);
}
static bool try_optimistic_split(ir_node *to_split, ir_node *before,
                                 float pref, float pref_delta,
                                 unsigned *output_regs)
{
    const arch_register_t *reg;
    ir_node               *original_insn;
    ir_node               *block;
    ir_node               *copy;
    unsigned               r;
    unsigned               i;
    allocation_info_t     *info = get_allocation_info(to_split);
    reg_pref_t            *prefs;
    float                  delta;
    float                  split_threshold;

    /* stupid hack: don't optimistically split dont_spill nodes
     * (so we don't split away the values produced because of
     *  must_be_different constraints) */
    original_insn = skip_Proj(info->original_value);
    if (arch_irn_get_flags(original_insn) & arch_irn_flags_dont_spill)
        return false;
    /* find the best free position where we could move to */
    prefs = ALLOCAN(reg_pref_t, n_regs);
    fill_sort_candidates(prefs, info);
    for (i = 0; i < n_regs; ++i) {
        r = prefs[i].num;
        if (!rbitset_is_set(normal_regs, r))
            continue;
        if (rbitset_is_set(output_regs, r))
            continue;
        if (assignments[r] == NULL)
            break;
    }
    if (i >= n_regs)
        return false;

    /* TODO: use execfreq somehow... */
    block = get_nodes_block(before);
    delta = pref_delta + prefs[i].pref;
    split_threshold = get_block_execfreq(execfreqs, block) * SPLIT_DELTA;
    if (delta < split_threshold) {
        DB((dbg, LEVEL_3, "Not doing optimistic split, win %f too low\n",
            delta));
        return false;
    }
    reg  = arch_register_for_index(cls, r);
    copy = be_new_Copy(cls, block, to_split);
    mark_as_copy_of(copy, to_split);
    free_reg_of_value(to_split);
    use_reg(copy, reg);
    sched_add_before(before, copy);

    DB((dbg, LEVEL_3,
        "Optimistic live-range split %+F move %+F -> %s before %+F (win %f)\n",
        copy, to_split, reg->name, before, delta));
    return true;
}
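/* Numeric example (illustrative): with pref_delta = 2.0, a best free register
 * scoring prefs[i].pref = 0.5 and a block execfreq of 1.0, we get
 * delta = 2.5 >= split_threshold = 1.0 * SPLIT_DELTA, so the occupying value
 * is copied away and its register becomes free for the requester. */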
/**
 * Determine and assign a register for node @p node
 */
static void assign_reg(const ir_node *block, ir_node *node,
                       unsigned *output_regs)
{
    const arch_register_t     *reg;
    allocation_info_t         *info;
    const arch_register_req_t *req;
    reg_pref_t                *reg_prefs;
    ir_node                   *in_node;
    unsigned                   i;
    const unsigned            *allowed_regs;
    unsigned                   r;

    assert(arch_irn_consider_in_reg_alloc(cls, node));
    /* preassigned register? */
    reg = arch_get_irn_register(node);
    if (reg != NULL) {
        DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, reg->name));
        use_reg(node, reg);
        return;
    }
    /* give should_be_same bonus */
    info = get_allocation_info(node);
    req  = arch_get_register_req_out(node);

    in_node = skip_Proj(node);
    if (req->type & arch_register_req_type_should_be_same) {
        float weight = get_block_execfreq(execfreqs, block);
        int   arity  = get_irn_arity(in_node);
        int   i;

        assert(arity <= (int) sizeof(req->other_same) * 8);
        for (i = 0; i < arity; ++i) {
            ir_node               *in;
            const arch_register_t *reg;
            unsigned               r;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            in  = get_irn_n(in_node, i);
            reg = arch_get_irn_register(in);
            r   = arch_register_get_index(reg);

            /* if the value didn't die here then we should not propagate the
             * should_be_same info */
            if (assignments[r] == in)
                continue;

            info->prefs[r] += weight * AFF_SHOULD_BE_SAME;
        }
    }
    /* create list of register candidates and sort by their preference */
    DB((dbg, LEVEL_2, "Candidates for %+F:", node));
    reg_prefs = alloca(n_regs * sizeof(reg_prefs[0]));
    fill_sort_candidates(reg_prefs, info);
    for (i = 0; i < n_regs; ++i) {
        unsigned num = reg_prefs[i].num;
        const arch_register_t *reg;

        if (!rbitset_is_set(normal_regs, num))
            continue;

        reg = arch_register_for_index(cls, num);
        DB((dbg, LEVEL_2, " %s(%f)", reg->name, reg_prefs[i].pref));
    }
    DB((dbg, LEVEL_2, "\n"));
    allowed_regs = normal_regs;
    if (req->type & arch_register_req_type_limited) {
        allowed_regs = req->limited;
    }

    for (i = 0; i < n_regs; ++i) {
        r = reg_prefs[i].num;
        if (!rbitset_is_set(allowed_regs, r))
            continue;
        if (assignments[r] == NULL)
            break;

        {
            float    pref   = reg_prefs[i].pref;
            float    delta  = i+1 < n_regs ? pref - reg_prefs[i+1].pref : 0;
            ir_node *before = skip_Proj(node);
            bool     res    = try_optimistic_split(assignments[r], before,
                                                   pref, delta, output_regs);
            if (res)
                break;
        }
    }
    if (i >= n_regs) {
        panic("No register left for %+F\n", node);
    }
    reg = arch_register_for_index(cls, r);
    DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
    use_reg(node, reg);
}
/**
 * Add a permutation in front of a node and change the assignments
 * due to this permutation.
 *
 * To understand this imagine a permutation like this (the original example
 * is elided; the following illustrative one matches the counts referenced
 * below):
 *
 *   1 -> 4     (the value in register 1 should end up in register 4)
 *   2 -> 6
 *   3 -> 1, 2  (the value in register 3 is needed in two registers)
 *   4 -> 5
 *   7 -> 7     (already in place)
 *
 * First we count how many destinations a single value has. At the same time
 * we can be sure that each destination register has at most 1 source register
 * (it can have 0 which means we don't care what value is in it).
 * We ignore all fulfilled permutations (like 7->7).
 * In a first pass we create as many copy instructions as possible, as they
 * are generally cheaper than exchanges. We do this by counting into how many
 * destinations a register has to be copied (in the example it's 2 for
 * register 3, or 1 for the registers 1, 2, 4 and 7).
 * We can then create a copy into every destination register when the usecount
 * of that register is 0 (= no one else needs the value in the register).
 *
 * After this step we should only have cycles left. We implement a cyclic
 * permutation of n registers with n-1 transpositions.
 *
 * @param live_nodes   the set of live nodes, updated due to live range split
 * @param before       the node before we add the permutation
 * @param permutation  the permutation array: indices are the destination
 *                     registers, the values in the array are the source
 *                     registers.
 */
static void permute_values(ir_nodeset_t *live_nodes, ir_node *before,
                           unsigned *permutation)
{
    unsigned *n_used = ALLOCANZ(unsigned, n_regs);
    ir_node  *block;
    unsigned  r;

    /* determine how often each source register needs to be read */
    for (r = 0; r < n_regs; ++r) {
        unsigned  old_reg = permutation[r];
        ir_node  *value;

        value = assignments[old_reg];
        if (value == NULL) {
            /* nothing to do here, reg is not live. Mark it as fixpoint
             * so we ignore it in the next steps */
            permutation[r] = r;
            continue;
        }

        ++n_used[old_reg];
    }

    block = get_nodes_block(before);
    /* step1: create copies where immediately possible */
    for (r = 0; r < n_regs; /* empty */) {
        ir_node *copy;
        ir_node *src;
        const arch_register_t *reg;
        unsigned old_r = permutation[r];

        /* - no need to do anything for fixed points.
         * - we can't copy if the value in the dest reg is still needed */
        if (old_r == r || n_used[r] > 0) {
            ++r;
            continue;
        }

        /* create a copy */
        src  = assignments[old_r];
        copy = be_new_Copy(cls, block, src);
        sched_add_before(before, copy);
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Copy %+F (from %+F, before %+F) -> %s\n",
            copy, src, before, reg->name));
        mark_as_copy_of(copy, src);
        use_reg(copy, reg);

        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, copy);
        }

        /* old register has 1 user less, permutation is resolved */
        assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
        permutation[r] = r;

        assert(n_used[old_r] > 0);
        --n_used[old_r];
        if (n_used[old_r] == 0) {
            if (live_nodes != NULL) {
                ir_nodeset_remove(live_nodes, src);
            }
            free_reg_of_value(src);
        }

        /* advance or jump back (if this copy enabled another copy) */
        if (old_r < r && n_used[old_r] == 0) {
            r = old_r;
        } else {
            ++r;
        }
    }
    /* at this point we only have "cycles" left which we have to resolve with
     * perm instructions
     * TODO: if we have free registers left, then we should really use copy
     * instructions for any cycle longer than 2 registers...
     * (this is probably architecture dependent, there might be archs where
     *  copies are preferable even for 2-cycles) */
    /* create perms with the rest */
    for (r = 0; r < n_regs; /* empty */) {
        const arch_register_t *reg;
        unsigned  old_r = permutation[r];
        unsigned  r2;
        ir_node  *in[2];
        ir_node  *perm;
        ir_node  *proj0;
        ir_node  *proj1;

        if (old_r == r) {
            ++r;
            continue;
        }

        /* we shouldn't have copies from 1 value to multiple destinations left */
        assert(n_used[old_r] == 1);

        /* exchange old_r and r2; after that old_r is a fixed point */
        r2 = permutation[old_r];

        in[0] = assignments[r2];
        in[1] = assignments[old_r];
        perm = be_new_Perm(cls, block, 2, in);
        sched_add_before(before, perm);
        DB((dbg, LEVEL_2, "Perm %+F (perm %+F,%+F, before %+F)\n",
            perm, in[0], in[1], before));

        proj0 = new_r_Proj(block, perm, get_irn_mode(in[0]), 0);
        mark_as_copy_of(proj0, in[0]);
        reg = arch_register_for_index(cls, old_r);
        use_reg(proj0, reg);

        proj1 = new_r_Proj(block, perm, get_irn_mode(in[1]), 1);
        mark_as_copy_of(proj1, in[1]);
        reg = arch_register_for_index(cls, r2);
        use_reg(proj1, reg);

        /* 1 value is now in the correct register */
        permutation[old_r] = old_r;
        /* the source of r changed to r2 */
        permutation[r] = r2;

        /* if we have reached a fixpoint update data structures */
        if (live_nodes != NULL) {
            ir_nodeset_remove(live_nodes, in[0]);
            ir_nodeset_remove(live_nodes, in[1]);
            ir_nodeset_remove(live_nodes, proj0);
            ir_nodeset_insert(live_nodes, proj1);
        }
    }
#ifdef DEBUG_libfirm
    /* now we should only have fixpoints left */
    for (r = 0; r < n_regs; ++r) {
        assert(permutation[r] == r);
    }
#endif
}
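/* Trace (illustrative, 4 registers): permutation = {1, 0, 3, 3} means
 * r0 <- r1, r1 <- r0 and r2 <- r3, while r3 keeps its value. Step 1 emits a
 * Copy for r2 <- r3 (n_used[2] == 0); the r0/r1 swap survives as a 2-cycle
 * and is resolved by a single Perm whose two Projs are assigned r0 and r1. */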
/**
 * Free regs for values last used.
 *
 * @param live_nodes  set of live nodes, will be updated
 * @param node        the node to consider
 */
static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
{
    allocation_info_t *info      = get_allocation_info(node);
    const unsigned    *last_uses = &info->last_uses;
    int                arity     = get_irn_arity(node);
    int                i;

    for (i = 0; i < arity; ++i) {
        ir_node *op;

        /* check if one operand is the last use */
        if (!rbitset_is_set(last_uses, i))
            continue;

        op = get_irn_n(node, i);
        free_reg_of_value(op);
        ir_nodeset_remove(live_nodes, op);
    }
}
/**
 * change inputs of a node to the current value (copies/perms)
 */
static void rewire_inputs(ir_node *node)
{
    int i;
    int arity = get_irn_arity(node);

    for (i = 0; i < arity; ++i) {
        ir_node           *op = get_irn_n(node, i);
        allocation_info_t *info;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        info = get_allocation_info(op);
        info = get_allocation_info(info->original_value);
        if (info->current_value != op) {
            set_irn_n(node, i, info->current_value);
        }
    }
}
/**
 * Create a bitset of registers occupied by values living through an
 * instruction
 */
static void determine_live_through_regs(unsigned *bitset, ir_node *node)
{
    const allocation_info_t *info = get_allocation_info(node);
    unsigned                 r;
    int                      i;
    int                      arity;

    /* mark all used registers as potentially live-through */
    for (r = 0; r < n_regs; ++r) {
        if (assignments[r] == NULL)
            continue;
        if (!rbitset_is_set(normal_regs, r))
            continue;

        rbitset_set(bitset, r);
    }

    /* remove registers of values dying at the instruction */
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        ir_node               *op;
        const arch_register_t *reg;

        if (!rbitset_is_set(&info->last_uses, i))
            continue;

        op  = get_irn_n(node, i);
        reg = arch_get_irn_register(op);
        rbitset_clear(bitset, arch_register_get_index(reg));
    }
}
/**
 * Enforce constraints at a node by live range splits.
 *
 * @param live_nodes  the set of live nodes, might be changed
 * @param node        the current node
 */
static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
                                unsigned *output_regs)
{
    int                  arity = get_irn_arity(node);
    int                  i;
    int                  res;
    hungarian_problem_t *bp;
    unsigned             l;
    unsigned             r;
    unsigned            *assignment;
    bool                 good = true;

    /* construct a list of register occupied by live-through values */
    unsigned *live_through_regs = NULL;
    /* see if any use constraints are not met */
    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   r;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        /* are there any limitations for the i'th operand? */
        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited = req->limited;
        reg     = arch_get_irn_register(op);
        r       = arch_register_get_index(reg);
        if (!rbitset_is_set(limited, r)) {
            /* found an assignment outside the limited set */
            good = false;
            break;
        }
    }
    /* is any of the live-throughs using a constrained output register? */
    if (get_irn_mode(node) == mode_T) {
        const ir_edge_t *edge;

        foreach_out_edge(node, edge) {
            ir_node *proj = get_edge_src_irn(edge);
            const arch_register_req_t *req;

            if (!arch_irn_consider_in_reg_alloc(cls, proj))
                continue;

            req = arch_get_register_req_out(proj);
            if (!(req->type & arch_register_req_type_limited))
                continue;

            if (live_through_regs == NULL) {
                rbitset_alloca(live_through_regs, n_regs);
                determine_live_through_regs(live_through_regs, node);
            }

            rbitset_or(output_regs, req->limited, n_regs);
            if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {
                good = false;
            }
        }
    } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
        const arch_register_req_t *req = arch_get_register_req_out(node);
        if (req->type & arch_register_req_type_limited) {
            rbitset_alloca(live_through_regs, n_regs);
            determine_live_through_regs(live_through_regs, node);
            if (rbitsets_have_common(req->limited, live_through_regs, n_regs)) {
                good = false;
            }
            rbitset_or(output_regs, req->limited, n_regs);
        }
    }

    if (good)
        return;
    /* create these arrays if we haven't yet */
    if (live_through_regs == NULL) {
        rbitset_alloca(live_through_regs, n_regs);
    }
    /* at this point we have to construct a bipartite matching problem to see
     * which values should go to which registers
     * Note: We're building the matrix in "reverse" - source registers are
     *       right, destinations left, because this will produce the solution
     *       in the format required for permute_values.
     */
    bp = hungarian_new(n_regs, n_regs, HUNGARIAN_MATCH_PERFECT);
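    /* The edges added below encode utilities: keeping a value where it is
     * (l == r) scores 9, any other allowed source/destination pair scores 8,
     * and forbidden pairs get no edge at all, so the maximizing solver moves
     * as few values as possible while satisfying every limit. */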
    /* add all combinations, then remove not allowed ones */
    for (l = 0; l < n_regs; ++l) {
        if (!rbitset_is_set(normal_regs, l)) {
            hungarian_add(bp, l, l, 1);
            continue;
        }

        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(normal_regs, r))
                continue;
            /* live-through values may not use constrained output registers */
            if (rbitset_is_set(live_through_regs, l)
                && rbitset_is_set(output_regs, r))
                continue;

            hungarian_add(bp, r, l, l == r ? 9 : 8);
        }
    }
    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   current_reg;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        req = arch_get_register_req(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited     = req->limited;
        reg         = arch_get_irn_register(op);
        current_reg = arch_register_get_index(reg);
        for (r = 0; r < n_regs; ++r) {
            if (rbitset_is_set(limited, r))
                continue;
            hungarian_remv(bp, r, current_reg);
        }
    }
    //hungarian_print_cost_matrix(bp, 1);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(unsigned, n_regs);
    res = hungarian_solve(bp, (int*) assignment, NULL, 0);
    assert(res == 0);

#if 0
    fprintf(stderr, "Swap result:");
    for (i = 0; i < (int) n_regs; ++i) {
        fprintf(stderr, " %d", assignment[i]);
    }
    fprintf(stderr, "\n");
#endif

    hungarian_free(bp);

    permute_values(live_nodes, node, assignment);
}
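/* End-to-end example (illustrative): suppose an operand is constrained to r0
 * while an unrelated value lives through the instruction in r0. The matching
 * above assigns the live-through value to some free register and the
 * operand's value to r0; permute_values then materializes that plan as
 * Copy/Perm nodes in front of the instruction. */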
/** test whether @p test_value is a copy of the value of node @p value */
static bool is_copy_of(ir_node *value, ir_node *test_value)
{
    allocation_info_t *test_info;
    allocation_info_t *info;

    if (value == test_value)
        return true;

    info      = get_allocation_info(value);
    test_info = get_allocation_info(test_value);
    return test_info->original_value == info->original_value;
}
/**
 * find a value in the end-assignment of a basic block
 * @returns the index into the assignment array if found
 *          -1 if not found
 */
static int find_value_in_block_info(block_info_t *info, ir_node *value)
{
    unsigned   r;
    ir_node  **assignments = info->assignments;
    for (r = 0; r < n_regs; ++r) {
        ir_node *a_value = assignments[r];

        if (a_value == NULL)
            continue;
        if (is_copy_of(a_value, value))
            return (int) r;
    }

    return -1;
}
/**
 * Create the necessary permutations at the end of a basic block to fulfill
 * the register assignment for phi-nodes in the next block
 */
static void add_phi_permutations(ir_node *block, int p)
{
    unsigned      r;
    unsigned     *permutation;
    ir_node     **old_assignments;
    bool          need_permutation;
    ir_node      *node;
    ir_node      *pred = get_Block_cfgpred_block(block, p);

    block_info_t *pred_info = get_block_info(pred);

    /* predecessor not processed yet? nothing to do */
    if (!pred_info->processed)
        return;
    permutation = ALLOCAN(unsigned, n_regs);
    for (r = 0; r < n_regs; ++r) {
        permutation[r] = r;
    }

    /* check phi nodes */
    need_permutation = false;
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        const arch_register_t *reg;
        int                    regn;
        int                    a;
        ir_node               *op;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        op = get_Phi_pred(node, p);
        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        a = find_value_in_block_info(pred_info, op);
        assert(a >= 0);

        reg  = arch_get_irn_register(node);
        regn = arch_register_get_index(reg);
        if (regn != a) {
            permutation[regn] = a;
            need_permutation  = true;
        }
    }
    if (need_permutation) {
        /* permute values at end of predecessor */
        old_assignments = assignments;
        assignments     = pred_info->assignments;
        permute_values(NULL, be_get_end_of_block_insertion_point(pred),
                       permutation);
        assignments = old_assignments;
    }
    /* change phi nodes to use the copied values */
    node = sched_first(block);
    for ( ; is_Phi(node); node = sched_next(node)) {
        int      a;
        ir_node *op;

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        op = get_Phi_pred(node, p);
        /* no need to do anything for Unknown inputs */
        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        /* we have permuted all values into the correct registers so we can
         * simply query which value occupies the phi's register in the
         * predecessor */
        a  = arch_register_get_index(arch_get_irn_register(node));
        op = pred_info->assignments[a];
        set_Phi_pred(node, p, op);
    }
}
/**
 * Set preferences for a phi's register based on the registers used on the
 * phi inputs.
 */
static void adapt_phi_prefs(ir_node *phi)
{
    int                i;
    int                arity = get_irn_arity(phi);
    ir_node           *block = get_nodes_block(phi);
    allocation_info_t *info  = get_allocation_info(phi);

    for (i = 0; i < arity; ++i) {
        ir_node               *op  = get_irn_n(phi, i);
        const arch_register_t *reg = arch_get_irn_register(op);
        ir_node               *pred_block;
        block_info_t          *pred_block_info;
        float                  weight;
        unsigned               r;

        if (reg == NULL)
            continue;

        /* we only give the bonus if the predecessor already has registers
         * assigned, otherwise we only see a dummy value
         * and any conclusions about its register are useless */
        pred_block      = get_Block_cfgpred_block(block, i);
        pred_block_info = get_block_info(pred_block);
        if (!pred_block_info->processed)
            continue;

        /* give bonus for already assigned register */
        weight = get_block_execfreq(execfreqs, pred_block);
        r      = arch_register_get_index(reg);
        info->prefs[r] += weight * AFF_PHI;
    }
}
/**
 * After a phi has been assigned a register, propagate that preference
 * to the phi inputs.
 */
static void propagate_phi_register(ir_node *phi, unsigned assigned_r)
{
    int      i;
    ir_node *block = get_nodes_block(phi);
    int      arity = get_irn_arity(phi);

    for (i = 0; i < arity; ++i) {
        ir_node           *op         = get_Phi_pred(phi, i);
        allocation_info_t *info       = get_allocation_info(op);
        ir_node           *pred_block = get_Block_cfgpred_block(block, i);
        unsigned           r;
        float              weight
            = get_block_execfreq(execfreqs, pred_block) * AFF_PHI;

        if (info->prefs[assigned_r] >= weight)
            continue;

        /* promote the preferred register */
        for (r = 0; r < n_regs; ++r) {
            if (r == assigned_r) {
                info->prefs[r] = AFF_PHI * weight;
            } else {
                info->prefs[r] -= AFF_PHI * weight;
            }
        }

        if (is_Phi(op))
            propagate_phi_register(op, assigned_r);
    }
}
static void assign_phi_registers(ir_node *block)
{
    int                  n_phis = 0;
    int                  n;
    int                  res;
    int                 *assignment;
    ir_node             *node;
    hungarian_problem_t *bp;

    /* count phi nodes */
    sched_foreach(block, node) {
        if (!is_Phi(node))
            break;
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;
        ++n_phis;
    }

    if (n_phis == 0)
        return;
1468 bp = hungarian_new(n_phis, n_regs, HUNGARIAN_MATCH_PERFECT);
1470 sched_foreach(block, node) {
1473 allocation_info_t *info;
1476 if (!arch_irn_consider_in_reg_alloc(cls, node))
1479 /* give boni for predecessor colorings */
1480 adapt_phi_prefs(node);
1481 /* add stuff to bipartite problem */
1482 info = get_allocation_info(node);
1483 DB((dbg, LEVEL_3, "Prefs for %+F: ", node));
1484 for (r = 0; r < n_regs; ++r) {
1487 if (!rbitset_is_set(normal_regs, r))
1490 costs = info->prefs[r];
1491 costs = costs < 0 ? -logf(-costs+1) : logf(costs+1);
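            /* logf compresses the unbounded preference values while keeping
             * their relative order, so a single extreme preference cannot
             * dominate the whole matching */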
            hungarian_add(bp, n, r, costs);
            DB((dbg, LEVEL_3, " %s(%f)", arch_register_for_index(cls, r)->name,
                info->prefs[r]));
        }
        DB((dbg, LEVEL_3, "\n"));
        ++n;
    }
    //hungarian_print_cost_matrix(bp, 7);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(int, n_regs);
    res        = hungarian_solve(bp, assignment, NULL, 0);
    assert(res == 0);
    hungarian_free(bp);

    /* apply results */
    n = 0;
    sched_foreach(block, node) {
        unsigned               r;
        const arch_register_t *reg;

        if (!is_Phi(node))
            break;
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        r = assignment[n++];
        assert(rbitset_is_set(normal_regs, r));
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
        use_reg(node, reg);

        /* adapt preferences for phi inputs */
        propagate_phi_register(node, r);
    }
}
/**
 * Walker: assign registers to all nodes of a block that
 * need registers from the currently considered register class.
 */
static void allocate_coalesce_block(ir_node *block, void *data)
{
    int                    i;
    ir_nodeset_t           live_nodes;
    ir_nodeset_iterator_t  iter;
    ir_node               *node;
    int                    n_preds;
    block_info_t          *block_info;
    block_info_t         **pred_block_infos;
    ir_node              **phi_ins;
    unsigned              *output_regs; /**< collects registers which must not
                                             be used for optimistic splits */
    unsigned               r;

    (void) data;
    DB((dbg, LEVEL_2, "* Block %+F\n", block));

    /* clear assignments */
    block_info  = get_block_info(block);
    assignments = block_info->assignments;

    ir_nodeset_init(&live_nodes);
    /* gather regalloc infos of predecessor blocks */
    n_preds          = get_Block_n_cfgpreds(block);
    pred_block_infos = ALLOCAN(block_info_t*, n_preds);
    for (i = 0; i < n_preds; ++i) {
        ir_node      *pred      = get_Block_cfgpred_block(block, i);
        block_info_t *pred_info = get_block_info(pred);
        pred_block_infos[i]     = pred_info;
    }

    phi_ins = ALLOCAN(ir_node*, n_preds);
    /* collect live-in nodes and preassigned values */
    be_lv_foreach(lv, block, be_lv_state_in, i) {
        const arch_register_t *reg;
        int                    p;
        bool                   need_phi = false;

        node = be_lv_get_irn(lv, block, i);
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* check all predecessors for this value: if it is not everywhere the
         * same or unknown, then we have to construct a phi
         * (we collect the potential phi inputs here) */
        for (p = 0; p < n_preds; ++p) {
            block_info_t *pred_info = pred_block_infos[p];

            if (!pred_info->processed) {
                /* use node for now, it will get fixed later */
                phi_ins[p] = node;
                need_phi   = true;
            } else {
                int a = find_value_in_block_info(pred_info, node);

                /* must live out of predecessor */
                assert(a >= 0);
                phi_ins[p] = pred_info->assignments[a];
                /* different value from last time? then we need a phi */
                if (p > 0 && phi_ins[p-1] != phi_ins[p]) {
                    need_phi = true;
                }
            }
        }
        if (need_phi) {
            ir_mode                   *mode = get_irn_mode(node);
            const arch_register_req_t *req  = get_default_req_current_cls();
            ir_node                   *phi;
            int                        i;

            phi = new_r_Phi(block, n_preds, phi_ins, mode);
            be_set_phi_reg_req(phi, req);

            DB((dbg, LEVEL_3, "Create Phi %+F (for %+F) -", phi, node));
#ifdef DEBUG_libfirm
            for (i = 0; i < n_preds; ++i) {
                DB((dbg, LEVEL_3, " %+F", phi_ins[i]));
            }
            DB((dbg, LEVEL_3, "\n"));
#endif
            mark_as_copy_of(phi, node);
            sched_add_after(block, phi);

            node = phi;
        } else {
            allocation_info_t *info = get_allocation_info(node);
            info->current_value = phi_ins[0];

            /* Grab 1 of the inputs we constructed (might not be the same as
             * "node" as we could see the same copy of the value in all
             * predecessors) */
            node = phi_ins[0];
        }

        /* if the node already has a register assigned use it */
        reg = arch_get_irn_register(node);
        if (reg != NULL) {
            /* TODO: consult pred-block infos here. The value could be copied
             * away in some/all predecessor blocks. We need to construct
             * phi-nodes in this case.
             * We even need to construct some Phi_0 like constructs in cases
             * where the predecessor allocation is not determined yet. */
            use_reg(node, reg);
        }

        /* remember that this node is live at the beginning of the block */
        ir_nodeset_insert(&live_nodes, node);
    }
    rbitset_alloca(output_regs, n_regs);

    /* handle phis... */
    assign_phi_registers(block);
    /* assign regs for live-in values */
    foreach_ir_nodeset(&live_nodes, node, iter) {
        const arch_register_t *reg = arch_get_irn_register(node);
        if (reg != NULL)
            continue;

        assign_reg(block, node, output_regs);
        /* shouldn't happen if we color in dominance order */
        assert(!is_Phi(node));
    }
    /* assign instructions in the block */
    sched_foreach(block, node) {
        unsigned r;

        /* phis are already assigned */
        if (is_Phi(node)) {
            rewire_inputs(node);
            continue;
        }

        /* enforce use constraints */
        rbitset_clear_all(output_regs, n_regs);
        enforce_constraints(&live_nodes, node, output_regs);
        /* we may not use registers occupied here for optimistic splits */
        for (r = 0; r < n_regs; ++r) {
            if (assignments[r] != NULL)
                rbitset_set(output_regs, r);
        }

        rewire_inputs(node);

        /* free registers of values last used at this instruction */
        free_last_uses(&live_nodes, node);

        /* assign output registers */
        /* TODO: 2 phases: first: pre-assigned ones, 2nd real regs */
        if (get_irn_mode(node) == mode_T) {
            const ir_edge_t *edge;
            foreach_out_edge(node, edge) {
                ir_node *proj = get_edge_src_irn(edge);
                if (!arch_irn_consider_in_reg_alloc(cls, proj))
                    continue;
                assign_reg(block, proj, output_regs);
            }
        } else if (arch_irn_consider_in_reg_alloc(cls, node)) {
            assign_reg(block, node, output_regs);
        }
    }
    ir_nodeset_destroy(&live_nodes);

    block_info->processed = true;

    /* permute values at end of predecessor blocks in case of phi-nodes */
    if (n_preds > 1) {
        int p;
        for (p = 0; p < n_preds; ++p) {
            add_phi_permutations(block, p);
        }
    }

    /* if we have exactly 1 successor then we might be able to produce phi
     * copies now */
    if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) == 1) {
        const ir_edge_t *edge
            = get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
        ir_node      *succ      = get_edge_src_irn(edge);
        int           p         = get_edge_src_pos(edge);
        block_info_t *succ_info = get_block_info(succ);

        if (succ_info->processed) {
            add_phi_permutations(succ, p);
        }
    }
}
typedef struct block_costs_t block_costs_t;
struct block_costs_t {
    float costs;   /**< costs of the block */
    int   dfs_num; /**< depth first search number (to detect backedges) */
};

static int cmp_block_costs(const void *d1, const void *d2)
{
    const ir_node * const *block1 = d1;
    const ir_node * const *block2 = d2;
    const block_costs_t   *info1  = get_irn_link(*block1);
    const block_costs_t   *info2  = get_irn_link(*block2);
    return QSORT_CMP(info2->costs, info1->costs);
}
static void determine_block_order(void)
{
    int       i;
    ir_node **blocklist = be_get_cfgpostorder(irg);
    int       n_blocks  = ARR_LEN(blocklist);
    int       dfs_num   = 0;
    pdeq     *worklist  = new_pdeq();
    ir_node **order     = XMALLOCN(ir_node*, n_blocks);
    int       order_p   = 0;

    /* clear block links... */
    for (i = 0; i < n_blocks; ++i) {
        ir_node *block = blocklist[i];
        set_irn_link(block, NULL);
    }
    /* walk blocks in reverse postorder, the costs for each block are the
     * sum of the costs of its predecessors (excluding the costs on backedges
     * which we can't determine) */
    for (i = n_blocks-1; i >= 0; --i) {
        block_costs_t *cost_info;
        ir_node *block = blocklist[i];

        float execfreq   = get_block_execfreq(execfreqs, block);
        float costs      = execfreq;
        int   n_cfgpreds = get_Block_n_cfgpreds(block);
        int   p;

        for (p = 0; p < n_cfgpreds; ++p) {
            ir_node       *pred_block = get_Block_cfgpred_block(block, p);
            block_costs_t *pred_costs = get_irn_link(pred_block);
            /* we don't have any info for backedges */
            if (pred_costs == NULL)
                continue;
            costs += pred_costs->costs;
        }

        cost_info          = OALLOCZ(&obst, block_costs_t);
        cost_info->costs   = costs;
        cost_info->dfs_num = dfs_num++;
        set_irn_link(block, cost_info);
    }
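    /* Example (illustrative): a diamond A -> {B, C} -> D with execfreqs
     * 1.0, 0.5, 0.5 and 1.0 yields costs A = 1.0, B = C = 1.5 and
     * D = 1.0 + 1.5 + 1.5 = 4.0, so busy join blocks sort to the front of
     * the order. */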
    /* sort array by block costs */
    qsort(blocklist, n_blocks, sizeof(blocklist[0]), cmp_block_costs);

    ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
    inc_irg_block_visited(irg);

    for (i = 0; i < n_blocks; ++i) {
        ir_node *block = blocklist[i];
        if (Block_block_visited(block))
            continue;
        /* continually add predecessors with highest costs to worklist
         * (without using backedges) */
        do {
            block_costs_t *info       = get_irn_link(block);
            ir_node       *best_pred  = NULL;
            float          best_costs = -1;
            int            n_cfgpred  = get_Block_n_cfgpreds(block);
            int            p;

            pdeq_putr(worklist, block);
            mark_Block_block_visited(block);
            for (p = 0; p < n_cfgpred; ++p) {
                ir_node       *pred_block = get_Block_cfgpred_block(block, p);
                block_costs_t *pred_info  = get_irn_link(pred_block);

                /* ignore backedges */
                if (pred_info->dfs_num > info->dfs_num)
                    continue;

                if (pred_info->costs > best_costs) {
                    best_costs = pred_info->costs;
                    best_pred  = pred_block;
                }
            }
            block = best_pred;
        } while (block != NULL && !Block_block_visited(block));
        /* now put all nodes in the worklist into our final order */
        while (!pdeq_empty(worklist)) {
            ir_node *pblock = pdeq_getr(worklist);
            assert(order_p < n_blocks);
            order[order_p++] = pblock;
        }
    }
    assert(order_p == n_blocks);
    del_pdeq(worklist);

    ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);

    DEL_ARR_F(blocklist);

    obstack_free(&obst, NULL);
    obstack_init(&obst);

    block_order   = order;
    n_block_order = n_blocks;
}
/**
 * Run the register allocator for the current register class.
 */
static void be_straight_alloc_cls(void)
{
    int i;

    lv = be_assure_liveness(birg);
    be_liveness_assure_sets(lv);
    be_liveness_assure_chk(lv);

    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);

    DB((dbg, LEVEL_2, "=== Allocating registers of %s ===\n", cls->name));

    be_clear_links(irg);
    irg_block_walk_graph(irg, NULL, analyze_block, NULL);
    combine_congruence_classes();

    for (i = 0; i < n_block_order; ++i) {
        ir_node *block = block_order[i];
        allocate_coalesce_block(block, NULL);
    }

    ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
}
static void dump(int mask, ir_graph *irg, const char *suffix,
                 void (*dumper)(ir_graph *, const char *))
{
    if (birg->main_env->options->dump_flags & mask)
        be_dump(irg, suffix, dumper);
}
/**
 * Run the spiller on the current graph.
 */
static void spill(void)
{
    /* make sure all nodes show their real register pressure */
    BE_TIMER_PUSH(t_ra_constr);
    be_pre_spill_prepare_constr(birg, cls);
    BE_TIMER_POP(t_ra_constr);

    dump(DUMP_RA, irg, "-spillprepare", dump_ir_block_graph_sched);

    /* spill */
    BE_TIMER_PUSH(t_ra_spill);
    be_do_spill(birg, cls);
    BE_TIMER_POP(t_ra_spill);

    BE_TIMER_PUSH(t_ra_spill_apply);
    check_for_memory_operands(irg);
    BE_TIMER_POP(t_ra_spill_apply);

    dump(DUMP_RA, irg, "-spill", dump_ir_block_graph_sched);
}
/**
 * The straight register allocator for a whole procedure.
 */
static void be_straight_alloc(be_irg_t *new_birg)
{
    const arch_env_t *arch_env = new_birg->main_env->arch_env;
    int               n_cls    = arch_env_get_n_reg_class(arch_env);
    int               c;

    obstack_init(&obst);

    birg      = new_birg;
    irg       = be_get_birg_irg(birg);
    execfreqs = birg->exec_freq;

    /* determine a good coloring order */
    determine_block_order();

    for (c = 0; c < n_cls; ++c) {
        cls             = arch_env_get_reg_class(arch_env, c);
        default_cls_req = NULL;
        if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
            continue;

        stat_ev_ctx_push_str("regcls", cls->name);

        n_regs      = arch_register_class_n_regs(cls);
        normal_regs = rbitset_malloc(n_regs);
        be_abi_set_non_ignore_regs(birg->abi, cls, normal_regs);

        spill();
        /* verify schedule and register pressure */
        BE_TIMER_PUSH(t_verify);
        if (birg->main_env->options->vrfy_option == BE_VRFY_WARN) {
            be_verify_schedule(birg);
            be_verify_register_pressure(birg, cls, irg);
        } else if (birg->main_env->options->vrfy_option == BE_VRFY_ASSERT) {
            assert(be_verify_schedule(birg) && "Schedule verification failed");
            assert(be_verify_register_pressure(birg, cls, irg)
                   && "Register pressure verification failed");
        }
        BE_TIMER_POP(t_verify);
        BE_TIMER_PUSH(t_ra_color);
        be_straight_alloc_cls();
        BE_TIMER_POP(t_ra_color);

        /* we most probably constructed new Phis so liveness info is invalid
         * now */
        /* TODO: test liveness_introduce */
        be_liveness_invalidate(lv);
        free(normal_regs);

        stat_ev_ctx_pop("regcls");
    }
    BE_TIMER_PUSH(t_ra_spill_apply);
    be_abi_fix_stack_nodes(birg->abi);
    BE_TIMER_POP(t_ra_spill_apply);

    BE_TIMER_PUSH(t_verify);
    if (birg->main_env->options->vrfy_option == BE_VRFY_WARN) {
        be_verify_register_allocation(birg);
    } else if (birg->main_env->options->vrfy_option == BE_VRFY_ASSERT) {
        assert(be_verify_register_allocation(birg)
               && "Register allocation invalid");
    }
    BE_TIMER_POP(t_verify);

    obstack_free(&obst, NULL);
}
/**
 * Initializes this module.
 */
void be_init_straight_alloc(void)
{
    static be_ra_t be_ra_straight = {
        be_straight_alloc,
    };

    FIRM_DBG_REGISTER(dbg, "firm.be.straightalloc");

    be_register_allocator("straight", &be_ra_straight);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_straight_alloc);