/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */
/**
 * @brief   Preference Guided Register Assignment
 * @author  Matthias Braun
 *
 * The idea is to allocate registers in 2 passes:
 * 1. A first pass to determine "preferred" registers for live-ranges. This
 *    calculates for each register and each live-range a value indicating
 *    its usefulness. (You can roughly think of the value as the negated
 *    costs of the copies needed when the value sits in that specific
 *    register...)
 * 2. Walk the blocks and assign registers in a greedy fashion, preferring
 *    registers with high preference values. When register constraints are
 *    not met, add copies and split live-ranges (see the example below).
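 *
 * Illustrative (hypothetical) example: if an instruction requires one of
 * its inputs in a fixed register r0, pass 1 subtracts a penalty from r0's
 * preference for every other value live at that point. Pass 2 then tends
 * to keep r0 free until that instruction is reached, so no extra copy is
 * needed.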
 *
 * TODO:
 *  - make use of free registers in the permute_values code
 */
#include "iredges_t.h"
#include "irgraph_t.h"
#include "raw_bitset.h"
#include "unionfind.h"
#include "hungarian.h"
#include "bechordal_t.h"
#include "bespillutil.h"
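/* Weighting factors for the preference calculation: uses and definitions
 * count with full weight, values that are merely live next to a constrained
 * node are weighted much weaker (NEIGHBOR_FACTOR). */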
#define USE_FACTOR                     1.0f
#define DEF_FACTOR                     1.0f
#define NEIGHBOR_FACTOR                0.2f
#define AFF_SHOULD_BE_SAME             0.5f
#define AFF_PHI                        1.0f
#define SPLIT_DELTA                    1.0f
#define MAX_OPTIMISTIC_SPLIT_RECURSION 0
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static struct obstack               obst;
static ir_graph                    *irg;
static const arch_register_class_t *cls;
static be_lv_t                     *lv;
static const ir_exec_freq          *execfreqs;
static unsigned                     n_regs;
static unsigned                    *normal_regs;
static int                         *congruence_classes;
static ir_node                    **block_order;
static size_t                       n_block_order;
static int                          create_preferences        = true;
static int                          create_congruence_classes = true;
static int                          propagate_phi_registers   = true;
static const lc_opt_table_entry_t options[] = {
    LC_OPT_ENT_BOOL("prefs",       "use preference based coloring", &create_preferences),
    LC_OPT_ENT_BOOL("congruences", "create congruence classes",     &create_congruence_classes),
    LC_OPT_ENT_BOOL("prop_phi",    "propagate phi registers",       &propagate_phi_registers),
    LC_OPT_LAST
};
/** currently active assignments (while processing a basic block);
 * maps registers to values (their current copies) */
static ir_node **assignments;
/**
 * allocation information: last_uses, register preferences.
 * The information is per firm-node.
 */
struct allocation_info_t {
    unsigned  last_uses[2];   /**< bitset indicating last uses (input pos) */
    ir_node  *current_value;  /**< copy of the value that should be used */
    ir_node  *original_value; /**< for copies point to original value */
    float     prefs[0];       /**< register preferences */
};
typedef struct allocation_info_t allocation_info_t;
/** helper datastructure used when sorting register preferences */
struct reg_pref_t {
    unsigned num;
    float    pref;
};
typedef struct reg_pref_t reg_pref_t;
/** per basic-block information */
struct block_info_t {
    bool     processed;      /**< indicates whether block is processed */
    ir_node *assignments[0]; /**< register assignments at end of block */
};
typedef struct block_info_t block_info_t;
/**
 * Get the allocation info for a node.
 * The info is allocated on the first visit of a node.
 */
static allocation_info_t *get_allocation_info(ir_node *node)
{
    allocation_info_t *info = (allocation_info_t*)get_irn_link(node);
    if (info == NULL) {
        info = OALLOCFZ(&obst, allocation_info_t, prefs, n_regs);
        info->current_value  = node;
        info->original_value = node;
        set_irn_link(node, info);
    }
    return info;
}
static allocation_info_t *try_get_allocation_info(const ir_node *node)
{
    return (allocation_info_t*) get_irn_link(node);
}
/**
 * Get allocation information for a basic block
 */
static block_info_t *get_block_info(ir_node *block)
{
    block_info_t *info = (block_info_t*)get_irn_link(block);

    assert(is_Block(block));
    if (info == NULL) {
        info = OALLOCFZ(&obst, block_info_t, assignments, n_regs);
        set_irn_link(block, info);
    }
    return info;
}
/**
 * Link the allocation info of a node to a copy.
 * Afterwards, both nodes use the same allocation info.
 * The copy must not have an allocation info assigned yet.
 *
 * @param copy   the node that gets the allocation info assigned
 * @param value  the original node
 */
static void mark_as_copy_of(ir_node *copy, ir_node *value)
{
    ir_node           *original;
    allocation_info_t *info      = get_allocation_info(value);
    allocation_info_t *copy_info = get_allocation_info(copy);

    /* find original value */
    original = info->original_value;
    if (original != value) {
        info = get_allocation_info(original);
    }

    assert(info->original_value == original);
    info->current_value = copy;

    /* the copy should not be linked to something else yet */
    assert(copy_info->original_value == copy);
    copy_info->original_value = original;

    /* copy over allocation preferences */
    memcpy(copy_info->prefs, info->prefs, n_regs * sizeof(copy_info->prefs[0]));
}
/**
 * Calculate the penalties for every register on a node and its live neighbors.
 *
 * @param live_nodes  the set of live nodes at the current position, may be NULL
 * @param penalty     the penalty to subtract from the preferences
 * @param limited     a raw bitset containing the limited set for the node
 * @param node        the node
 */
static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
                                      float penalty, const unsigned* limited,
                                      ir_node *node)
{
    ir_nodeset_iterator_t iter;
    unsigned              r;
    size_t                n_allowed;
    allocation_info_t    *info = get_allocation_info(node);
    ir_node              *neighbor;

    /* give penalty for all forbidden regs */
    for (r = 0; r < n_regs; ++r) {
        if (rbitset_is_set(limited, r))
            continue;

        info->prefs[r] -= penalty;
    }

    /* all other live values should get a penalty for allowed regs */
    if (live_nodes == NULL)
        return;

    penalty   *= NEIGHBOR_FACTOR;
    n_allowed  = rbitset_popcount(limited, n_regs);
    if (n_allowed > 1) {
        /* only create a very weak penalty if multiple regs are allowed */
        penalty = (penalty * 0.8f) / n_allowed;
    }
    foreach_ir_nodeset(live_nodes, neighbor, iter) {
        allocation_info_t *neighbor_info;

        /* TODO: if op is used on multiple inputs we might not do a
         * continue here */
        if (neighbor == node)
            continue;

        neighbor_info = get_allocation_info(neighbor);
        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(limited, r))
                continue;

            neighbor_info->prefs[r] -= penalty;
        }
    }
}
/**
 * Calculate the preferences of a definition for the current register class.
 * If the definition uses a limited set of registers, reduce the preferences
 * for the limited registers on the node and its neighbors.
 *
 * @param live_nodes  the set of live nodes at the current node
 * @param weight      the weight (the execution frequency of the current block)
 * @param node        the current node
 */
static void check_defs(const ir_nodeset_t *live_nodes, float weight,
                       ir_node *node)
{
    const arch_register_req_t *req = arch_get_irn_register_req(node);
    if (req->type & arch_register_req_type_limited) {
        const unsigned *limited = req->limited;
        float           penalty = weight * DEF_FACTOR;
        give_penalties_for_limits(live_nodes, penalty, limited, node);
    }

    if (req->type & arch_register_req_type_should_be_same) {
        ir_node           *insn  = skip_Proj(node);
        allocation_info_t *info  = get_allocation_info(node);
        int                arity = get_irn_arity(insn);
        int                i;

        float factor = 1.0f / rbitset_popcount(&req->other_same, arity);
        for (i = 0; i < arity; ++i) {
            ir_node           *op;
            unsigned           r;
            allocation_info_t *op_info;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op = get_irn_n(insn, i);

            /* if the value at the should_be_same input doesn't die at the
             * node, then it is of no use to propagate the constraints (since
             * a copy will emerge anyway) */
            if (ir_nodeset_contains(live_nodes, op))
                continue;

            op_info = get_allocation_info(op);
            for (r = 0; r < n_regs; ++r) {
                op_info->prefs[r] += info->prefs[r] * factor;
            }
        }
    }
}
/**
 * Walker: runs on a block and calculates the preferences for every node
 * and every register from the considered register class.
 */
static void analyze_block(ir_node *block, void *data)
{
    float         weight = (float)get_block_execfreq(execfreqs, block);
    ir_nodeset_t  live_nodes;
    ir_node      *node;
    (void) data;

    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);

    sched_foreach_reverse(block, node) {
        allocation_info_t *info;
        int                i;
        int                arity;

        if (is_Phi(node))
            break;

        if (create_preferences) {
            ir_node *value;
            be_foreach_definition(node, cls, value,
                check_defs(&live_nodes, weight, value);
            );
        }

        /* mark last uses */
        arity = get_irn_arity(node);

        /* the allocation info uses a fixed-size bitset to mark the last
         * used inputs, so we will fail for a node with more inputs than
         * bits in that set */
        if (arity >= (int) sizeof(info->last_uses) * 8) {
            panic("Node with more than %d inputs not supported yet",
                  (int) sizeof(info->last_uses) * 8);
        }

        info = get_allocation_info(node);
        for (i = 0; i < arity; ++i) {
            ir_node                   *op  = get_irn_n(node, i);
            const arch_register_req_t *req = arch_get_irn_register_req(op);
            if (req->cls != cls)
                continue;

            /* last usage of a value? */
            if (!ir_nodeset_contains(&live_nodes, op)) {
                rbitset_set(info->last_uses, i);
            }
        }

        be_liveness_transfer(cls, node, &live_nodes);

        if (create_preferences) {
            /* update weights based on usage constraints */
            for (i = 0; i < arity; ++i) {
                const arch_register_req_t *req;
                const unsigned            *limited;
                ir_node                   *op = get_irn_n(node, i);

                if (!arch_irn_consider_in_reg_alloc(cls, op))
                    continue;

                req = arch_get_irn_register_req_in(node, i);
                if (!(req->type & arch_register_req_type_limited))
                    continue;

                limited = req->limited;
                give_penalties_for_limits(&live_nodes, weight * USE_FACTOR,
                                          limited, op);
            }
        }
    }

    ir_nodeset_destroy(&live_nodes);
}
static void congruence_def(ir_nodeset_t *live_nodes, const ir_node *node)
{
    const arch_register_req_t *req = arch_get_irn_register_req(node);

    /* should be same constraint? */
    if (req->type & arch_register_req_type_should_be_same) {
        const ir_node *insn     = skip_Proj_const(node);
        int            arity    = get_irn_arity(insn);
        int            i;
        unsigned       node_idx = get_irn_idx(node);
        node_idx = uf_find(congruence_classes, node_idx);

        for (i = 0; i < arity; ++i) {
            ir_node               *live;
            ir_node               *op;
            int                    op_idx;
            ir_nodeset_iterator_t  iter;
            bool                   interferes = false;

            if (!rbitset_is_set(&req->other_same, i))
                continue;

            op     = get_irn_n(insn, i);
            op_idx = get_irn_idx(op);
            op_idx = uf_find(congruence_classes, op_idx);

            /* do we interfere with the value? */
            foreach_ir_nodeset(live_nodes, live, iter) {
                int lv_idx = get_irn_idx(live);
                lv_idx     = uf_find(congruence_classes, lv_idx);
                if (lv_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            /* don't put in same affinity class if we interfere */
            if (interferes)
                continue;

            node_idx = uf_union(congruence_classes, node_idx, op_idx);
            DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
                node, op));
            /* one should_be_same is enough... */
            break;
        }
    }
}
static void create_congruence_class(ir_node *block, void *data)
{
    ir_nodeset_t  live_nodes;
    ir_node      *node;
    (void) data;

    ir_nodeset_init(&live_nodes);
    be_liveness_end_of_block(lv, cls, block, &live_nodes);

    /* check should be same constraints */
    sched_foreach_reverse(block, node) {
        ir_node *value;
        if (is_Phi(node))
            break;

        be_foreach_definition(node, cls, value,
            congruence_def(&live_nodes, value);
        );
        be_liveness_transfer(cls, node, &live_nodes);
    }

    /* check phi congruence classes */
    sched_foreach_reverse_from(node, node) {
        int i;
        int arity;
        int node_idx;
        assert(is_Phi(node));

        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        node_idx = get_irn_idx(node);
        node_idx = uf_find(congruence_classes, node_idx);

        arity = get_irn_arity(node);
        for (i = 0; i < arity; ++i) {
            bool                  interferes = false;
            ir_nodeset_iterator_t iter;
            unsigned              r;
            int                   old_node_idx;
            ir_node              *live;
            ir_node              *phi;
            allocation_info_t    *head_info;
            allocation_info_t    *other_info;
            ir_node              *op     = get_Phi_pred(node, i);
            int                   op_idx = get_irn_idx(op);
            op_idx = uf_find(congruence_classes, op_idx);

            /* do we interfere with the value? */
            foreach_ir_nodeset(&live_nodes, live, iter) {
                int lv_idx = get_irn_idx(live);
                lv_idx     = uf_find(congruence_classes, lv_idx);
                if (lv_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            /* don't put in same affinity class if we interfere */
            if (interferes)
                continue;
            /* any other phi has the same input? */
            sched_foreach(block, phi) {
                ir_node *oop;
                int      oop_idx;
                if (!is_Phi(phi))
                    break;
                if (!arch_irn_consider_in_reg_alloc(cls, phi))
                    continue;
                oop = get_Phi_pred(phi, i);
                if (oop == op)
                    continue;
                oop_idx = get_irn_idx(oop);
                oop_idx = uf_find(congruence_classes, oop_idx);
                if (oop_idx == op_idx) {
                    interferes = true;
                    break;
                }
            }
            if (interferes)
                continue;

            /* merge the 2 congruence classes and sum up their preferences */
            old_node_idx = node_idx;
            node_idx     = uf_union(congruence_classes, node_idx, op_idx);
            DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
                node, op));

            old_node_idx = node_idx == old_node_idx ? op_idx : old_node_idx;
            head_info  = get_allocation_info(get_idx_irn(irg, node_idx));
            other_info = get_allocation_info(get_idx_irn(irg, old_node_idx));
            for (r = 0; r < n_regs; ++r) {
                head_info->prefs[r] += other_info->prefs[r];
            }
        }
    }

    ir_nodeset_destroy(&live_nodes);
}
static void set_congruence_prefs(ir_node *node, void *data)
{
    allocation_info_t *info;
    allocation_info_t *head_info;
    unsigned node_idx = get_irn_idx(node);
    unsigned node_set = uf_find(congruence_classes, node_idx);
    (void) data;

    /* head of congruence class or not in any class */
    if (node_set == node_idx)
        return;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    head_info = get_allocation_info(get_idx_irn(irg, node_set));
    info      = get_allocation_info(node);

    memcpy(info->prefs, head_info->prefs, n_regs * sizeof(info->prefs[0]));
}
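/**
 * Build congruence classes for the whole graph: values that should end up
 * in the same register (should_be_same constraints, phis and their inputs)
 * and do not interfere are united in a union-find structure over node
 * indices; afterwards every class member inherits the preferences of its
 * class head.
 */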
static void combine_congruence_classes(void)
{
    size_t n = get_irg_last_idx(irg);
    congruence_classes = XMALLOCN(int, n);
    uf_init(congruence_classes, n);

    /* create congruence classes */
    irg_block_walk_graph(irg, create_congruence_class, NULL, NULL);
    /* merge preferences */
    irg_walk_graph(irg, set_congruence_prefs, NULL, NULL);
    free(congruence_classes);
}
/**
 * Assign register reg to the given node.
 *
 * @param node  the node
 * @param reg   the register
 */
static void use_reg(ir_node *node, const arch_register_t *reg)
{
    unsigned r = arch_register_get_index(reg);
    assignments[r] = node;
    arch_set_irn_register(node, reg);
}
static void free_reg_of_value(ir_node *node)
{
    const arch_register_t *reg;
    unsigned               r;

    if (!arch_irn_consider_in_reg_alloc(cls, node))
        return;

    reg = arch_get_irn_register(node);
    r   = arch_register_get_index(reg);
    /* assignments[r] may already be NULL if a value is used at 2 inputs,
     * so it gets freed twice. */
    assert(assignments[r] == node || assignments[r] == NULL);
    assignments[r] = NULL;
}
/**
 * Compare two register preferences in decreasing order.
 */
static int compare_reg_pref(const void *e1, const void *e2)
{
    const reg_pref_t *rp1 = (const reg_pref_t*) e1;
    const reg_pref_t *rp2 = (const reg_pref_t*) e2;
    if (rp1->pref < rp2->pref)
        return 1;
    if (rp1->pref > rp2->pref)
        return -1;
    return 0;
}
static void fill_sort_candidates(reg_pref_t *regprefs,
                                 const allocation_info_t *info)
{
    unsigned r;

    for (r = 0; r < n_regs; ++r) {
        float pref = info->prefs[r];
        regprefs[r].num  = r;
        regprefs[r].pref = pref;
    }
    /* TODO: use a stable sort here to avoid unnecessary register jumping */
    qsort(regprefs, n_regs, sizeof(regprefs[0]), compare_reg_pref);
}
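/**
 * Try to free a register by moving the value currently occupying it into
 * another (free) register via a Copy node inserted before @p before. The
 * split is only performed when the preference gain outweighs the copy cost
 * at the execution frequency of the current block; bounded recursion may in
 * turn displace the value of the chosen target register.
 */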
static bool try_optimistic_split(ir_node *to_split, ir_node *before,
                                 float pref, float pref_delta,
                                 unsigned *forbidden_regs, int recursion)
{
    const arch_register_t *from_reg;
    const arch_register_t *reg;
    ir_node               *original_insn;
    ir_node               *block;
    ir_node               *copy;
    unsigned               r = 0;
    unsigned               from_r;
    unsigned               i;
    allocation_info_t     *info = get_allocation_info(to_split);
    reg_pref_t            *prefs;
    float                  delta = 0;
    float                  split_threshold;

    (void) pref;

    /* stupid hack: don't optimistically split dont_spill nodes...
     * (so we don't split away the values produced because of
     *  must_be_different constraints) */
    original_insn = skip_Proj(info->original_value);
    if (arch_get_irn_flags(original_insn) & arch_irn_flags_dont_spill)
        return false;

    from_reg        = arch_get_irn_register(to_split);
    from_r          = arch_register_get_index(from_reg);
    block           = get_nodes_block(before);
    split_threshold = (float)get_block_execfreq(execfreqs, block) * SPLIT_DELTA;

    if (pref_delta < split_threshold*0.5)
        return false;

    /* find the best free position where we could move to */
    prefs = ALLOCAN(reg_pref_t, n_regs);
    fill_sort_candidates(prefs, info);
    for (i = 0; i < n_regs; ++i) {
        float apref;
        float apref_delta;
        bool  res;
        bool  old_source_state;

        /* we need a normal register which is not an output register
         * and different from the current register of to_split */
        r = prefs[i].num;
        if (!rbitset_is_set(normal_regs, r))
            continue;
        if (rbitset_is_set(forbidden_regs, r))
            continue;
        if (r == from_r)
            continue;

        /* is the split worth it? */
        delta = pref_delta + prefs[i].pref;
        if (delta < split_threshold) {
            DB((dbg, LEVEL_3, "Not doing optimistic split of %+F (depth %d), win %f too low\n",
                to_split, recursion, delta));
            return false;
        }

        /* if the register is free then we can do the split */
        if (assignments[r] == NULL)
            break;

        /* otherwise we might try recursively calling optimistic_split */
        if (recursion+1 > MAX_OPTIMISTIC_SPLIT_RECURSION)
            continue;

        apref        = prefs[i].pref;
        apref_delta  = i+1 < n_regs ? apref - prefs[i+1].pref : 0;
        apref_delta += pref_delta - split_threshold;

        /* our source register isn't a useful destination for recursive
         * splits */
        old_source_state = rbitset_is_set(forbidden_regs, from_r);
        rbitset_set(forbidden_regs, from_r);
        /* try recursive split */
        res = try_optimistic_split(assignments[r], before, apref,
                                   apref_delta, forbidden_regs, recursion+1);
        /* restore our destination */
        if (old_source_state) {
            rbitset_set(forbidden_regs, from_r);
        } else {
            rbitset_clear(forbidden_regs, from_r);
        }

        if (res)
            break;
    }
    if (i >= n_regs)
        return false;

    reg  = arch_register_for_index(cls, r);
    copy = be_new_Copy(block, to_split);
    mark_as_copy_of(copy, to_split);
    /* hacky, but correct here */
    if (assignments[arch_register_get_index(from_reg)] == to_split)
        free_reg_of_value(to_split);
    use_reg(copy, reg);
    sched_add_before(before, copy);

    DB((dbg, LEVEL_3,
        "Optimistic live-range split %+F move %+F(%s) -> %s before %+F (win %f, depth %d)\n",
        copy, to_split, from_reg->name, reg->name, before, delta, recursion));
    return true;
}
/**
 * Determine and assign a register for node @p node.
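 *
 * Tries the preassigned register first. Otherwise the registers are tried
 * in order of decreasing preference; the first free allowed register wins.
 * If no register is free, an optimistic live-range split is attempted to
 * make one free.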
 */
static void assign_reg(const ir_node *block, ir_node *node,
                       unsigned *forbidden_regs)
{
    const arch_register_t     *final_reg;
    allocation_info_t         *info;
    const arch_register_req_t *req;
    reg_pref_t                *reg_prefs;
    ir_node                   *in_node;
    unsigned                   r;
    const unsigned            *allowed_regs;
    unsigned                   final_reg_index = 0;

    assert(!is_Phi(node));
    /* preassigned register? */
    final_reg = arch_get_irn_register(node);
    if (final_reg != NULL) {
        DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, final_reg->name));
        use_reg(node, final_reg);
        return;
    }

    req = arch_get_irn_register_req(node);
    /* ignore reqs must be preassigned */
    assert(!(req->type & arch_register_req_type_ignore));

    /* give should_be_same bonuses */
    info    = get_allocation_info(node);
    in_node = skip_Proj(node);
    if (req->type & arch_register_req_type_should_be_same) {
        float weight = (float)get_block_execfreq(execfreqs, block);
        int   arity  = get_irn_arity(in_node);
        int   i;

        assert(arity <= (int) sizeof(req->other_same) * 8);
        for (i = 0; i < arity; ++i) {
            ir_node               *in;
            const arch_register_t *reg;
            unsigned               reg_index;
            if (!rbitset_is_set(&req->other_same, i))
                continue;

            in  = get_irn_n(in_node, i);
            reg = arch_get_irn_register(in);
            assert(reg != NULL);
            reg_index = arch_register_get_index(reg);

            /* if the value didn't die here then we should not propagate the
             * should_be_same info */
            if (assignments[reg_index] == in)
                continue;

            info->prefs[reg_index] += weight * AFF_SHOULD_BE_SAME;
        }
    }

    /* create list of register candidates and sort by their preference */
    DB((dbg, LEVEL_2, "Candidates for %+F:", node));
    reg_prefs = ALLOCAN(reg_pref_t, n_regs);
    fill_sort_candidates(reg_prefs, info);
    for (r = 0; r < n_regs; ++r) {
        unsigned num = reg_prefs[r].num;
        const arch_register_t *reg;

        if (!rbitset_is_set(normal_regs, num))
            continue;
        reg = arch_register_for_index(cls, num);
        DB((dbg, LEVEL_2, " %s(%f)", reg->name, reg_prefs[r].pref));
    }
    DB((dbg, LEVEL_2, "\n"));

    allowed_regs = normal_regs;
    if (req->type & arch_register_req_type_limited) {
        allowed_regs = req->limited;
    }

    for (r = 0; r < n_regs; ++r) {
        float    pref, delta;
        ir_node *before;
        bool     res;

        final_reg_index = reg_prefs[r].num;
        if (!rbitset_is_set(allowed_regs, final_reg_index))
            continue;
        /* alignment constraint? */
        if (req->width > 1 && (req->type & arch_register_req_type_aligned)
                && (final_reg_index % req->width) != 0)
            continue;

        if (assignments[final_reg_index] == NULL)
            break;

        pref   = reg_prefs[r].pref;
        delta  = r+1 < n_regs ? pref - reg_prefs[r+1].pref : 0;
        before = skip_Proj(node);
        res    = try_optimistic_split(assignments[final_reg_index], before,
                                      pref, delta, forbidden_regs, 0);
        if (res)
            break;
    }
    if (r >= n_regs) {
        /* the common reason to hit this panic is when 1 of your nodes is not
         * register pressure faithful */
        panic("No register left for %+F\n", node);
    }

    final_reg = arch_register_for_index(cls, final_reg_index);
    DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, final_reg->name));
    use_reg(node, final_reg);
}
/**
 * Add a permutation in front of a node and change the assignments
 * due to this permutation.
 *
 * To understand this, imagine a permutation like this:
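 *
 * 1 -> 2
 * 2 -> 3
 * 3 -> 1, 5
 * 4 -> 6
 * 7 -> 7
 *
 * (The concrete numbers are only an illustration: register 3 has to be
 *  copied into 2 destinations, registers 1, 2, 4 and 7 into 1 each, and
 *  7 -> 7 is already fulfilled.)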
 *
 * First we count how many destinations a single value has. At the same time
 * we can be sure that each destination register has at most 1 source register
 * (it can have 0, which means we don't care what value is in it).
 * We ignore all fulfilled permutations (like 7->7).
 * In a first pass we create as many copy instructions as possible, as they
 * are generally cheaper than exchanges. We do this by counting into how many
 * destinations a register has to be copied (in the example it's 2 for register
 * 3, or 1 for the registers 1, 2, 4 and 7).
 * We can then create a copy into every destination register whose usecount
 * is 0 (= no one else needs the value in that register).
 *
 * After this step we should only have cycles left. We implement a cyclic
 * permutation of n registers with n-1 transpositions.
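 * (In the example above the cycle 1 -> 2 -> 3 -> 1 remains after the copy
 *  pass; it is resolved with two Perm (exchange) instructions.)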
 *
 * @param live_nodes   the set of live nodes, updated due to live range splits
 * @param before       the node before which we add the permutation
 * @param permutation  the permutation array: indices are the destination
 *                     registers, the values in the array are the source
 *                     registers
 */
static void permute_values(ir_nodeset_t *live_nodes, ir_node *before,
                           unsigned *permutation)
{
    unsigned *n_used = ALLOCANZ(unsigned, n_regs);
    ir_node  *block;
    unsigned  r;

    /* determine how often each source register needs to be read */
    for (r = 0; r < n_regs; ++r) {
        unsigned  old_reg = permutation[r];
        ir_node  *value   = assignments[old_reg];

        if (value == NULL) {
            /* nothing to do here, reg is not live. Mark it as fixpoint
             * so we ignore it in the next steps */
            permutation[r] = r;
            continue;
        }

        ++n_used[old_reg];
    }

    block = get_nodes_block(before);

    /* step 1: create copies where immediately possible */
    for (r = 0; r < n_regs; /* empty */) {
        ir_node *copy;
        ir_node *src;
        const arch_register_t *reg;
        unsigned old_r = permutation[r];

        /* - no need to do anything for fixed points.
         * - we can't copy if the value in the dest reg is still needed */
        if (old_r == r || n_used[r] > 0) {
            ++r;
            continue;
        }

        /* create a copy */
        src  = assignments[old_r];
        copy = be_new_Copy(block, src);
        sched_add_before(before, copy);
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Copy %+F (from %+F, before %+F) -> %s\n",
            copy, src, before, reg->name));
        mark_as_copy_of(copy, src);
        use_reg(copy, reg);

        if (live_nodes != NULL) {
            ir_nodeset_insert(live_nodes, copy);
        }

        /* old register has 1 user less, permutation is resolved */
        assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
        permutation[r] = r;

        assert(n_used[old_r] > 0);
        --n_used[old_r];
        if (n_used[old_r] == 0) {
            if (live_nodes != NULL) {
                ir_nodeset_remove(live_nodes, src);
            }
            free_reg_of_value(src);
        }

        /* advance or jump back (if this copy enabled another copy) */
        if (old_r < r && n_used[old_r] == 0) {
            r = old_r;
        } else {
            ++r;
        }
    }

    /* at this point we only have "cycles" left which we have to resolve with
     * perm instructions
     * TODO: if we have free registers left, then we should really use copy
     * instructions for any cycle longer than 2 registers...
     * (this is probably architecture dependent, there might be archs where
     *  copies are preferable even for 2-cycles) */

    /* create perms with the rest */
    for (r = 0; r < n_regs; /* empty */) {
        const arch_register_t *reg;
        unsigned  old_r = permutation[r];
        unsigned  r2;
        ir_node  *in[2];
        ir_node  *perm;
        ir_node  *proj0;
        ir_node  *proj1;

        if (old_r == r) {
            ++r;
            continue;
        }

        /* we shouldn't have copies from 1 value to multiple destinations left */
        assert(n_used[old_r] == 1);

        /* exchange old_r and r2; after that old_r is a fixed point */
        r2 = permutation[old_r];

        in[0] = assignments[r2];
        in[1] = assignments[old_r];
        perm = be_new_Perm(cls, block, 2, in);
        sched_add_before(before, perm);
        DB((dbg, LEVEL_2, "Perm %+F (perm %+F,%+F, before %+F)\n",
            perm, in[0], in[1], before));

        proj0 = new_r_Proj(perm, get_irn_mode(in[0]), 0);
        mark_as_copy_of(proj0, in[0]);
        reg = arch_register_for_index(cls, old_r);
        use_reg(proj0, reg);

        proj1 = new_r_Proj(perm, get_irn_mode(in[1]), 1);
        mark_as_copy_of(proj1, in[1]);
        reg = arch_register_for_index(cls, r2);
        use_reg(proj1, reg);

        /* 1 value is now in the correct register */
        permutation[old_r] = old_r;
        /* the source of r changed to r2 */
        permutation[r] = r2;

        /* if we have reached a fixpoint update data structures */
        if (live_nodes != NULL) {
            ir_nodeset_remove(live_nodes, in[0]);
            ir_nodeset_remove(live_nodes, in[1]);
            ir_nodeset_remove(live_nodes, proj0);
            ir_nodeset_insert(live_nodes, proj1);
        }
    }

#ifdef DEBUG_libfirm
    /* now we should only have fixpoints left */
    for (r = 0; r < n_regs; ++r) {
        assert(permutation[r] == r);
    }
#endif
}
/**
 * Free regs for values last used.
 *
 * @param live_nodes  set of live nodes, will be updated
 * @param node        the node to consider
 */
static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
{
    allocation_info_t *info      = get_allocation_info(node);
    const unsigned    *last_uses = info->last_uses;
    int                arity     = get_irn_arity(node);
    int                i;

    for (i = 0; i < arity; ++i) {
        ir_node *op;

        /* check if one operand is the last use */
        if (!rbitset_is_set(last_uses, i))
            continue;

        op = get_irn_n(node, i);
        free_reg_of_value(op);
        ir_nodeset_remove(live_nodes, op);
    }
}
/**
 * Change inputs of a node to the current value (copies/perms).
 */
static void rewire_inputs(ir_node *node)
{
    int i;
    int arity = get_irn_arity(node);

    for (i = 0; i < arity; ++i) {
        ir_node           *op   = get_irn_n(node, i);
        allocation_info_t *info = try_get_allocation_info(op);

        if (info == NULL)
            continue;

        info = get_allocation_info(info->original_value);
        if (info->current_value != op) {
            set_irn_n(node, i, info->current_value);
        }
    }
}
/**
 * Create a bitset of registers occupied by values living through an
 * instruction.
 */
static void determine_live_through_regs(unsigned *bitset, ir_node *node)
{
    const allocation_info_t *info = get_allocation_info(node);
    unsigned r;
    int      i;
    int      arity;

    /* mark all used registers as potentially live-through */
    for (r = 0; r < n_regs; ++r) {
        if (assignments[r] == NULL)
            continue;
        if (!rbitset_is_set(normal_regs, r))
            continue;

        rbitset_set(bitset, r);
    }

    /* remove registers of values dying at the instruction */
    arity = get_irn_arity(node);
    for (i = 0; i < arity; ++i) {
        ir_node               *op;
        const arch_register_t *reg;

        if (!rbitset_is_set(info->last_uses, i))
            continue;

        op  = get_irn_n(node, i);
        reg = arch_get_irn_register(op);
        rbitset_clear(bitset, arch_register_get_index(reg));
    }
}
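/**
 * Build and solve a small ILP for the cases the bipartite matching cannot
 * handle: one binary variable per (source register, destination register)
 * pair, one constraint per source register ("gets exactly one destination")
 * and one per destination register ("used by at most one value"). The
 * solution is applied as a register permutation via permute_values().
 */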
static void solve_lpp(ir_nodeset_t *live_nodes, ir_node *node,
                      unsigned *forbidden_regs, unsigned *live_through_regs)
{
    unsigned *forbidden_edges = rbitset_malloc(n_regs * n_regs);
    int      *lpp_vars        = XMALLOCNZ(int, n_regs*n_regs);
    int       arity           = get_irn_arity(node);
    int       i;
    unsigned  l;
    unsigned  r;

    lpp_t *lpp = lpp_new("prefalloc", lpp_minimize);
    //lpp_set_time_limit(lpp, 20);
    lpp_set_log(lpp, stdout);

    /* mark some edges as forbidden */
    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   current_reg;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        req = arch_get_irn_register_req_in(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited     = req->limited;
        reg         = arch_get_irn_register(op);
        current_reg = arch_register_get_index(reg);
        for (r = 0; r < n_regs; ++r) {
            if (rbitset_is_set(limited, r))
                continue;

            rbitset_set(forbidden_edges, current_reg*n_regs + r);
        }
    }

    /* add all combinations, except for not allowed ones */
    for (l = 0; l < n_regs; ++l) {
        if (!rbitset_is_set(normal_regs, l)) {
            char name[15];
            snprintf(name, sizeof(name), "%u_to_%u", l, l);
            lpp_vars[l*n_regs+l] = lpp_add_var(lpp, name, lpp_binary, 1);
            continue;
        }

        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(normal_regs, r))
                continue;
            if (rbitset_is_set(forbidden_edges, l*n_regs + r))
                continue;
            /* livethrough values may not use constrained output registers */
            if (rbitset_is_set(live_through_regs, l)
                && rbitset_is_set(forbidden_regs, r))
                continue;

            char name[15];
            snprintf(name, sizeof(name), "%u_to_%u", l, r);

            double costs = l==r ? 9 : 8;
            lpp_vars[l*n_regs+r]
                = lpp_add_var(lpp, name, lpp_binary, costs);
            assert(lpp_vars[l*n_regs+r] > 0);
        }
    }

    /* add constraints */
    for (l = 0; l < n_regs; ++l) {
        int constraint;
        /* only 1 destination per register */
        constraint = -1;
        for (r = 0; r < n_regs; ++r) {
            int var = lpp_vars[l*n_regs+r];
            if (var == 0)
                continue;
            if (constraint < 0) {
                char name[64];
                snprintf(name, sizeof(name), "%u_to_dest", l);
                constraint = lpp_add_cst(lpp, name, lpp_equal, 1);
            }
            lpp_set_factor_fast(lpp, constraint, var, 1);
        }
        /* each destination used by at most 1 value */
        constraint = -1;
        for (r = 0; r < n_regs; ++r) {
            int var = lpp_vars[r*n_regs+l];
            if (var == 0)
                continue;
            if (constraint < 0) {
                char name[64];
                snprintf(name, sizeof(name), "one_to_%u", l);
                constraint = lpp_add_cst(lpp, name, lpp_less_equal, 1);
            }
            lpp_set_factor_fast(lpp, constraint, var, 1);
        }
    }

    lpp_dump_plain(lpp, fopen("lppdump.txt", "w"));

    /* solve lpp */
    {
        ir_graph     *irg     = get_irn_irg(node);
        be_options_t *options = be_get_irg_options(irg);
        unsigned     *assignment;
        lpp_solve(lpp, options->ilp_server, options->ilp_solver);
        if (!lpp_is_sol_valid(lpp))
            panic("ilp solution not valid!");

        assignment = ALLOCAN(unsigned, n_regs);
        for (l = 0; l < n_regs; ++l) {
            unsigned dest_reg = (unsigned)-1;
            for (r = 0; r < n_regs; ++r) {
                int var = lpp_vars[l*n_regs+r];
                if (var == 0)
                    continue;
                double val = lpp_get_var_sol(lpp, var);
                if (val == 1) {
                    assert(dest_reg == (unsigned)-1);
                    dest_reg = r;
                }
            }
            assert(dest_reg != (unsigned)-1);
            assignment[dest_reg] = l;
        }

        fprintf(stderr, "Assignment: ");
        for (l = 0; l < n_regs; ++l) {
            fprintf(stderr, "%u ", assignment[l]);
        }
        fprintf(stderr, "\n");

        permute_values(live_nodes, node, assignment);
    }
}
/**
 * Enforce constraints at a node by live range splits.
 *
 * @param live_nodes  the set of live nodes, might be changed
 * @param node        the current node
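 *
 * Unsatisfied input constraints are solved as a bipartite matching problem
 * between source and destination registers (hungarian method); double-width
 * values are handed to the ILP solver instead.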
 */
static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
                                unsigned *forbidden_regs)
{
    int       arity = get_irn_arity(node);
    int       i;
    int       res;
    unsigned  l;
    unsigned  r;
    hungarian_problem_t *bp;
    unsigned *assignment;

    /* construct a list of registers occupied by live-through values */
    unsigned *live_through_regs = NULL;

    /* see if any use constraints are not met */
    bool double_width = false;
    bool good         = true;
    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   reg_index;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        /* are there any limitations for the i'th operand? */
        req = arch_get_irn_register_req_in(node, i);
        if (req->width > 1)
            double_width = true;
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited   = req->limited;
        reg       = arch_get_irn_register(op);
        reg_index = arch_register_get_index(reg);
        if (!rbitset_is_set(limited, reg_index)) {
            /* found an assignment outside the limited set */
            good = false;
        }
    }

    /* is any of the live-throughs using a constrained output register? */
    be_foreach_definition(node, cls, value,
        if (req_->width > 1)
            double_width = true;
        if (! (req_->type & arch_register_req_type_limited))
            continue;
        if (live_through_regs == NULL) {
            rbitset_alloca(live_through_regs, n_regs);
            determine_live_through_regs(live_through_regs, node);
        }
        rbitset_or(forbidden_regs, req_->limited, n_regs);
        if (rbitsets_have_common(req_->limited, live_through_regs, n_regs))
            good = false;
    );

    if (good)
        return;

    /* create these arrays if we haven't yet */
    if (live_through_regs == NULL) {
        rbitset_alloca(live_through_regs, n_regs);
    }

    if (double_width) {
        /* only the ILP variant can solve this yet */
        solve_lpp(live_nodes, node, forbidden_regs, live_through_regs);
        return;
    }

    /* at this point we have to construct a bipartite matching problem to see
     * which values should go to which registers
     * Note: We're building the matrix in "reverse" - source registers are
     * right, destinations left because this will produce the solution
     * in the format required for permute_values.
     */
    bp = hungarian_new(n_regs, n_regs, HUNGARIAN_MATCH_PERFECT);

    /* add all combinations, then remove not allowed ones */
    for (l = 0; l < n_regs; ++l) {
        if (!rbitset_is_set(normal_regs, l)) {
            hungarian_add(bp, l, l, 1);
            continue;
        }

        for (r = 0; r < n_regs; ++r) {
            if (!rbitset_is_set(normal_regs, r))
                continue;
            /* livethrough values may not use constrained output registers */
            if (rbitset_is_set(live_through_regs, l)
                && rbitset_is_set(forbidden_regs, r))
                continue;

            hungarian_add(bp, r, l, l == r ? 9 : 8);
        }
    }

    for (i = 0; i < arity; ++i) {
        ir_node                   *op = get_irn_n(node, i);
        const arch_register_t     *reg;
        const arch_register_req_t *req;
        const unsigned            *limited;
        unsigned                   current_reg;

        if (!arch_irn_consider_in_reg_alloc(cls, op))
            continue;

        req = arch_get_irn_register_req_in(node, i);
        if (!(req->type & arch_register_req_type_limited))
            continue;

        limited     = req->limited;
        reg         = arch_get_irn_register(op);
        current_reg = arch_register_get_index(reg);
        for (r = 0; r < n_regs; ++r) {
            if (rbitset_is_set(limited, r))
                continue;
            hungarian_remove(bp, r, current_reg);
        }
    }

    //hungarian_print_cost_matrix(bp, 1);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(unsigned, n_regs);
    res = hungarian_solve(bp, assignment, NULL, 0);
    assert(res == 0);

    fprintf(stderr, "Swap result:");
    for (i = 0; i < (int) n_regs; ++i) {
        fprintf(stderr, " %d", assignment[i]);
    }
    fprintf(stderr, "\n");

    permute_values(live_nodes, node, assignment);
}
/** test whether the node @p value is a copy of the value of @p test_value */
static bool is_copy_of(ir_node *value, ir_node *test_value)
{
    allocation_info_t *test_info;
    allocation_info_t *info;

    if (value == test_value)
        return true;

    info      = get_allocation_info(value);
    test_info = get_allocation_info(test_value);
    return test_info->original_value == info->original_value;
}
/**
 * Find a value in the end-assignment of a basic block.
 *
 * @returns the index into the assignment array if found,
 *          -1 otherwise
 */
static int find_value_in_block_info(block_info_t *info, ir_node *value)
{
    unsigned   r;
    ir_node  **end_assignments = info->assignments;
    for (r = 0; r < n_regs; ++r) {
        ir_node *a_value = end_assignments[r];

        if (a_value == NULL)
            continue;
        if (is_copy_of(a_value, value))
            return (int) r;
    }

    return -1;
}
/**
 * Create the necessary permutations at the end of a basic block to fulfill
 * the register assignment for phi-nodes in the next block.
 */
static void add_phi_permutations(ir_node *block, int p)
{
    unsigned   r;
    unsigned  *permutation;
    ir_node  **old_assignments;
    bool       need_permutation;
    ir_node   *phi;
    ir_node   *pred = get_Block_cfgpred_block(block, p);

    block_info_t *pred_info = get_block_info(pred);

    /* predecessor not processed yet? nothing to do */
    if (!pred_info->processed)
        return;

    permutation = ALLOCAN(unsigned, n_regs);
    for (r = 0; r < n_regs; ++r) {
        permutation[r] = r;
    }

    /* check phi nodes */
    need_permutation = false;
    phi = sched_first(block);
    for ( ; is_Phi(phi); phi = sched_next(phi)) {
        const arch_register_t *reg;
        const arch_register_t *op_reg;
        int                    regn;
        int                    a;
        ir_node               *op;

        if (!arch_irn_consider_in_reg_alloc(cls, phi))
            continue;

        op = get_Phi_pred(phi, p);
        a  = find_value_in_block_info(pred_info, op);
        assert(a >= 0);

        reg  = arch_get_irn_register(phi);
        regn = arch_register_get_index(reg);
        /* same register? nothing to do */
        if (regn == a)
            continue;

        op     = pred_info->assignments[a];
        op_reg = arch_get_irn_register(op);
        /* virtual or joker registers are ok too */
        if ((op_reg->type & arch_register_type_joker)
                || (op_reg->type & arch_register_type_virtual))
            continue;

        permutation[regn] = a;
        need_permutation  = true;
    }

    if (need_permutation) {
        /* permute values at end of predecessor */
        old_assignments = assignments;
        assignments     = pred_info->assignments;
        permute_values(NULL, be_get_end_of_block_insertion_point(pred),
                       permutation);
        assignments = old_assignments;
    }

    /* change phi nodes to use the copied values */
    phi = sched_first(block);
    for ( ; is_Phi(phi); phi = sched_next(phi)) {
        int      a;
        ir_node *op;

        if (!arch_irn_consider_in_reg_alloc(cls, phi))
            continue;

        op = get_Phi_pred(phi, p);

        /* we have permuted all values into the correct registers, so we can
         * simply query which value occupies the phi's register in the
         * predecessor */
        a  = arch_register_get_index(arch_get_irn_register(phi));
        op = pred_info->assignments[a];
        set_Phi_pred(phi, p, op);
    }
}
/**
 * Set preferences for a phi's register based on the registers used on the
 * phi inputs.
 */
static void adapt_phi_prefs(ir_node *phi)
{
    int i;
    int arity = get_irn_arity(phi);
    ir_node           *block = get_nodes_block(phi);
    allocation_info_t *info  = get_allocation_info(phi);

    for (i = 0; i < arity; ++i) {
        ir_node               *op  = get_irn_n(phi, i);
        const arch_register_t *reg = arch_get_irn_register(op);
        ir_node               *pred_block;
        block_info_t          *pred_block_info;
        float                  weight;
        unsigned               r;

        if (reg == NULL)
            continue;

        /* we only give the bonus if the predecessor already has registers
         * assigned, otherwise we only see a dummy value
         * and any conclusions about its register are useless */
        pred_block = get_Block_cfgpred_block(block, i);
        pred_block_info = get_block_info(pred_block);
        if (!pred_block_info->processed)
            continue;

        /* give bonus for already assigned register */
        weight = (float)get_block_execfreq(execfreqs, pred_block);
        r      = arch_register_get_index(reg);
        info->prefs[r] += weight * AFF_PHI;
    }
}
/**
 * After a phi has been assigned a register, propagate the preference
 * to the phi inputs.
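 * The propagation stops at inputs that already have an equal or stronger
 * preference for the assigned register of their own.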
 */
static void propagate_phi_register(ir_node *phi, unsigned assigned_r)
{
    int      i;
    ir_node *block = get_nodes_block(phi);
    int      arity = get_irn_arity(phi);

    for (i = 0; i < arity; ++i) {
        ir_node           *op         = get_Phi_pred(phi, i);
        allocation_info_t *info       = get_allocation_info(op);
        ir_node           *pred_block = get_Block_cfgpred_block(block, i);
        unsigned           r;
        float              weight
            = (float)get_block_execfreq(execfreqs, pred_block) * AFF_PHI;

        if (info->prefs[assigned_r] >= weight)
            continue;

        /* promote the preferred register */
        for (r = 0; r < n_regs; ++r) {
            if (info->prefs[r] > -weight) {
                info->prefs[r] = -weight;
            }
        }
        info->prefs[assigned_r] = weight;

        if (is_Phi(op))
            propagate_phi_register(op, assigned_r);
    }
}
static void assign_phi_registers(ir_node *block)
{
    int       n_phis = 0;
    int       n;
    int       res;
    unsigned *assignment;
    unsigned  r;
    ir_node  *node;
    hungarian_problem_t *bp;

    /* count phi nodes */
    sched_foreach(block, node) {
        if (!is_Phi(node))
            break;
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;
        ++n_phis;
    }

    if (n_phis == 0)
        return;

    /* build a bipartite matching problem for all phi nodes */
    bp = hungarian_new(n_phis, n_regs, HUNGARIAN_MATCH_PERFECT);
    n  = 0;
    sched_foreach(block, node) {
        allocation_info_t *info;

        if (!is_Phi(node))
            break;
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        /* give bonuses for predecessor colorings */
        adapt_phi_prefs(node);
        /* add stuff to bipartite problem */
        info = get_allocation_info(node);
        DB((dbg, LEVEL_3, "Prefs for %+F: ", node));
        for (r = 0; r < n_regs; ++r) {
            float costs;

            if (!rbitset_is_set(normal_regs, r))
                continue;

            costs = info->prefs[r];
            costs = costs < 0 ? -logf(-costs+1) : logf(costs+1);

            hungarian_add(bp, n, r, (int)costs);
            DB((dbg, LEVEL_3, " %s(%f)", arch_register_for_index(cls, r)->name,
                info->prefs[r]));
        }
        DB((dbg, LEVEL_3, "\n"));
        ++n;
    }

    //hungarian_print_cost_matrix(bp, 7);
    hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);

    assignment = ALLOCAN(unsigned, n_regs);
    res        = hungarian_solve(bp, assignment, NULL, 0);
    assert(res == 0);

    /* apply results */
    n = 0;
    sched_foreach(block, node) {
        const arch_register_t *reg;

        if (!is_Phi(node))
            break;
        if (!arch_irn_consider_in_reg_alloc(cls, node))
            continue;

        r = assignment[n++];
        assert(rbitset_is_set(normal_regs, r));
        reg = arch_register_for_index(cls, r);
        DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
        use_reg(node, reg);

        /* adapt preferences for phi inputs */
        if (propagate_phi_registers)
            propagate_phi_register(node, r);
    }
}
/**
 * Walker: assign registers to all nodes of a block that
 * need registers from the currently considered register class.
 */
static void allocate_coalesce_block(ir_node *block, void *data)
{
    int            i;
    int            n_preds;
    ir_nodeset_t   live_nodes;
    ir_node       *node;
    block_info_t  *block_info;
    block_info_t **pred_block_infos;
    ir_node      **phi_ins;
    unsigned      *forbidden_regs; /**< collects registers which must
                                        not be used for optimistic splits */
    (void) data;
    DB((dbg, LEVEL_2, "* Block %+F\n", block));

    /* clear assignments */
    block_info  = get_block_info(block);
    assignments = block_info->assignments;

    ir_nodeset_init(&live_nodes);

    /* gather regalloc infos of predecessor blocks */
    n_preds = get_Block_n_cfgpreds(block);
    pred_block_infos = ALLOCAN(block_info_t*, n_preds);
    for (i = 0; i < n_preds; ++i) {
        ir_node      *pred      = get_Block_cfgpred_block(block, i);
        block_info_t *pred_info = get_block_info(pred);
        pred_block_infos[i] = pred_info;
    }

    phi_ins = ALLOCAN(ir_node*, n_preds);

    /* collect live-in nodes and preassigned values */
    be_lv_foreach(lv, block, be_lv_state_in, i) {
        bool                        need_phi = false;
        const arch_register_req_t  *req;
        const arch_register_t      *reg;
        int                         p;

        node = be_lv_get_irn(lv, block, i);
        req  = arch_get_irn_register_req(node);
        if (req->cls != cls)
            continue;

        if (req->type & arch_register_req_type_ignore) {
            allocation_info_t *info = get_allocation_info(node);
            info->current_value = node;

            reg = arch_get_irn_register(node);
            assert(reg != NULL); /* ignore values must be preassigned */
            use_reg(node, reg);
            continue;
        }

        /* check all predecessors for this value; if it is not everywhere the
         * same or unknown then we have to construct a phi
         * (we collect the potential phi inputs here) */
        for (p = 0; p < n_preds; ++p) {
            block_info_t *pred_info = pred_block_infos[p];

            if (!pred_info->processed) {
                /* use node for now, it will get fixed later */
                phi_ins[p] = node;
                need_phi   = true;
            } else {
                int a = find_value_in_block_info(pred_info, node);

                /* must live out of predecessor */
                assert(a >= 0);
                phi_ins[p] = pred_info->assignments[a];
                /* different value from last time? then we need a phi */
                if (p > 0 && phi_ins[p-1] != phi_ins[p]) {
                    need_phi = true;
                }
            }
        }

        if (need_phi) {
            ir_mode *mode = get_irn_mode(node);
            ir_node *phi  = be_new_Phi(block, n_preds, phi_ins, mode, cls);

            DB((dbg, LEVEL_3, "Create Phi %+F (for %+F) -", phi, node));
#ifdef DEBUG_libfirm
            {
                int pi;
                for (pi = 0; pi < n_preds; ++pi) {
                    DB((dbg, LEVEL_3, " %+F", phi_ins[pi]));
                }
                DB((dbg, LEVEL_3, "\n"));
            }
#endif
            mark_as_copy_of(phi, node);
            sched_add_after(block, phi);

            node = phi;
        } else {
            allocation_info_t *info = get_allocation_info(node);
            info->current_value = phi_ins[0];

            /* Grab 1 of the inputs we constructed (might not be the same as
             * "node" as we could see the same copy of the value in all
             * predecessors) */
            node = phi_ins[0];
        }

        /* if the node already has a register assigned use it */
        reg = arch_get_irn_register(node);
        if (reg != NULL) {
            use_reg(node, reg);
        }

        /* remember that this node is live at the beginning of the block */
        ir_nodeset_insert(&live_nodes, node);
    }

    rbitset_alloca(forbidden_regs, n_regs);

    /* handle phis... */
    assign_phi_registers(block);

    /* all live-ins must have a register */
#ifdef DEBUG_libfirm
    {
        ir_nodeset_iterator_t iter;
        foreach_ir_nodeset(&live_nodes, node, iter) {
            const arch_register_t *reg = arch_get_irn_register(node);
            assert(reg != NULL);
        }
    }
#endif

    /* assign instructions in the block */
    sched_foreach(block, node) {
        int arity;

        /* phis are already assigned */
        if (is_Phi(node))
            continue;

        rewire_inputs(node);

        /* enforce use constraints */
        rbitset_clear_all(forbidden_regs, n_regs);
        enforce_constraints(&live_nodes, node, forbidden_regs);

        rewire_inputs(node);

        /* we may not use registers used for inputs for optimistic splits */
        arity = get_irn_arity(node);
        for (i = 0; i < arity; ++i) {
            ir_node *op = get_irn_n(node, i);
            const arch_register_t *reg;
            if (!arch_irn_consider_in_reg_alloc(cls, op))
                continue;

            reg = arch_get_irn_register(op);
            rbitset_set(forbidden_regs, arch_register_get_index(reg));
        }

        /* free registers of values last used at this instruction */
        free_last_uses(&live_nodes, node);

        /* assign output registers */
        ir_node *value;
        be_foreach_definition_(node, cls, value,
            assign_reg(block, value, forbidden_regs);
        );
    }

    ir_nodeset_destroy(&live_nodes);

    block_info->processed = true;

    /* permute values at end of predecessor blocks in case of phi-nodes */
    if (n_preds > 1) {
        int p;
        for (p = 0; p < n_preds; ++p) {
            add_phi_permutations(block, p);
        }
    }

    /* if we have exactly 1 successor then we might be able to produce phi
     * copies now */
    if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) == 1) {
        const ir_edge_t *edge
            = get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
        ir_node      *succ      = get_edge_src_irn(edge);
        int           p         = get_edge_src_pos(edge);
        block_info_t *succ_info = get_block_info(succ);

        if (succ_info->processed) {
            add_phi_permutations(succ, p);
        }
    }
}
typedef struct block_costs_t block_costs_t;
struct block_costs_t {
    float costs;   /**< costs of the block */
    int   dfs_num; /**< depth first search number (to detect backedges) */
};

static int cmp_block_costs(const void *d1, const void *d2)
{
    const ir_node * const *block1 = (const ir_node**)d1;
    const ir_node * const *block2 = (const ir_node**)d2;
    const block_costs_t   *info1  = (const block_costs_t*)get_irn_link(*block1);
    const block_costs_t   *info2  = (const block_costs_t*)get_irn_link(*block2);
    return QSORT_CMP(info2->costs, info1->costs);
}
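/**
 * Determine the order in which blocks are allocated: every block is weighted
 * by its own execution frequency plus the accumulated costs of its
 * (non-backedge) predecessors, and chains of expensive blocks are emitted
 * together, so a block is usually processed right after its most important
 * predecessor.
 */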
static void determine_block_order(void)
{
    size_t    p;
    ir_node **blocklist = be_get_cfgpostorder(irg);
    size_t    n_blocks  = ARR_LEN(blocklist);
    int       dfs_num   = 0;
    pdeq     *worklist  = new_pdeq();
    ir_node **order     = XMALLOCN(ir_node*, n_blocks);
    size_t    order_p   = 0;

    /* clear block links... */
    for (p = 0; p < n_blocks; ++p) {
        ir_node *block = blocklist[p];
        set_irn_link(block, NULL);
    }

    /* walk blocks in reverse postorder, the costs for each block are the
     * sum of the costs of its predecessors (excluding the costs on backedges
     * which we can't determine) */
    for (p = n_blocks; p > 0;) {
        block_costs_t *cost_info;
        ir_node *block = blocklist[--p];

        float execfreq   = (float)get_block_execfreq(execfreqs, block);
        float costs      = execfreq;
        int   n_cfgpreds = get_Block_n_cfgpreds(block);
        int   p2;
        for (p2 = 0; p2 < n_cfgpreds; ++p2) {
            ir_node       *pred_block = get_Block_cfgpred_block(block, p2);
            block_costs_t *pred_costs = (block_costs_t*)get_irn_link(pred_block);
            /* we don't have any info for backedges */
            if (pred_costs == NULL)
                continue;
            costs += pred_costs->costs;
        }

        cost_info          = OALLOCZ(&obst, block_costs_t);
        cost_info->costs   = costs;
        cost_info->dfs_num = dfs_num++;
        set_irn_link(block, cost_info);
    }

    /* sort array by block costs */
    qsort(blocklist, n_blocks, sizeof(blocklist[0]), cmp_block_costs);

    ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
    inc_irg_block_visited(irg);

    for (p = 0; p < n_blocks; ++p) {
        ir_node *block = blocklist[p];
        if (Block_block_visited(block))
            continue;

        /* continually add predecessors with highest costs to worklist
         * (without using backedges) */
        do {
            block_costs_t *info       = (block_costs_t*)get_irn_link(block);
            ir_node       *best_pred  = NULL;
            float          best_costs = -1;
            int            n_cfgpred  = get_Block_n_cfgpreds(block);
            int            i;

            pdeq_putr(worklist, block);
            mark_Block_block_visited(block);
            for (i = 0; i < n_cfgpred; ++i) {
                ir_node       *pred_block = get_Block_cfgpred_block(block, i);
                block_costs_t *pred_info  = (block_costs_t*)get_irn_link(pred_block);

                /* ignore backedges */
                if (pred_info->dfs_num > info->dfs_num)
                    continue;

                if (pred_info->costs > best_costs) {
                    best_costs = pred_info->costs;
                    best_pred  = pred_block;
                }
            }
            block = best_pred;
        } while (block != NULL && !Block_block_visited(block));

        /* now put all nodes in the worklist in our final order */
        while (!pdeq_empty(worklist)) {
            ir_node *pblock = (ir_node*)pdeq_getr(worklist);
            assert(order_p < n_blocks);
            order[order_p++] = pblock;
        }
    }
    assert(order_p == n_blocks);
    del_pdeq(worklist);

    ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);

    DEL_ARR_F(blocklist);

    obstack_free(&obst, NULL);
    obstack_init(&obst);

    block_order   = order;
    n_block_order = n_blocks;
}
/**
 * Run the register allocator for the current register class.
 */
static void be_pref_alloc_cls(void)
{
    size_t i;

    lv = be_assure_liveness(irg);
    be_liveness_assure_sets(lv);

    ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);

    DB((dbg, LEVEL_2, "=== Allocating registers of %s ===\n", cls->name));

    be_clear_links(irg);

    irg_block_walk_graph(irg, NULL, analyze_block, NULL);
    if (create_congruence_classes)
        combine_congruence_classes();

    for (i = 0; i < n_block_order; ++i) {
        ir_node *block = block_order[i];
        allocate_coalesce_block(block, NULL);
    }

    ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
}
static void dump(int mask, ir_graph *irg, const char *suffix)
{
    if (be_get_irg_options(irg)->dump_flags & mask)
        dump_ir_graph(irg, suffix);
}
/**
 * Run the spiller on the current graph.
 */
static void spill(void)
{
    /* make sure all nodes show their real register pressure */
    be_timer_push(T_RA_CONSTR);
    be_pre_spill_prepare_constr(irg, cls);
    be_timer_pop(T_RA_CONSTR);

    dump(DUMP_RA, irg, "spillprepare");

    /* spill */
    be_timer_push(T_RA_SPILL);
    be_do_spill(irg, cls);
    be_timer_pop(T_RA_SPILL);

    be_timer_push(T_RA_SPILL_APPLY);
    check_for_memory_operands(irg);
    be_timer_pop(T_RA_SPILL_APPLY);

    dump(DUMP_RA, irg, "spill");
}
/**
 * The pref register allocator for a whole procedure.
 */
static void be_pref_alloc(ir_graph *new_irg)
{
    const arch_env_t *arch_env = be_get_irg_arch_env(new_irg);
    int n_cls = arch_env->n_register_classes;
    int c;

    obstack_init(&obst);

    irg       = new_irg;
    execfreqs = be_get_irg_exec_freq(irg);

    /* determine a good coloring order */
    determine_block_order();

    for (c = 0; c < n_cls; ++c) {
        cls = &arch_env->register_classes[c];
        if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
            continue;

        stat_ev_ctx_push_str("regcls", cls->name);

        n_regs      = arch_register_class_n_regs(cls);
        normal_regs = rbitset_malloc(n_regs);
        be_set_allocatable_regs(irg, cls, normal_regs);

        spill();

        /* verify schedule and register pressure */
        be_timer_push(T_VERIFY);
        if (be_get_irg_options(irg)->verify_option == BE_VERIFY_WARN) {
            be_verify_schedule(irg);
            be_verify_register_pressure(irg, cls);
        } else if (be_get_irg_options(irg)->verify_option == BE_VERIFY_ASSERT) {
            assert(be_verify_schedule(irg) && "Schedule verification failed");
            assert(be_verify_register_pressure(irg, cls)
                   && "Register pressure verification failed");
        }
        be_timer_pop(T_VERIFY);

        be_timer_push(T_RA_COLOR);
        be_pref_alloc_cls();
        be_timer_pop(T_RA_COLOR);

        /* we most probably constructed new Phis so liveness info is invalid
         * now */
        /* TODO: test liveness_introduce */
        be_liveness_invalidate(lv);
        free(normal_regs);

        stat_ev_ctx_pop("regcls");
    }

    be_timer_push(T_RA_SPILL_APPLY);
    be_abi_fix_stack_nodes(irg);
    be_timer_pop(T_RA_SPILL_APPLY);

    be_timer_push(T_VERIFY);
    if (be_get_irg_options(irg)->verify_option == BE_VERIFY_WARN) {
        be_verify_register_allocation(irg);
    } else if (be_get_irg_options(irg)->verify_option == BE_VERIFY_ASSERT) {
        assert(be_verify_register_allocation(irg)
               && "Register allocation invalid");
    }
    be_timer_pop(T_VERIFY);

    obstack_free(&obst, NULL);
}
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_pref_alloc)
void be_init_pref_alloc(void)
{
    static be_ra_t be_ra_pref = {
        be_pref_alloc,
    };
    lc_opt_entry_t *be_grp          = lc_opt_get_grp(firm_opt_get_root(), "be");
    lc_opt_entry_t *prefalloc_group = lc_opt_get_grp(be_grp, "prefalloc");
    lc_opt_add_table(prefalloc_group, options);

    be_register_allocator("pref", &be_ra_pref);
    FIRM_DBG_REGISTER(dbg, "firm.be.prefalloc");
}