2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 * @brief Preference Guided Register Assignment
23 * @author Matthias Braun
26 * The idea is to allocate registers in 2 passes:
27 * 1. A first pass that determines "preferred" registers for each live-range.
28 *    For every register and every live-range it calculates a value indicating
29 *    how useful that register is. (Roughly, the value is the negated cost of
30 *    the copies needed when the value sits in that specific register.)
32 * 2. Walk the blocks and assign registers in a greedy fashion, preferring
33 *    registers with high preference values. When register constraints are not
34 *    met, add copies and split live-ranges. (A small illustration follows.)
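 *
 * Illustration (hypothetical, R0 is just a placeholder register name): if a
 * value must be produced in register R0, the first pass lowers that value's
 * preference for every other register (making R0 relatively attractive) and
 * lowers the R0 preference of all values live at that point, so the greedy
 * second pass tends to keep R0 free for the constrained value.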
37 * - make use of free registers in the permute_values code
50 #include "iredges_t.h"
51 #include "irgraph_t.h"
59 #include "raw_bitset.h"
60 #include "unionfind.h"
62 #include "hungarian.h"
65 #include "bechordal_t.h"
74 #include "bespillutil.h"
79 #define USE_FACTOR 1.0f
80 #define DEF_FACTOR 1.0f
81 #define NEIGHBOR_FACTOR 0.2f
82 #define AFF_SHOULD_BE_SAME 0.5f
84 #define SPLIT_DELTA 1.0f
85 #define MAX_OPTIMISTIC_SPLIT_RECURSION 0
87 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
89 static struct obstack obst;
91 static const arch_register_class_t *cls;
93 static unsigned n_regs;
94 static unsigned *normal_regs;
95 static int *congruence_classes;
96 static ir_node **block_order;
97 static size_t n_block_order;
98 static int create_preferences = true;
99 static int create_congruence_classes = true;
100 static int propagate_phi_registers = true;
102 static const lc_opt_table_entry_t options[] = {
103 LC_OPT_ENT_BOOL("prefs", "use preference based coloring", &create_preferences),
104 LC_OPT_ENT_BOOL("congruences", "create congruence classes", &create_congruence_classes),
105 LC_OPT_ENT_BOOL("prop_phi", "propagate phi registers", &propagate_phi_registers),
109 /** currently active assignments (while processing a basic block)
110 * maps registers to values (their current copies) */
111 static ir_node **assignments;
114 * allocation information: last_uses, register preferences
115 * the information is per firm-node.
117 struct allocation_info_t {
118 unsigned last_uses[2]; /**< bitset indicating last uses (input pos) */
119 ir_node *current_value; /**< copy of the value that should be used */
120 ir_node *original_value; /**< for copies point to original value */
121 float prefs[]; /**< register preferences */
123 typedef struct allocation_info_t allocation_info_t;
125 /** helper datastructure used when sorting register preferences */
130 typedef struct reg_pref_t reg_pref_t;
132 /** per basic-block information */
133 struct block_info_t {
134 bool processed; /**< indicate whether block is processed */
135 ir_node *assignments[]; /**< register assignments at end of block */
137 typedef struct block_info_t block_info_t;
140 * Get the allocation info for a node.
141 * The info is allocated on the first visit of a node.
143 static allocation_info_t *get_allocation_info(ir_node *node)
145 allocation_info_t *info = (allocation_info_t*)get_irn_link(node);
147 info = OALLOCFZ(&obst, allocation_info_t, prefs, n_regs);
148 info->current_value = node;
149 info->original_value = node;
150 set_irn_link(node, info);
156 static allocation_info_t *try_get_allocation_info(const ir_node *node)
158 return (allocation_info_t*) get_irn_link(node);
162 * Get allocation information for a basic block
164 static block_info_t *get_block_info(ir_node *block)
166 block_info_t *info = (block_info_t*)get_irn_link(block);
168 assert(is_Block(block));
170 info = OALLOCFZ(&obst, block_info_t, assignments, n_regs);
171 set_irn_link(block, info);
178 * Link the allocation info of a node to a copy.
179 * Afterwards, both nodes use the same allocation info.
180 * Copy must not have an allocation info assigned yet.
182 * @param copy the node that gets the allocation info assigned
183 * @param value the original node
185 static void mark_as_copy_of(ir_node *copy, ir_node *value)
187 allocation_info_t *info = get_allocation_info(value);
188 allocation_info_t *copy_info = get_allocation_info(copy);
190 /* find original value */
191 ir_node *original = info->original_value;
192 if (original != value) {
193 info = get_allocation_info(original);
196 assert(info->original_value == original);
197 info->current_value = copy;
199 /* the copy should not be linked to something else yet */
200 assert(copy_info->original_value == copy);
201 copy_info->original_value = original;
203 /* copy over allocation preferences */
204 memcpy(copy_info->prefs, info->prefs, n_regs * sizeof(copy_info->prefs[0]));
208 * Calculate the penalties for every register on a node and its live neighbors.
210 * @param live_nodes the set of live nodes at the current position, may be NULL
211 * @param penalty the penalty to subtract from the affected register preferences
212 * @param limited a raw bitset containing the limited set for the node
213 * @param node the node
215 static void give_penalties_for_limits(const ir_nodeset_t *live_nodes,
216 float penalty, const unsigned* limited,
219 allocation_info_t *info = get_allocation_info(node);
221 /* give penalty for all forbidden regs */
222 for (unsigned r = 0; r < n_regs; ++r) {
223 if (rbitset_is_set(limited, r))
226 info->prefs[r] -= penalty;
229 /* all other live values should get a penalty for allowed regs */
230 if (live_nodes == NULL)
233 penalty *= NEIGHBOR_FACTOR;
234 size_t n_allowed = rbitset_popcount(limited, n_regs);
236 /* only create a very weak penalty if multiple regs are allowed */
237 penalty = (penalty * 0.8f) / n_allowed;
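/* Illustrative numbers (not from the source): for an incoming penalty of 1.0,
 * NEIGHBOR_FACTOR 0.2 and 2 allowed registers, every live neighbor loses
 * 1.0 * 0.2 * 0.8 / 2 = 0.08 on each of the 2 allowed registers. */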
239 foreach_ir_nodeset(live_nodes, neighbor, iter) {
240 allocation_info_t *neighbor_info;
242 /* TODO: if op is used on multiple inputs we might not do a
244 if (neighbor == node)
247 neighbor_info = get_allocation_info(neighbor);
248 for (unsigned r = 0; r < n_regs; ++r) {
249 if (!rbitset_is_set(limited, r))
252 neighbor_info->prefs[r] -= penalty;
258 * Calculate the preferences of a definition for the current register class.
259 * If the definition uses a limited set of registers, penalize all other
260 * registers on the node itself and the limited registers on its live neighbors.
262 * @param live_nodes the set of live nodes at the current node
263 * @param weight the weight
264 * @param node the current node
266 static void check_defs(const ir_nodeset_t *live_nodes, float weight,
269 const arch_register_req_t *req = arch_get_irn_register_req(node);
270 if (req->type & arch_register_req_type_limited) {
271 const unsigned *limited = req->limited;
272 float penalty = weight * DEF_FACTOR;
273 give_penalties_for_limits(live_nodes, penalty, limited, node);
276 if (req->type & arch_register_req_type_should_be_same) {
277 ir_node *insn = skip_Proj(node);
278 allocation_info_t *info = get_allocation_info(node);
279 int arity = get_irn_arity(insn);
281 float factor = 1.0f / rbitset_popcount(&req->other_same, arity);
282 for (int i = 0; i < arity; ++i) {
283 if (!rbitset_is_set(&req->other_same, i))
286 ir_node *op = get_irn_n(insn, i);
288 /* if the value at the should_be_same input doesn't die at the
289 * node, then there is no use in propagating the constraints (since a
290 * copy will emerge anyway) */
291 if (ir_nodeset_contains(live_nodes, op))
294 allocation_info_t *op_info = get_allocation_info(op);
295 for (unsigned r = 0; r < n_regs; ++r) {
296 op_info->prefs[r] += info->prefs[r] * factor;
303 * Walker: runs on a block and calculates the preferences for every
304 * node and every register of the considered register class.
306 static void analyze_block(ir_node *block, void *data)
308 float weight = (float)get_block_execfreq(block);
309 ir_nodeset_t live_nodes;
312 ir_nodeset_init(&live_nodes);
313 be_liveness_end_of_block(lv, cls, block, &live_nodes);
315 sched_foreach_reverse(block, node) {
319 if (create_preferences) {
321 be_foreach_definition(node, cls, value,
322 check_defs(&live_nodes, weight, value);
327 int arity = get_irn_arity(node);
329 /* the allocation info only has a fixed-size bitset (last_uses) to mark
330 last-used inputs, so we fail for nodes with more inputs than bits. */
332 allocation_info_t *info = get_allocation_info(node);
333 if (arity >= (int) sizeof(info->last_uses) * 8) {
334 panic("Node with more than %d inputs not supported yet",
335 (int) sizeof(info->last_uses) * 8);
338 for (int i = 0; i < arity; ++i) {
339 ir_node *op = get_irn_n(node, i);
340 const arch_register_req_t *req = arch_get_irn_register_req(op);
344 /* last usage of a value? */
345 if (!ir_nodeset_contains(&live_nodes, op)) {
346 rbitset_set(info->last_uses, i);
350 be_liveness_transfer(cls, node, &live_nodes);
352 if (create_preferences) {
353 /* update weights based on usage constraints */
354 for (int i = 0; i < arity; ++i) {
355 ir_node *op = get_irn_n(node, i);
356 if (!arch_irn_consider_in_reg_alloc(cls, op))
359 const arch_register_req_t *req
360 = arch_get_irn_register_req_in(node, i);
361 if (!(req->type & arch_register_req_type_limited))
364 const unsigned *limited = req->limited;
365 give_penalties_for_limits(&live_nodes, weight * USE_FACTOR,
371 ir_nodeset_destroy(&live_nodes);
374 static void congruence_def(ir_nodeset_t *live_nodes, const ir_node *node)
376 const arch_register_req_t *req = arch_get_irn_register_req(node);
378 /* should be same constraint? */
379 if (req->type & arch_register_req_type_should_be_same) {
380 const ir_node *insn = skip_Proj_const(node);
381 int arity = get_irn_arity(insn);
382 unsigned node_idx = get_irn_idx(node);
383 node_idx = uf_find(congruence_classes, node_idx);
385 for (int i = 0; i < arity; ++i) {
386 if (!rbitset_is_set(&req->other_same, i))
389 ir_node *op = get_irn_n(insn, i);
390 int op_idx = get_irn_idx(op);
391 op_idx = uf_find(congruence_classes, op_idx);
393 /* do we interfere with the value */
394 bool interferes = false;
395 foreach_ir_nodeset(live_nodes, live, iter) {
396 int lv_idx = get_irn_idx(live);
397 lv_idx = uf_find(congruence_classes, lv_idx);
398 if (lv_idx == op_idx) {
403 /* don't put in same affinity class if we interfere */
407 node_idx = uf_union(congruence_classes, node_idx, op_idx);
408 DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
410 /* one should_be_same is enough... */
416 static void create_congruence_class(ir_node *block, void *data)
418 ir_nodeset_t live_nodes;
421 ir_nodeset_init(&live_nodes);
422 be_liveness_end_of_block(lv, cls, block, &live_nodes);
424 /* check should be same constraints */
425 ir_node *last_phi = NULL;
426 sched_foreach_reverse(block, node) {
433 be_foreach_definition(node, cls, value,
434 congruence_def(&live_nodes, value);
436 be_liveness_transfer(cls, node, &live_nodes);
441 /* check phi congruence classes */
442 sched_foreach_reverse_from(last_phi, phi) {
445 if (!arch_irn_consider_in_reg_alloc(cls, phi))
448 int node_idx = get_irn_idx(phi);
449 node_idx = uf_find(congruence_classes, node_idx);
451 int arity = get_irn_arity(phi);
452 for (int i = 0; i < arity; ++i) {
453 ir_node *op = get_Phi_pred(phi, i);
454 int op_idx = get_irn_idx(op);
455 op_idx = uf_find(congruence_classes, op_idx);
457 /* do we interfere with the value */
458 bool interferes = false;
459 foreach_ir_nodeset(&live_nodes, live, iter) {
460 int lv_idx = get_irn_idx(live);
461 lv_idx = uf_find(congruence_classes, lv_idx);
462 if (lv_idx == op_idx) {
467 /* don't put in same affinity class if we interfere */
470 /* any other phi has the same input? */
471 sched_foreach(block, phi) {
476 if (!arch_irn_consider_in_reg_alloc(cls, phi))
478 oop = get_Phi_pred(phi, i);
481 oop_idx = get_irn_idx(oop);
482 oop_idx = uf_find(congruence_classes, oop_idx);
483 if (oop_idx == op_idx) {
491 /* merge the 2 congruence classes and sum up their preferences */
492 int old_node_idx = node_idx;
493 node_idx = uf_union(congruence_classes, node_idx, op_idx);
494 DB((dbg, LEVEL_3, "Merge %+F and %+F congruence classes\n",
497 old_node_idx = node_idx == old_node_idx ? op_idx : old_node_idx;
498 allocation_info_t *head_info
499 = get_allocation_info(get_idx_irn(irg, node_idx));
500 allocation_info_t *other_info
501 = get_allocation_info(get_idx_irn(irg, old_node_idx));
502 for (unsigned r = 0; r < n_regs; ++r) {
503 head_info->prefs[r] += other_info->prefs[r];
509 static void set_congruence_prefs(ir_node *node, void *data)
512 unsigned node_idx = get_irn_idx(node);
513 unsigned node_set = uf_find(congruence_classes, node_idx);
515 /* head of congruence class or not in any class */
516 if (node_set == node_idx)
519 if (!arch_irn_consider_in_reg_alloc(cls, node))
522 ir_node *head = get_idx_irn(irg, node_set);
523 allocation_info_t *head_info = get_allocation_info(head);
524 allocation_info_t *info = get_allocation_info(node);
526 memcpy(info->prefs, head_info->prefs, n_regs * sizeof(info->prefs[0]));
529 static void combine_congruence_classes(void)
531 size_t n = get_irg_last_idx(irg);
532 congruence_classes = XMALLOCN(int, n);
533 uf_init(congruence_classes, n);
535 /* create congruence classes */
536 irg_block_walk_graph(irg, create_congruence_class, NULL, NULL);
537 /* merge preferences */
538 irg_walk_graph(irg, set_congruence_prefs, NULL, NULL);
539 free(congruence_classes);
545 * Assign register reg to the given node.
547 * @param node the node
548 * @param reg the register
550 static void use_reg(ir_node *node, const arch_register_t *reg, unsigned width)
552 unsigned r = arch_register_get_index(reg);
553 for (unsigned r0 = r; r0 < r + width; ++r0)
554 assignments[r0] = node;
555 arch_set_irn_register(node, reg);
558 static void free_reg_of_value(ir_node *node)
560 if (!arch_irn_consider_in_reg_alloc(cls, node))
563 const arch_register_t *reg = arch_get_irn_register(node);
564 const arch_register_req_t *req = arch_get_irn_register_req(node);
565 unsigned r = arch_register_get_index(reg);
566 /* assignments[r0] may be NULL if a value is used at 2 inputs
567 * so it gets freed twice. */
568 for (unsigned r0 = r; r0 < r + req->width; ++r0) {
569 assert(assignments[r0] == node || assignments[r0] == NULL);
570 assignments[r0] = NULL;
575 * Comparison function to sort register preferences in decreasing order.
577 static int compare_reg_pref(const void *e1, const void *e2)
579 const reg_pref_t *rp1 = (const reg_pref_t*) e1;
580 const reg_pref_t *rp2 = (const reg_pref_t*) e2;
581 if (rp1->pref < rp2->pref)
583 if (rp1->pref > rp2->pref)
588 static void fill_sort_candidates(reg_pref_t *regprefs,
589 const allocation_info_t *info)
591 for (unsigned r = 0; r < n_regs; ++r) {
592 float pref = info->prefs[r];
594 regprefs[r].pref = pref;
596 /* TODO: use a stable sort here to avoid unnecessary register jumping */
597 qsort(regprefs, n_regs, sizeof(regprefs[0]), compare_reg_pref);
600 static bool try_optimistic_split(ir_node *to_split, ir_node *before,
601 float pref, float pref_delta,
602 unsigned *forbidden_regs, int recursion)
606 allocation_info_t *info = get_allocation_info(to_split);
609 /* stupid hack: don't optimistically split dont_spill nodes...
610 * (so we don't split away the values produced because of
611 * must_be_different constraints) */
612 ir_node *original_insn = skip_Proj(info->original_value);
613 if (arch_get_irn_flags(original_insn) & arch_irn_flags_dont_spill)
616 const arch_register_t *from_reg = arch_get_irn_register(to_split);
617 unsigned from_r = arch_register_get_index(from_reg);
618 ir_node *block = get_nodes_block(before);
619 float split_threshold = (float)get_block_execfreq(block) * SPLIT_DELTA;
621 if (pref_delta < split_threshold*0.5)
624 /* find the best free position where we could move to */
625 reg_pref_t *prefs = ALLOCAN(reg_pref_t, n_regs);
626 fill_sort_candidates(prefs, info);
628 for (i = 0; i < n_regs; ++i) {
629 /* we need a normal register which is not an output register
630 and different from the current register of to_split */
632 if (!rbitset_is_set(normal_regs, r))
634 if (rbitset_is_set(forbidden_regs, r))
639 /* is the split worth it? */
640 delta = pref_delta + prefs[i].pref;
641 if (delta < split_threshold) {
642 DB((dbg, LEVEL_3, "Not doing optimistic split of %+F (depth %d), win %f too low\n",
643 to_split, recursion, delta));
647 /* if the register is free then we can do the split */
648 if (assignments[r] == NULL)
651 /* otherwise we might try recursively calling optimistic_split */
652 if (recursion+1 > MAX_OPTIMISTIC_SPLIT_RECURSION)
655 float apref = prefs[i].pref;
656 float apref_delta = i+1 < n_regs ? apref - prefs[i+1].pref : 0;
657 apref_delta += pref_delta - split_threshold;
659 /* our source register isn't a useful destination for recursive
661 bool old_source_state = rbitset_is_set(forbidden_regs, from_r);
662 rbitset_set(forbidden_regs, from_r);
663 /* try recursive split */
664 bool res = try_optimistic_split(assignments[r], before, apref,
665 apref_delta, forbidden_regs, recursion+1);
666 /* restore the forbidden state of our source register */
667 if (old_source_state) {
668 rbitset_set(forbidden_regs, from_r);
670 rbitset_clear(forbidden_regs, from_r);
679 const arch_register_t *reg = arch_register_for_index(cls, r);
680 ir_node *copy = be_new_Copy(block, to_split);
682 mark_as_copy_of(copy, to_split);
683 /* hacky, but correct here */
684 if (assignments[arch_register_get_index(from_reg)] == to_split)
685 free_reg_of_value(to_split);
686 use_reg(copy, reg, width);
687 sched_add_before(before, copy);
690 "Optimistic live-range split %+F move %+F(%s) -> %s before %+F (win %f, depth %d)\n",
691 copy, to_split, from_reg->name, reg->name, before, delta, recursion));
696 * Determine and assign a register for node @p node
698 static void assign_reg(const ir_node *block, ir_node *node,
699 unsigned *forbidden_regs)
701 assert(!is_Phi(node));
702 /* preassigned register? */
703 const arch_register_t *final_reg = arch_get_irn_register(node);
704 const arch_register_req_t *req = arch_get_irn_register_req(node);
705 unsigned width = req->width;
706 if (final_reg != NULL) {
707 DB((dbg, LEVEL_2, "Preassignment %+F -> %s\n", node, final_reg->name));
708 use_reg(node, final_reg, width);
712 /* ignore reqs must be preassigned */
713 assert (! (req->type & arch_register_req_type_ignore));
715 /* give should_be_same bonuses */
716 allocation_info_t *info = get_allocation_info(node);
717 ir_node *in_node = skip_Proj(node);
718 if (req->type & arch_register_req_type_should_be_same) {
719 float weight = (float)get_block_execfreq(block);
720 int arity = get_irn_arity(in_node);
722 assert(arity <= (int) sizeof(req->other_same) * 8);
723 for (int i = 0; i < arity; ++i) {
724 if (!rbitset_is_set(&req->other_same, i))
727 ir_node *in = get_irn_n(in_node, i);
728 const arch_register_t *reg = arch_get_irn_register(in);
729 unsigned reg_index = arch_register_get_index(reg);
731 /* if the value didn't die here then we should not propagate the
732 * should_be_same info */
733 if (assignments[reg_index] == in)
736 info->prefs[reg_index] += weight * AFF_SHOULD_BE_SAME;
740 /* create list of register candidates and sort by their preference */
741 DB((dbg, LEVEL_2, "Candidates for %+F:", node));
742 reg_pref_t *reg_prefs = ALLOCAN(reg_pref_t, n_regs);
743 fill_sort_candidates(reg_prefs, info);
744 for (unsigned r = 0; r < n_regs; ++r) {
745 unsigned num = reg_prefs[r].num;
746 if (!rbitset_is_set(normal_regs, num))
748 const arch_register_t *reg = arch_register_for_index(cls, num);
749 DB((dbg, LEVEL_2, " %s(%f)", reg->name, reg_prefs[r].pref));
751 DB((dbg, LEVEL_2, "\n"));
753 const unsigned *allowed_regs = normal_regs;
754 if (req->type & arch_register_req_type_limited) {
755 allowed_regs = req->limited;
758 unsigned final_reg_index = 0;
760 for (r = 0; r < n_regs; ++r) {
761 final_reg_index = reg_prefs[r].num;
762 if (!rbitset_is_set(allowed_regs, final_reg_index))
764 /* alignment constraint? */
766 if ((req->type & arch_register_req_type_aligned)
767 && (final_reg_index % width) != 0)
770 for (unsigned r0 = r+1; r0 < r+width; ++r0) {
771 if (assignments[r0] != NULL)
774 /* TODO: attempt optimistic split here */
779 if (assignments[final_reg_index] == NULL)
781 float pref = reg_prefs[r].pref;
782 float delta = r+1 < n_regs ? pref - reg_prefs[r+1].pref : 0;
783 ir_node *before = skip_Proj(node);
785 = try_optimistic_split(assignments[final_reg_index], before, pref,
786 delta, forbidden_regs, 0);
791 /* the common reason to hit this panic is that one of your nodes is not
792 * register pressure faithful */
793 panic("No register left for %+F\n", node);
796 final_reg = arch_register_for_index(cls, final_reg_index);
797 DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, final_reg->name));
798 use_reg(node, final_reg, width);
802 * Add a permutation in front of a node and change the assignments
803 * due to this permutation.
805 * To understand this imagine a permutation like this:
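 *
 *   (illustrative example, reconstructed to be consistent with the counts
 *    mentioned below; written as "destination <- source")
 *   register 2 <- register 1
 *   register 3 <- register 2
 *   register 1 <- register 3
 *   register 5 <- register 3
 *   register 6 <- register 4
 *   register 7 <- register 7
 *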
815 * First we count how many destinations a single value has. At the same time
816 * we can be sure that each destination register has at most 1 source register
817 * (it can have 0 which means we don't care what value is in it).
818 * We ignore all fulfilled permutations (like 7->7).
819 * In a first pass we create as many copy instructions as possible, as they
820 * are generally cheaper than exchanges. We do this by counting into how many
821 * destinations a register has to be copied (in the example it's 2 for register
822 * 3, or 1 for the registers 1,2,4 and 7).
823 * We can then create a copy into every destination register when the usecount
824 * of that register is 0 (= no one else needs the value in the register).
826 * After this step we should only have cycles left. We implement a cyclic
827 * permutation of n registers with n-1 transpositions.
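 * (For example, a cycle spanning three registers is resolved with two
 *  exchanges, a cycle spanning two registers with a single exchange.)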
829 * @param live_nodes the set of live nodes, updated due to live range split
830 * @param before the node before we add the permutation
831 * @param permutation the permutation array: indices are the destination
832 * registers, the values in the array are the source registers
835 static void permute_values(ir_nodeset_t *live_nodes, ir_node *before,
836 unsigned *permutation)
838 unsigned *n_used = ALLOCANZ(unsigned, n_regs);
840 /* determine how often each source register needs to be read */
841 for (unsigned r = 0; r < n_regs; ++r) {
842 unsigned old_reg = permutation[r];
845 value = assignments[old_reg];
847 /* nothing to do here, reg is not live. Mark it as fixpoint
848 * so we ignore it in the next steps */
856 ir_node *block = get_nodes_block(before);
858 /* step1: create copies where immediately possible */
859 for (unsigned r = 0; r < n_regs; /* empty */) {
860 unsigned old_r = permutation[r];
862 /* - no need to do anything for fixed points.
863 - we can't copy if the value in the dest reg is still needed */
864 if (old_r == r || n_used[r] > 0) {
870 ir_node *src = assignments[old_r];
871 ir_node *copy = be_new_Copy(block, src);
872 sched_add_before(before, copy);
873 const arch_register_t *reg = arch_register_for_index(cls, r);
874 DB((dbg, LEVEL_2, "Copy %+F (from %+F, before %+F) -> %s\n",
875 copy, src, before, reg->name));
876 mark_as_copy_of(copy, src);
877 unsigned width = 1; /* TODO */
878 use_reg(copy, reg, width);
880 if (live_nodes != NULL) {
881 ir_nodeset_insert(live_nodes, copy);
884 /* old register has 1 user less, permutation is resolved */
885 assert(arch_register_get_index(arch_get_irn_register(src)) == old_r);
888 assert(n_used[old_r] > 0);
890 if (n_used[old_r] == 0) {
891 if (live_nodes != NULL) {
892 ir_nodeset_remove(live_nodes, src);
894 free_reg_of_value(src);
897 /* advance or jump back (if this copy enabled another copy) */
898 if (old_r < r && n_used[old_r] == 0) {
905 /* at this point we only have "cycles" left, which we resolve with Perm nodes.
907 * TODO: if we have free registers left, then we should really use copy
908 * instructions for any cycle longer than 2 registers...
909 * (this is probably architecture dependent, there might be archs where
910 * copies are preferable even for 2-cycles) */
912 /* create perms with the rest */
913 for (unsigned r = 0; r < n_regs; /* empty */) {
914 unsigned old_r = permutation[r];
921 /* we shouldn't have copies from 1 value to multiple destinations left */
922 assert(n_used[old_r] == 1);
924 /* exchange old_r and r2; after that old_r is a fixed point */
925 unsigned r2 = permutation[old_r];
927 ir_node *in[2] = { assignments[r2], assignments[old_r] };
928 ir_node *perm = be_new_Perm(cls, block, 2, in);
929 sched_add_before(before, perm);
930 DB((dbg, LEVEL_2, "Perm %+F (perm %+F,%+F, before %+F)\n",
931 perm, in[0], in[1], before));
933 unsigned width = 1; /* TODO */
935 ir_node *proj0 = new_r_Proj(perm, get_irn_mode(in[0]), 0);
936 mark_as_copy_of(proj0, in[0]);
937 const arch_register_t *reg0 = arch_register_for_index(cls, old_r);
938 use_reg(proj0, reg0, width);
940 ir_node *proj1 = new_r_Proj(perm, get_irn_mode(in[1]), 1);
941 mark_as_copy_of(proj1, in[1]);
942 const arch_register_t *reg1 = arch_register_for_index(cls, r2);
943 use_reg(proj1, reg1, width);
945 /* 1 value is now in the correct register */
946 permutation[old_r] = old_r;
947 /* the source of r changed to r2 */
950 /* if we have reached a fixpoint update data structures */
951 if (live_nodes != NULL) {
952 ir_nodeset_remove(live_nodes, in[0]);
953 ir_nodeset_remove(live_nodes, in[1]);
954 ir_nodeset_remove(live_nodes, proj0);
955 ir_nodeset_insert(live_nodes, proj1);
960 /* now we should only have fixpoints left */
961 for (unsigned r = 0; r < n_regs; ++r) {
962 assert(permutation[r] == r);
968 * Free regs for values last used.
970 * @param live_nodes set of live nodes, will be updated
971 * @param node the node to consider
973 static void free_last_uses(ir_nodeset_t *live_nodes, ir_node *node)
975 allocation_info_t *info = get_allocation_info(node);
976 const unsigned *last_uses = info->last_uses;
977 int arity = get_irn_arity(node);
979 for (int i = 0; i < arity; ++i) {
980 /* check if one operand is the last use */
981 if (!rbitset_is_set(last_uses, i))
984 ir_node *op = get_irn_n(node, i);
985 free_reg_of_value(op);
986 ir_nodeset_remove(live_nodes, op);
991 * change inputs of a node to the current value (copies/perms)
993 static void rewire_inputs(ir_node *node)
995 int arity = get_irn_arity(node);
996 for (int i = 0; i < arity; ++i) {
997 ir_node *op = get_irn_n(node, i);
998 allocation_info_t *info = try_get_allocation_info(op);
1003 info = get_allocation_info(info->original_value);
1004 if (info->current_value != op) {
1005 set_irn_n(node, i, info->current_value);
1011 * Create a bitset of the registers occupied by values living through an instruction.
1014 static void determine_live_through_regs(unsigned *bitset, ir_node *node)
1016 const allocation_info_t *info = get_allocation_info(node);
1018 /* mark all used registers as potentially live-through */
1019 for (unsigned r = 0; r < n_regs; ++r) {
1020 if (assignments[r] == NULL)
1022 if (!rbitset_is_set(normal_regs, r))
1025 rbitset_set(bitset, r);
1028 /* remove registers of values dying at the instruction */
1029 int arity = get_irn_arity(node);
1030 for (int i = 0; i < arity; ++i) {
1031 if (!rbitset_is_set(info->last_uses, i))
1034 ir_node *op = get_irn_n(node, i);
1035 const arch_register_t *reg = arch_get_irn_register(op);
1036 rbitset_clear(bitset, arch_register_get_index(reg));
1040 static void solve_lpp(ir_nodeset_t *live_nodes, ir_node *node,
1041 unsigned *forbidden_regs, unsigned *live_through_regs)
1043 unsigned *forbidden_edges = rbitset_malloc(n_regs * n_regs);
1044 int *lpp_vars = XMALLOCNZ(int, n_regs*n_regs);
1046 lpp_t *lpp = lpp_new("prefalloc", lpp_minimize);
1047 //lpp_set_time_limit(lpp, 20);
1048 lpp_set_log(lpp, stdout);
1050 /* mark some edges as forbidden */
1051 int arity = get_irn_arity(node);
1052 for (int i = 0; i < arity; ++i) {
1053 ir_node *op = get_irn_n(node, i);
1054 if (!arch_irn_consider_in_reg_alloc(cls, op))
1057 const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
1058 if (!(req->type & arch_register_req_type_limited))
1061 const unsigned *limited = req->limited;
1062 const arch_register_t *reg = arch_get_irn_register(op);
1063 unsigned current_reg = arch_register_get_index(reg);
1064 for (unsigned r = 0; r < n_regs; ++r) {
1065 if (rbitset_is_set(limited, r))
1068 rbitset_set(forbidden_edges, current_reg*n_regs + r);
1072 /* add all combinations, except for not allowed ones */
1073 for (unsigned l = 0; l < n_regs; ++l) {
1074 if (!rbitset_is_set(normal_regs, l)) {
1076 snprintf(name, sizeof(name), "%u_to_%u", l, l);
1077 lpp_vars[l*n_regs+l] = lpp_add_var(lpp, name, lpp_binary, 1);
1081 for (unsigned r = 0; r < n_regs; ++r) {
1082 if (!rbitset_is_set(normal_regs, r))
1084 if (rbitset_is_set(forbidden_edges, l*n_regs + r))
1086 /* livethrough values may not use constrained output registers */
1087 if (rbitset_is_set(live_through_regs, l)
1088 && rbitset_is_set(forbidden_regs, r))
1092 snprintf(name, sizeof(name), "%u_to_%u", l, r);
1094 double costs = l==r ? 9 : 8;
1095 lpp_vars[l*n_regs+r]
1096 = lpp_add_var(lpp, name, lpp_binary, costs);
1097 assert(lpp_vars[l*n_regs+r] > 0);
1100 /* add constraints */
1101 for (unsigned l = 0; l < n_regs; ++l) {
1102 /* only 1 destination per register */
1103 int constraint = -1;
1104 for (unsigned r = 0; r < n_regs; ++r) {
1105 int var = lpp_vars[l*n_regs+r];
1108 if (constraint < 0) {
1110 snprintf(name, sizeof(name), "%u_to_dest", l);
1111 constraint = lpp_add_cst(lpp, name, lpp_equal, 1);
1113 lpp_set_factor_fast(lpp, constraint, var, 1);
1115 /* each destination used by at most 1 value */
1117 for (unsigned r = 0; r < n_regs; ++r) {
1118 int var = lpp_vars[r*n_regs+l];
1121 if (constraint < 0) {
1123 snprintf(name, sizeof(name), "one_to_%u", l);
1124 constraint = lpp_add_cst(lpp, name, lpp_less_equal, 1);
1126 lpp_set_factor_fast(lpp, constraint, var, 1);
1130 lpp_dump_plain(lpp, fopen("lppdump.txt", "w"));
1133 lpp_solve(lpp, be_options.ilp_server, be_options.ilp_solver);
1134 if (!lpp_is_sol_valid(lpp))
1135 panic("ilp solution not valid!");
1137 unsigned *assignment = ALLOCAN(unsigned, n_regs);
1138 for (unsigned l = 0; l < n_regs; ++l) {
1139 unsigned dest_reg = (unsigned)-1;
1140 for (unsigned r = 0; r < n_regs; ++r) {
1141 int var = lpp_vars[l*n_regs+r];
1144 double val = lpp_get_var_sol(lpp, var);
1146 assert(dest_reg == (unsigned)-1);
1150 assert(dest_reg != (unsigned)-1);
1151 assignment[dest_reg] = l;
1154 fprintf(stderr, "Assignment: ");
1155 for (unsigned l = 0; l < n_regs; ++l) {
1156 fprintf(stderr, "%u ", assignment[l]);
1158 fprintf(stderr, "\n");
1160 permute_values(live_nodes, node, assignment);
1164 static bool is_aligned(unsigned num, unsigned alignment)
1166 unsigned mask = alignment-1;
1167 assert(is_po2(alignment));
1168 return (num&mask) == 0;
1172 * Enforce constraints at a node by live range splits.
1174 * @param live_nodes the set of live nodes, might be changed
1175 * @param node the current node
1177 static void enforce_constraints(ir_nodeset_t *live_nodes, ir_node *node,
1178 unsigned *forbidden_regs)
1180 /* see if any use constraints are not met and whether double-width
1181 * values are involved */
1182 bool double_width = false;
1184 int arity = get_irn_arity(node);
1185 for (int i = 0; i < arity; ++i) {
1186 ir_node *op = get_irn_n(node, i);
1187 if (!arch_irn_consider_in_reg_alloc(cls, op))
1190 /* are there any limitations for the i'th operand? */
1191 const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
1193 double_width = true;
1194 const arch_register_t *reg = arch_get_irn_register(op);
1195 unsigned reg_index = arch_register_get_index(reg);
1196 if (req->type & arch_register_req_type_aligned) {
1197 if (!is_aligned(reg_index, req->width)) {
1202 if (!(req->type & arch_register_req_type_limited))
1205 const unsigned *limited = req->limited;
1206 if (!rbitset_is_set(limited, reg_index)) {
1207 /* found an assignment outside the limited set */
1213 /* is any of the live-throughs using a constrained output register? */
1215 unsigned *live_through_regs = NULL;
1216 be_foreach_definition(node, cls, value,
1217 if (req_->width > 1)
1218 double_width = true;
1219 if (! (req_->type & arch_register_req_type_limited))
1221 if (live_through_regs == NULL) {
1222 rbitset_alloca(live_through_regs, n_regs);
1223 determine_live_through_regs(live_through_regs, node);
1225 rbitset_or(forbidden_regs, req_->limited, n_regs);
1226 if (rbitsets_have_common(req_->limited, live_through_regs, n_regs))
1233 /* create these arrays if we haven't yet */
1234 if (live_through_regs == NULL) {
1235 rbitset_alloca(live_through_regs, n_regs);
1239 /* only the ILP variant can solve this yet */
1240 solve_lpp(live_nodes, node, forbidden_regs, live_through_regs);
1244 /* at this point we have to construct a bipartite matching problem to see
1245 * which values should go to which registers
1246 * Note: We're building the matrix in "reverse" - source registers are
1247 * right, destinations left because this will produce the solution
1248 * in the format required for permute_values.
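 * (Illustrative only: with three registers, a returned assignment of {0, 2, 1}
 *  means register 0 keeps its value while registers 1 and 2 swap theirs,
 *  since assignment[destination] = source, which is exactly the layout
 *  permute_values expects.)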
1250 hungarian_problem_t *bp
1251 = hungarian_new(n_regs, n_regs, HUNGARIAN_MATCH_PERFECT);
1253 /* add all combinations, then remove not allowed ones */
1254 for (unsigned l = 0; l < n_regs; ++l) {
1255 if (!rbitset_is_set(normal_regs, l)) {
1256 hungarian_add(bp, l, l, 1);
1260 for (unsigned r = 0; r < n_regs; ++r) {
1261 if (!rbitset_is_set(normal_regs, r))
1263 /* livethrough values may not use constrained output registers */
1264 if (rbitset_is_set(live_through_regs, l)
1265 && rbitset_is_set(forbidden_regs, r))
1268 hungarian_add(bp, r, l, l == r ? 9 : 8);
1272 for (int i = 0; i < arity; ++i) {
1273 ir_node *op = get_irn_n(node, i);
1274 if (!arch_irn_consider_in_reg_alloc(cls, op))
1277 const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
1278 if (!(req->type & arch_register_req_type_limited))
1281 const unsigned *limited = req->limited;
1282 const arch_register_t *reg = arch_get_irn_register(op);
1283 unsigned current_reg = arch_register_get_index(reg);
1284 for (unsigned r = 0; r < n_regs; ++r) {
1285 if (rbitset_is_set(limited, r))
1287 hungarian_remove(bp, r, current_reg);
1291 //hungarian_print_cost_matrix(bp, 1);
1292 hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
1294 unsigned *assignment = ALLOCAN(unsigned, n_regs);
1295 int res = hungarian_solve(bp, assignment, NULL, 0);
1299 fprintf(stderr, "Swap result:");
1300 for (i = 0; i < (int) n_regs; ++i) {
1301 fprintf(stderr, " %d", assignment[i]);
1303 fprintf(stderr, "\n");
1308 permute_values(live_nodes, node, assignment);
1311 /** test whether @p test_value is a copy of the same original value as @p value */
1312 static bool is_copy_of(ir_node *value, ir_node *test_value)
1314 if (value == test_value)
1317 allocation_info_t *info = get_allocation_info(value);
1318 allocation_info_t *test_info = get_allocation_info(test_value);
1319 return test_info->original_value == info->original_value;
1323 * find a value in the end-assignment of a basic block
1324 * @returns the index into the assignment array if found
1327 static int find_value_in_block_info(block_info_t *info, ir_node *value)
1329 ir_node **end_assignments = info->assignments;
1330 for (unsigned r = 0; r < n_regs; ++r) {
1331 ir_node *a_value = end_assignments[r];
1333 if (a_value == NULL)
1335 if (is_copy_of(a_value, value))
1343 * Create the necessary permutations at the end of a basic block to fulfill
1344 * the register assignment for phi-nodes in the next block
1346 static void add_phi_permutations(ir_node *block, int p)
1348 ir_node *pred = get_Block_cfgpred_block(block, p);
1349 block_info_t *pred_info = get_block_info(pred);
1351 /* predecessor not processed yet? nothing to do */
1352 if (!pred_info->processed)
1355 unsigned *permutation = ALLOCAN(unsigned, n_regs);
1356 for (unsigned r = 0; r < n_regs; ++r) {
1360 /* check phi nodes */
1361 bool need_permutation = false;
1362 ir_node *phi = sched_first(block);
1363 for ( ; is_Phi(phi); phi = sched_next(phi)) {
1364 if (!arch_irn_consider_in_reg_alloc(cls, phi))
1367 ir_node *phi_pred = get_Phi_pred(phi, p);
1368 int a = find_value_in_block_info(pred_info, phi_pred);
1371 const arch_register_t *reg = arch_get_irn_register(phi);
1372 int regn = arch_register_get_index(reg);
1373 /* same register? nothing to do */
1377 ir_node *op = pred_info->assignments[a];
1378 const arch_register_t *op_reg = arch_get_irn_register(op);
1379 /* virtual or joker registers are ok too */
1380 if ((op_reg->type & arch_register_type_joker)
1381 || (op_reg->type & arch_register_type_virtual))
1384 permutation[regn] = a;
1385 need_permutation = true;
1388 if (need_permutation) {
1389 /* permute values at end of predecessor */
1390 ir_node **old_assignments = assignments;
1391 assignments = pred_info->assignments;
1392 permute_values(NULL, be_get_end_of_block_insertion_point(pred),
1394 assignments = old_assignments;
1397 /* change phi nodes to use the copied values */
1398 phi = sched_first(block);
1399 for ( ; is_Phi(phi); phi = sched_next(phi)) {
1400 if (!arch_irn_consider_in_reg_alloc(cls, phi))
1403 /* we have permuted all values into the correct registers, so we can
1404 simply query which value occupies the phi's register in the predecessor */
1406 int a = arch_register_get_index(arch_get_irn_register(phi));
1407 ir_node *op = pred_info->assignments[a];
1408 set_Phi_pred(phi, p, op);
1413 * Set preferences for a phi's register based on the registers used at the phi inputs.
1416 static void adapt_phi_prefs(ir_node *phi)
1418 ir_node *block = get_nodes_block(phi);
1419 allocation_info_t *info = get_allocation_info(phi);
1421 int arity = get_irn_arity(phi);
1422 for (int i = 0; i < arity; ++i) {
1423 ir_node *op = get_irn_n(phi, i);
1424 const arch_register_t *reg = arch_get_irn_register(op);
1428 /* we only give the bonus if the predecessor already has registers
1429 * assigned, otherwise we only see a dummy value
1430 * and any conclusions about its register are useless */
1431 ir_node *pred_block = get_Block_cfgpred_block(block, i);
1432 block_info_t *pred_block_info = get_block_info(pred_block);
1433 if (!pred_block_info->processed)
1436 /* give bonus for already assigned register */
1437 float weight = (float)get_block_execfreq(pred_block);
1438 unsigned r = arch_register_get_index(reg);
1439 info->prefs[r] += weight * AFF_PHI;
1444 * After a phi has been assigned a register, propagate that register preference
1445 * to the phi inputs.
1447 static void propagate_phi_register(ir_node *phi, unsigned assigned_r)
1449 ir_node *block = get_nodes_block(phi);
1451 int arity = get_irn_arity(phi);
1452 for (int i = 0; i < arity; ++i) {
1453 ir_node *op = get_Phi_pred(phi, i);
1454 allocation_info_t *info = get_allocation_info(op);
1455 ir_node *pred_block = get_Block_cfgpred_block(block, i);
1457 = (float)get_block_execfreq(pred_block) * AFF_PHI;
1459 if (info->prefs[assigned_r] >= weight)
1462 /* promote the preferred register */
1463 for (unsigned r = 0; r < n_regs; ++r) {
1464 if (info->prefs[r] > -weight) {
1465 info->prefs[r] = -weight;
1468 info->prefs[assigned_r] = weight;
1471 propagate_phi_register(op, assigned_r);
1475 static void assign_phi_registers(ir_node *block)
1477 /* count phi nodes */
1479 sched_foreach(block, node) {
1482 if (!arch_irn_consider_in_reg_alloc(cls, node))
1490 /* build a bipartite matching problem for all phi nodes */
1491 hungarian_problem_t *bp
1492 = hungarian_new(n_phis, n_regs, HUNGARIAN_MATCH_PERFECT);
1494 sched_foreach(block, node) {
1497 if (!arch_irn_consider_in_reg_alloc(cls, node))
1500 /* give bonuses for predecessor colorings */
1501 adapt_phi_prefs(node);
1502 /* add the phi's register preferences to the bipartite problem */
1503 allocation_info_t *info = get_allocation_info(node);
1504 DB((dbg, LEVEL_3, "Prefs for %+F: ", node));
1505 for (unsigned r = 0; r < n_regs; ++r) {
1506 if (!rbitset_is_set(normal_regs, r))
1509 float costs = info->prefs[r];
1510 costs = costs < 0 ? -logf(-costs+1) : logf(costs+1);
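/* Illustrative numbers (not from the source): a preference of +100 becomes
 * logf(101) ~ 4.6 and -100 becomes ~ -4.6, compressing the preference range
 * before it is truncated to an int for the bipartite matching below. */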
1513 hungarian_add(bp, n, r, (int)costs);
1514 DB((dbg, LEVEL_3, " %s(%f)", arch_register_for_index(cls, r)->name,
1517 DB((dbg, LEVEL_3, "\n"));
1521 //hungarian_print_cost_matrix(bp, 7);
1522 hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
1524 unsigned *assignment = ALLOCAN(unsigned, n_regs);
1525 int res = hungarian_solve(bp, assignment, NULL, 0);
1530 sched_foreach(block, node) {
1533 if (!arch_irn_consider_in_reg_alloc(cls, node))
1535 const arch_register_req_t *req
1536 = arch_get_irn_register_req(node);
1538 unsigned r = assignment[n++];
1539 assert(rbitset_is_set(normal_regs, r));
1540 const arch_register_t *reg = arch_register_for_index(cls, r);
1541 DB((dbg, LEVEL_2, "Assign %+F -> %s\n", node, reg->name));
1542 use_reg(node, reg, req->width);
1544 /* adapt preferences for phi inputs */
1545 if (propagate_phi_registers)
1546 propagate_phi_register(node, r);
1550 static arch_register_req_t *allocate_reg_req(ir_graph *irg)
1552 struct obstack *obst = be_get_be_obst(irg);
1553 arch_register_req_t *req = OALLOCZ(obst, arch_register_req_t);
1558 * Walker: assign registers to all nodes of a block that
1559 * need registers from the currently considered register class.
1561 static void allocate_coalesce_block(ir_node *block, void *data)
1564 DB((dbg, LEVEL_2, "* Block %+F\n", block));
1566 /* clear assignments */
1567 block_info_t *block_info = get_block_info(block);
1568 assignments = block_info->assignments;
1570 ir_nodeset_t live_nodes;
1571 ir_nodeset_init(&live_nodes);
1573 /* gather regalloc infos of predecessor blocks */
1574 int n_preds = get_Block_n_cfgpreds(block);
1575 block_info_t **pred_block_infos = ALLOCAN(block_info_t*, n_preds);
1576 for (int i = 0; i < n_preds; ++i) {
1577 ir_node *pred = get_Block_cfgpred_block(block, i);
1578 block_info_t *pred_info = get_block_info(pred);
1579 pred_block_infos[i] = pred_info;
1582 ir_node **phi_ins = ALLOCAN(ir_node*, n_preds);
1584 /* collect live-in nodes and preassigned values */
1585 be_lv_foreach(lv, block, be_lv_state_in, node) {
1586 const arch_register_req_t *req = arch_get_irn_register_req(node);
1587 if (req->cls != cls)
1590 if (req->type & arch_register_req_type_ignore) {
1591 allocation_info_t *info = get_allocation_info(node);
1592 info->current_value = node;
1594 const arch_register_t *reg = arch_get_irn_register(node);
1595 assert(reg != NULL); /* ignore values must be preassigned */
1596 use_reg(node, reg, req->width);
1600 /* check all predecessors for this value; if it is not the same
1601 everywhere (or still unknown) then we have to construct a phi
1602 (we collect the potential phi inputs here) */
1603 bool need_phi = false;
1604 for (int p = 0; p < n_preds; ++p) {
1605 block_info_t *pred_info = pred_block_infos[p];
1607 if (!pred_info->processed) {
1608 /* use node for now, it will get fixed later */
1612 int a = find_value_in_block_info(pred_info, node);
1614 /* must live out of predecessor */
1616 phi_ins[p] = pred_info->assignments[a];
1617 /* different value from last time? then we need a phi */
1618 if (p > 0 && phi_ins[p-1] != phi_ins[p]) {
1625 ir_mode *mode = get_irn_mode(node);
1626 const arch_register_req_t *phi_req = cls->class_req;
1627 if (req->width > 1) {
1628 arch_register_req_t *new_req = allocate_reg_req(irg);
1630 new_req->type = req->type & arch_register_req_type_aligned;
1631 new_req->width = req->width;
1634 ir_node *phi = be_new_Phi(block, n_preds, phi_ins, mode,
1637 DB((dbg, LEVEL_3, "Create Phi %+F (for %+F) -", phi, node));
1638 #ifdef DEBUG_libfirm
1639 for (int pi = 0; pi < n_preds; ++pi) {
1640 DB((dbg, LEVEL_3, " %+F", phi_ins[pi]));
1642 DB((dbg, LEVEL_3, "\n"));
1644 mark_as_copy_of(phi, node);
1645 sched_add_after(block, phi);
1649 allocation_info_t *info = get_allocation_info(node);
1650 info->current_value = phi_ins[0];
1652 /* Grab one of the inputs we constructed (might not be the same as
1653 * "node" as we could see the same copy of the value in all predecessors) */
1658 /* if the node already has a register assigned use it */
1659 const arch_register_t *reg = arch_get_irn_register(node);
1661 use_reg(node, reg, req->width);
1664 /* remember that this node is live at the beginning of the block */
1665 ir_nodeset_insert(&live_nodes, node);
1668 unsigned *forbidden_regs; /**< collects registers which must
1669 not be used for optimistic splits */
1670 rbitset_alloca(forbidden_regs, n_regs);
1672 /* handle phis... */
1673 assign_phi_registers(block);
1675 /* all live-ins must have a register */
1677 foreach_ir_nodeset(&live_nodes, node, iter) {
1678 const arch_register_t *reg = arch_get_irn_register(node);
1679 assert(reg != NULL);
1683 /* assign instructions in the block */
1684 sched_foreach(block, node) {
1685 /* phis are already assigned */
1689 rewire_inputs(node);
1691 /* enforce use constraints */
1692 rbitset_clear_all(forbidden_regs, n_regs);
1693 enforce_constraints(&live_nodes, node, forbidden_regs);
1695 rewire_inputs(node);
1697 /* we may not use registers used for inputs for optimistic splits */
1698 int arity = get_irn_arity(node);
1699 for (int i = 0; i < arity; ++i) {
1700 ir_node *op = get_irn_n(node, i);
1701 if (!arch_irn_consider_in_reg_alloc(cls, op))
1704 const arch_register_t *reg = arch_get_irn_register(op);
1705 rbitset_set(forbidden_regs, arch_register_get_index(reg));
1708 /* free registers of values last used at this instruction */
1709 free_last_uses(&live_nodes, node);
1711 /* assign output registers */
1713 be_foreach_definition_(node, cls, value,
1714 assign_reg(block, value, forbidden_regs);
1718 ir_nodeset_destroy(&live_nodes);
1721 block_info->processed = true;
1723 /* permute values at end of predecessor blocks in case of phi-nodes */
1725 for (int p = 0; p < n_preds; ++p) {
1726 add_phi_permutations(block, p);
1730 /* if we have exactly one successor then we may already be able to produce the phi permutations for it */
1732 if (get_irn_n_edges_kind(block, EDGE_KIND_BLOCK) == 1) {
1733 const ir_edge_t *edge
1734 = get_irn_out_edge_first_kind(block, EDGE_KIND_BLOCK);
1735 ir_node *succ = get_edge_src_irn(edge);
1736 int p = get_edge_src_pos(edge);
1737 block_info_t *succ_info = get_block_info(succ);
1739 if (succ_info->processed) {
1740 add_phi_permutations(succ, p);
1745 typedef struct block_costs_t block_costs_t;
1746 struct block_costs_t {
1747 float costs; /**< costs of the block */
1748 int dfs_num; /**< depth first search number (to detect backedges) */
1751 static int cmp_block_costs(const void *d1, const void *d2)
1753 const ir_node * const *block1 = (const ir_node**)d1;
1754 const ir_node * const *block2 = (const ir_node**)d2;
1755 const block_costs_t *info1 = (const block_costs_t*)get_irn_link(*block1);
1756 const block_costs_t *info2 = (const block_costs_t*)get_irn_link(*block2);
1757 return QSORT_CMP(info2->costs, info1->costs);
1760 static void determine_block_order(void)
1762 ir_node **blocklist = be_get_cfgpostorder(irg);
1763 size_t n_blocks = ARR_LEN(blocklist);
1765 pdeq *worklist = new_pdeq();
1766 ir_node **order = XMALLOCN(ir_node*, n_blocks);
1769 /* clear block links... */
1770 for (size_t p = 0; p < n_blocks; ++p) {
1771 ir_node *block = blocklist[p];
1772 set_irn_link(block, NULL);
1775 /* walk blocks in reverse postorder; the cost of each block is its own
1776 * execution frequency plus the sum of its predecessors' costs (excluding
1777 * backedges, whose costs we can't determine) */
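/* Illustrative numbers (not taken from the source): a block with execution
 * frequency 2.0 whose non-backedge predecessors already have costs 3.0 and
 * 5.0 receives cost 2.0 + 3.0 + 5.0 = 10.0; blocks with higher costs are
 * scheduled for coloring first. */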
1778 for (size_t p = n_blocks; p > 0;) {
1779 block_costs_t *cost_info;
1780 ir_node *block = blocklist[--p];
1782 float execfreq = (float)get_block_execfreq(block);
1783 float costs = execfreq;
1784 int n_cfgpreds = get_Block_n_cfgpreds(block);
1785 for (int p2 = 0; p2 < n_cfgpreds; ++p2) {
1786 ir_node *pred_block = get_Block_cfgpred_block(block, p2);
1787 block_costs_t *pred_costs = (block_costs_t*)get_irn_link(pred_block);
1788 /* we don't have any info for backedges */
1789 if (pred_costs == NULL)
1791 costs += pred_costs->costs;
1794 cost_info = OALLOCZ(&obst, block_costs_t);
1795 cost_info->costs = costs;
1796 cost_info->dfs_num = dfs_num++;
1797 set_irn_link(block, cost_info);
1800 /* sort array by block costs */
1801 qsort(blocklist, n_blocks, sizeof(blocklist[0]), cmp_block_costs);
1803 ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
1804 inc_irg_block_visited(irg);
1806 for (size_t p = 0; p < n_blocks; ++p) {
1807 ir_node *block = blocklist[p];
1808 if (Block_block_visited(block))
1811 /* continually add predecessors with highest costs to worklist
1812 * (without using backedges) */
1814 block_costs_t *info = (block_costs_t*)get_irn_link(block);
1815 ir_node *best_pred = NULL;
1816 float best_costs = -1;
1817 int n_cfgpred = get_Block_n_cfgpreds(block);
1819 pdeq_putr(worklist, block);
1820 mark_Block_block_visited(block);
1821 for (int i = 0; i < n_cfgpred; ++i) {
1822 ir_node *pred_block = get_Block_cfgpred_block(block, i);
1823 block_costs_t *pred_info = (block_costs_t*)get_irn_link(pred_block);
1825 /* ignore backedges */
1826 if (pred_info->dfs_num > info->dfs_num)
1829 if (pred_info->costs > best_costs) {
1830 best_costs = pred_info->costs;
1831 best_pred = pred_block;
1835 } while (block != NULL && !Block_block_visited(block));
1837 /* now put all nodes in the worklist in our final order */
1838 while (!pdeq_empty(worklist)) {
1839 ir_node *pblock = (ir_node*)pdeq_getr(worklist);
1840 assert(order_p < n_blocks);
1841 order[order_p++] = pblock;
1844 assert(order_p == n_blocks);
1847 ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);
1849 DEL_ARR_F(blocklist);
1851 obstack_free(&obst, NULL);
1852 obstack_init(&obst);
1854 block_order = order;
1855 n_block_order = n_blocks;
1859 * Run the register allocator for the current register class.
1861 static void be_pref_alloc_cls(void)
1863 be_assure_live_sets(irg);
1864 lv = be_get_irg_liveness(irg);
1866 ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);
1868 DB((dbg, LEVEL_2, "=== Allocating registers of %s ===\n", cls->name));
1870 be_clear_links(irg);
1872 irg_block_walk_graph(irg, NULL, analyze_block, NULL);
1873 if (create_congruence_classes)
1874 combine_congruence_classes();
1876 for (size_t i = 0; i < n_block_order; ++i) {
1877 ir_node *block = block_order[i];
1878 allocate_coalesce_block(block, NULL);
1881 ir_free_resources(irg, IR_RESOURCE_IRN_LINK);
1884 static void dump(int mask, ir_graph *irg, const char *suffix)
1886 if (be_options.dump_flags & mask)
1887 dump_ir_graph(irg, suffix);
1891 * Run the spiller on the current graph.
1893 static void spill(void)
1895 /* make sure all nodes show their real register pressure */
1896 be_timer_push(T_RA_CONSTR);
1897 be_pre_spill_prepare_constr(irg, cls);
1898 be_timer_pop(T_RA_CONSTR);
1900 dump(DUMP_RA, irg, "spillprepare");
1903 be_timer_push(T_RA_SPILL);
1904 be_do_spill(irg, cls);
1905 be_timer_pop(T_RA_SPILL);
1907 be_timer_push(T_RA_SPILL_APPLY);
1908 check_for_memory_operands(irg);
1909 be_timer_pop(T_RA_SPILL_APPLY);
1911 dump(DUMP_RA, irg, "spill");
1915 * The pref register allocator for a whole procedure.
1917 static void be_pref_alloc(ir_graph *new_irg)
1919 obstack_init(&obst);
1923 /* determine a good coloring order */
1924 determine_block_order();
1926 const arch_env_t *arch_env = be_get_irg_arch_env(new_irg);
1927 int n_cls = arch_env->n_register_classes;
1928 for (int c = 0; c < n_cls; ++c) {
1929 cls = &arch_env->register_classes[c];
1930 if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
1933 stat_ev_ctx_push_str("regcls", cls->name);
1935 n_regs = arch_register_class_n_regs(cls);
1936 normal_regs = rbitset_malloc(n_regs);
1937 be_set_allocatable_regs(irg, cls, normal_regs);
1941 /* verify schedule and register pressure */
1942 be_timer_push(T_VERIFY);
1943 if (be_options.verify_option == BE_VERIFY_WARN) {
1944 be_verify_schedule(irg);
1945 be_verify_register_pressure(irg, cls);
1946 } else if (be_options.verify_option == BE_VERIFY_ASSERT) {
1947 assert(be_verify_schedule(irg) && "Schedule verification failed");
1948 assert(be_verify_register_pressure(irg, cls)
1949 && "Register pressure verification failed");
1951 be_timer_pop(T_VERIFY);
1953 be_timer_push(T_RA_COLOR);
1954 be_pref_alloc_cls();
1955 be_timer_pop(T_RA_COLOR);
1957 /* we most probably constructed new Phis, so the liveness info is now invalid */
1959 be_invalidate_live_sets(irg);
1962 stat_ev_ctx_pop("regcls");
1965 be_timer_push(T_RA_SPILL_APPLY);
1966 be_abi_fix_stack_nodes(irg);
1967 be_timer_pop(T_RA_SPILL_APPLY);
1969 be_timer_push(T_VERIFY);
1970 if (be_options.verify_option == BE_VERIFY_WARN) {
1971 be_verify_register_allocation(irg);
1972 } else if (be_options.verify_option == BE_VERIFY_ASSERT) {
1973 assert(be_verify_register_allocation(irg)
1974 && "Register allocation invalid");
1976 be_timer_pop(T_VERIFY);
1978 obstack_free(&obst, NULL);
1981 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_pref_alloc)
1982 void be_init_pref_alloc(void)
1984 static be_ra_t be_ra_pref = {
1987 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1988 lc_opt_entry_t *prefalloc_group = lc_opt_get_grp(be_grp, "prefalloc");
1989 lc_opt_add_table(prefalloc_group, options);
1991 be_register_allocator("pref", &be_ra_pref);
1992 FIRM_DBG_REGISTER(dbg, "firm.be.prefalloc");