2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Chordal register allocation.
23 * @author Sebastian Hack
37 #include "raw_bitset.h"
39 #include "bipartite.h"
40 #include "hungarian.h"
43 #include "irgraph_t.h"
44 #include "irprintf_t.h"
55 #include "besched_t.h"
62 #include "bestatevent.h"
64 #include "beintlive_t.h"
66 #include "bechordal_t.h"
67 #include "bechordal_draw.h"
/** Debug module handle; present only in debug builds (DEBUG_ONLY). */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* NOTE(review): presumably gates PostScript dumping of interval trees; the
 * #ifdef using it is not visible in this excerpt — confirm before removing. */
#define DUMP_INTERVALS
/**
 * Environment carried through the chordal allocation walker passes
 * (constraint handling, pressure computation and color assignment).
 */
typedef struct _be_chordal_alloc_env_t {
	be_chordal_env_t *chordal_env;  /**< The chordal environment of the current register class. */

	pset *pre_colored;              /**< Set of precolored nodes. */
	bitset_t *live;                 /**< A liveness bitset. */
	bitset_t *tmp_colors;           /**< An auxiliary bitset which is as long as the number of colors in the class. */
	bitset_t *colors;               /**< The color mask. */
	bitset_t *in_colors;            /**< Colors used by live in values. */
	int colors_n;                   /**< The number of colors. */
} be_chordal_alloc_env_t;
/* Make a fourcc for border checking. */
#define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')

/**
 * Debug helper: assert that every border in a block's border list still
 * carries the BORDER_FOURCC magic, i.e. was not corrupted.
 * NOTE(review): the opening brace and the loop-variable declaration appear
 * to be elided from this excerpt.
 */
static void check_border_list(struct list_head *head)
	list_for_each_entry(border_t, x, head, list) {
		assert(x->magic == BORDER_FOURCC);
/**
 * Debug helper: run check_border_list() on the border list of every block
 * recorded in env->border_heads.
 * NOTE(review): braces and the declaration of `ent` are elided here.
 */
static void check_heads(be_chordal_env_t *env)
	for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
		/* ir_printf("checking border list of block %+F\n", ent->key); */
		check_border_list(ent->value);
/**
 * Add an interval border to a block's list of interval borders.
 * @note You always have to create the use before the def.
 * @param env The environment.
 * @param head The list head to enqueue the borders.
 * @param irn The node (value) the border belongs to.
 * @param pressure The pressure at this point in time.
 * @param step A time step for the border.
 * @param is_def Is the border a use or a def.
 * @return The created border.
 */
static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
		ir_node *irn, unsigned step, unsigned pressure,
		unsigned is_def, unsigned is_real)
	/* NOTE(review): the if/else distinguishing the use case from the def case,
	 * and the declarations of `b` and `def`, are elided in this excerpt.
	 * Use case: allocate the use border plus its matching def border. */
	b = obstack_alloc(env->obst, sizeof(*b));

	/* also allocate the def and tie it to the use. */
	def = obstack_alloc(env->obst, sizeof(*def));
	memset(def, 0, sizeof(*def));

	/*
	 * Set the link field of the irn to the def.
	 * This strongly relies on the fact, that the use is always
	 * made before the def.
	 */
	set_irn_link(irn, def);

	DEBUG_ONLY(b->magic = BORDER_FOURCC);
	DEBUG_ONLY(def->magic = BORDER_FOURCC);

	/*
	 * Def case: the use was made before and so was the def node (see the
	 * code above). It was placed into the link field of the irn, so we can
	 * get it there.
	 */
	b = get_irn_link(irn);

	DEBUG_ONLY(assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered"));

	/* Fill the common fields and enqueue the border. */
	b->pressure = pressure;
	b->is_real = is_real;

	list_add_tail(&b->list, head);
	DBG((dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
175 * Check, if an irn is of the register class currently under processing.
176 * @param env The chordal environment.
177 * @param irn The node.
178 * @return 1, if the node is of that register class, 0 if not.
180 static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
182 return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
185 static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
187 bitset_t *tmp = alloc_env->tmp_colors;
188 bitset_copy(tmp, colors);
189 bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
190 return bitset_next_clear(tmp, 0);
/**
 * Compute the register set that decides a pairing of two operands: the more
 * constrained of the two admissible sets is copied into bs.
 * NOTE(review): the guard/else structure around these copies (including the
 * handling of a NULL o1/o2 and the case where neither set contains the
 * other) appears to be elided from this excerpt — confirm against the
 * original before relying on the exact control flow.
 */
static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
	bitset_copy(bs, o2->regs);
	bitset_copy(bs, o1->regs);

	/* Both operands must agree on the class (or have no class requirement). */
	assert(o1->req->cls == o2->req->cls || ! o1->req->cls || ! o2->req->cls);

	/* If one admissible set contains the other, the subset is decisive. */
	if(bitset_contains(o1->regs, o2->regs))
		bitset_copy(bs, o1->regs);
	else if(bitset_contains(o2->regs, o1->regs))
		bitset_copy(bs, o2->regs);
/**
 * Scan the instruction at irn into a be_insn_t using an insn environment
 * set up for the current register class.
 * NOTE(review): the declaration of the local `ie` (be_insn_env_t) and the
 * setup of its remaining fields are elided from this excerpt.
 */
static be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
	ie.ignore_colors = env->ignore_colors;
	ie.aenv = env->birg->main_env->arch_env;
	return be_scan_insn(&ie, irn);
230 static ir_node *prepare_constr_insn(be_chordal_env_t *env, ir_node *irn)
232 bitset_t *tmp = bitset_alloca(env->cls->n_regs);
233 bitset_t *def_constr = bitset_alloca(env->cls->n_regs);
234 ir_node *bl = get_nodes_block(irn);
235 const be_irg_t *birg = env->birg;
236 be_lv_t *lv = birg->lv;
241 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
242 ir_node *op = get_irn_n(irn, i);
244 const arch_register_t *reg;
245 const arch_register_req_t *req;
247 if (arch_get_irn_reg_class(irn, i) != env->cls)
250 reg = arch_get_irn_register(op);
252 if (reg == NULL || !arch_register_type_is(reg, ignore))
254 if(arch_register_type_is(reg, joker))
257 req = arch_get_register_req(irn, i);
258 if (!arch_register_req_is(req, limited))
261 if (rbitset_is_set(req->limited, reg->index))
264 copy = be_new_Copy(env->cls, env->irg, bl, op);
265 be_stat_ev("constr_copy", 1);
267 sched_add_before(irn, copy);
268 set_irn_n(irn, i, copy);
269 DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, irn, i));
272 insn = chordal_scan_insn(env, irn);
274 if(!insn->has_constraints)
277 /* insert copies for nodes that occur constrained more than once. */
278 for(i = insn->use_start; i < insn->n_ops; ++i) {
279 be_operand_t *op = &insn->ops[i];
281 if(!op->has_constraints)
284 for(j = i + 1; j < insn->n_ops; ++j) {
286 be_operand_t *a_op = &insn->ops[j];
288 if(a_op->carrier != op->carrier || !a_op->has_constraints)
291 /* if the constraint is the same, no copy is necessary
292 * TODO generalise unequal but overlapping constraints */
293 if (a_op->req == op->req)
296 if (be_is_Copy(get_irn_n(insn->irn, a_op->pos)))
299 copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
300 be_stat_ev("constr_copy", 1);
302 sched_add_before(insn->irn, copy);
303 set_irn_n(insn->irn, a_op->pos, copy);
304 DBG((dbg, LEVEL_3, "inserting multiple constr copy %+F for %+F pos %d\n", copy, insn->irn, a_op->pos));
308 /* collect all registers occurring in out constraints. */
309 for(i = 0; i < insn->use_start; ++i) {
310 be_operand_t *op = &insn->ops[i];
311 if(op->has_constraints)
312 bitset_or(def_constr, op->regs);
316 insert copies for all constrained arguments living through the node
317 and being constrained to a register which also occurs in out constraints.
319 for(i = insn->use_start; i < insn->n_ops; ++i) {
321 be_operand_t *op = &insn->ops[i];
323 bitset_copy(tmp, op->regs);
324 bitset_and(tmp, def_constr);
328 1) the operand is constrained.
329 2) lives through the node.
330 3) is constrained to a register occurring in out constraints.
332 if(!op->has_constraints ||
333 !values_interfere(birg, insn->irn, op->carrier) ||
334 bitset_popcnt(tmp) == 0)
338 only create the copy if the operand is no copy.
339 this is necessary since the assure constraints phase inserts
340 Copies and Keeps for operands which must be different from the
341 results. Additional copies here would destroy this.
343 if (be_is_Copy(get_irn_n(insn->irn, op->pos)))
346 copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
348 sched_add_before(insn->irn, copy);
349 set_irn_n(insn->irn, op->pos, copy);
350 DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n", copy, insn->irn, op->pos));
351 be_liveness_update(lv, op->carrier);
355 obstack_free(env->obst, insn);
356 return insn->next_insn;
359 static void pre_spill_prepare_constr_walker(ir_node *bl, void *data)
361 be_chordal_env_t *env = data;
363 for(irn = sched_first(bl); !sched_is_end(irn);) {
364 irn = prepare_constr_insn(env, irn);
368 void be_pre_spill_prepare_constr(be_chordal_env_t *cenv) {
369 irg_block_walk_graph(cenv->irg, pre_spill_prepare_constr_walker, NULL, (void *) cenv);
/**
 * Pair each output operand of an insn with an input operand that may share
 * its register, preferring the pairing whose combined admissible register
 * sets are smallest (most constrained first). Partner links are stored in
 * the operands' partner fields.
 * NOTE(review): several declarations (`i`, `smallest`, `n_total`), guard
 * bodies (`continue;`), the bookkeeping of `smallest`, and the check that a
 * partner was actually found appear to be elided from this excerpt.
 */
static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t *insn)
	const be_chordal_env_t *env = alloc_env->chordal_env;

	int n_uses = be_insn_n_uses(insn);
	int n_defs = be_insn_n_defs(insn);
	bitset_t *bs = bitset_alloca(env->cls->n_regs);
	int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));

	/*
	 * For each out operand, try to find an in operand which can be assigned the
	 * same register as the out operand.
	 */
	for (j = 0; j < insn->use_start; ++j) {
		/* Sentinel: strictly larger than any real |op->regs| + |out_op->regs|. */
		int smallest_n_regs = 2 * env->cls->n_regs + 1;
		be_operand_t *out_op = &insn->ops[j];

		/* Try to find an in operand which has ... */
		for(i = insn->use_start; i < insn->n_ops; ++i) {
			const be_operand_t *op = &insn->ops[i];

			/* ... no partner assigned yet ... */
			if (op->partner != NULL)
			/* ... and whose value does not live through the insn. */
			if (values_interfere(env->birg, op->irn, op->carrier))

			/* Candidate quality: overlap of the two admissible sets. */
			bitset_clear_all(bs);
			bitset_copy(bs, op->regs);
			bitset_and(bs, out_op->regs);
			n_total = bitset_popcnt(op->regs) + bitset_popcnt(out_op->regs);

			if (bitset_popcnt(bs) > 0 && n_total < smallest_n_regs) {
				smallest_n_regs = n_total;

		/* Tie all uses of the partner's carrier to this out operand. */
		be_operand_t *partner = &insn->ops[smallest];
		for(i = insn->use_start; i < insn->n_ops; ++i) {
			if(insn->ops[i].carrier == partner->carrier)
				insn->ops[i].partner = out_op;

		out_op->partner = partner;
		partner->partner = out_op;
/**
 * Prepare a constrained instruction: insert a Perm over the live values in
 * front of it, clear the registers insert_Perm_after() propagated to the
 * Projs, re-scan the insn (its inputs are now Projs of the Perm) and copy
 * the input constraints to the Perm as output constraints.
 * @return The inserted Perm (the trailing `return perm;` appears elided).
 */
static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env,
		be_insn_t **the_insn)
	be_chordal_env_t *env = alloc_env->chordal_env;
	be_insn_t *insn = *the_insn;
	ir_node *perm = NULL;
	bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
	const ir_edge_t *edge;

	assert(insn->has_constraints && "only do this for constrained nodes");

	/*
	 * Collect all registers that occur in output constraints.
	 * This is necessary, since if the insn has one of these as an input constraint
	 * and the corresponding operand interferes with the insn, the operand must
	 * be copied (rest of sentence elided in this excerpt).
	 */
	for(i = 0; i < insn->use_start; ++i) {
		be_operand_t *op = &insn->ops[i];
		if(op->has_constraints)
			bitset_or(out_constr, op->regs);

	/*
	 * Make the Perm, recompute liveness and re-scan the insn since the
	 * in operands are now the Projs of the Perm.
	 */
	perm = insert_Perm_after(env->birg, env->cls, sched_prev(insn->irn));

	/* Registers are propagated by insert_Perm_after(). Clean them here! */
	be_stat_ev("constr_perm", get_irn_arity(perm));
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		arch_set_irn_register(proj, NULL);

	/*
	 * We also have to re-build the insn since the input operands are now the Projs of
	 * the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
	 * the live sets may change.
	 *
	 * NOTE(review): insn->irn is read after obstack_free() released insn —
	 * use after free (works only because obstack_free does not scrub the
	 * memory). The irn should be saved before the free; fix when touching
	 * this code.
	 */
	obstack_free(env->obst, insn);
	*the_insn = insn = chordal_scan_insn(env, insn->irn);

	/*
	 * Copy the input constraints of the insn to the Perm as output
	 * constraints. Succeeding phases (coalescing) will need that.
	 */
	for(i = insn->use_start; i < insn->n_ops; ++i) {
		be_operand_t *op = &insn->ops[i];
		ir_node *proj = op->carrier;
		/*
		 * Note that the predecessor must not be a Proj of the Perm,
		 * since ignore-nodes are not Perm'ed.
		 */
		if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
			be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), op->req);
/**
 * Handle a (possibly constrained) instruction during the constraints walk:
 * record pre-colored operands, toggle the silent flag at Barriers, and for
 * constrained insns build a bipartite matching between values (operands,
 * their partners and values living through the insn) and admissible colors,
 * then assign the matched registers and color the remaining Perm Projs.
 *
 * NOTE(review): this excerpt is elided in many places — declarations
 * (`bs`, `bp`, `n_regs`, `n_alloc`, `assignment`, `partners`, `i`, `col`),
 * guard bodies, early returns, closing braces and apparently a preprocessor
 * conditional selecting between the hungarian and the bipartite matcher
 * (both variants appear below, one commented out).
 *
 * @return The next instruction in the schedule (insn->next_insn).
 */
static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env,
		ir_node *irn, int *silent)
	ir_node **alloc_nodes;
	//hungarian_problem_t *bp;
	const ir_edge_t *edge;
	ir_node *perm = NULL;
	//int match_res, cost;
	be_chordal_env_t *env = alloc_env->chordal_env;
	void *base = obstack_base(env->obst);
	be_insn_t *insn = chordal_scan_insn(env, irn);
	ir_node *res = insn->next_insn;
	int be_silent = *silent;
	be_irg_t *birg = env->birg;

	/* Remember all pre-colored defs of the insn. */
	if(insn->pre_colored) {
		for(i = 0; i < insn->use_start; ++i)
			pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);

	/*
	 * If the current node is a barrier toggle the silent flag.
	 * If we are in the start block, we are ought to be silent at the beginning,
	 * so the toggling activates the constraint handling but skips the barrier.
	 * If we are in the end block we handle the in requirements of the barrier
	 * and set the rest to silent.
	 */
	if(be_is_Barrier(irn))

	/*
	 * Perms inserted before the constraint handling phase are considered to be
	 * correctly precolored. These Perms arise during the ABI handling phase.
	 */
	if(!insn->has_constraints)

	n_regs = env->cls->n_regs;
	bs = bitset_alloca(n_regs);
	alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
	//bp = hungarian_new(n_regs, n_regs, 2, HUNGARIAN_MATCH_PERFECT);
	bp = bipartite_new(n_regs, n_regs);
	assignment = alloca(n_regs * sizeof(assignment[0]));
	partners = pmap_create();

	/*
	 * prepare the constraint handling of this node.
	 * Perms are constructed and Copies are created for constrained values
	 * interfering with the instruction.
	 */
	perm = pre_process_constraints(alloc_env, &insn);

	/* find suitable in operands to the out operands of the node. */
	pair_up_operands(alloc_env, insn);

	/*
	 * look at the in/out operands and add each operand (and its possible partner)
	 * to a bipartite graph (left: nodes with partners, right: admissible colors).
	 */
	for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
		be_operand_t *op = &insn->ops[i];

		/*
		 * If the operand has no partner or the partner has not been marked
		 * for allocation, determine the admissible registers and mark it
		 * for allocation by associating the node and its partner with the
		 * set of admissible registers via a bipartite graph.
		 */
		if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
			ir_node *partner = op->partner ? op->partner->carrier : NULL;

			pmap_insert(partners, op->carrier, partner);
				pmap_insert(partners, partner, op->carrier);

			/* don't insert a node twice */
			for(i = 0; i < n_alloc; ++i) {
				if(alloc_nodes[i] == op->carrier) {

			alloc_nodes[n_alloc] = op->carrier;

			DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier,

			bitset_clear_all(bs);
			get_decisive_partner_regs(bs, op, op->partner);

			DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier,

			/* Add one matching edge per admissible color. */
			bitset_foreach(bs, col) {
				//hungarian_add(bp, n_alloc, col, 1);
				bipartite_add(bp, n_alloc, col);

	/*
	 * Put all nodes which live through the constrained instruction also to the
	 * allocation bipartite graph. They are considered unconstrained.
	 */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);

		assert(is_Proj(proj));

		if(!values_interfere(birg, proj, irn) || pmap_contains(partners, proj))

		/* don't insert a node twice */
		for(i = 0; i < n_alloc; ++i) {
			if(alloc_nodes[i] == proj) {

		assert(n_alloc < n_regs);

		alloc_nodes[n_alloc] = proj;
		pmap_insert(partners, proj, NULL);

		/* Live-through values may take any non-ignore color. */
		bitset_clear_all(bs);
		arch_put_non_ignore_regs(env->cls, bs);
		bitset_andnot(bs, env->ignore_colors);
		bitset_foreach(bs, col) {
			//hungarian_add(bp, n_alloc, col, 1);
			bipartite_add(bp, n_alloc, col);

	/* Compute a valid register allocation. */
	hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
	match_res = hungarian_solve(bp, assignment, &cost, 1);
	assert(match_res == 0 && "matching failed");
	bipartite_matching(bp, assignment);

	/* Assign colors obtained from the matching. */
	for(i = 0; i < n_alloc; ++i) {
		const arch_register_t *reg;

		assert(assignment[i] >= 0 && "there must have been a register assigned");
		reg = arch_register_for_index(env->cls, assignment[i]);
		assert(! (reg->type & arch_register_type_ignore));

		irn = alloc_nodes[i];

		arch_set_irn_register(irn, reg);
		(void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
		DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));

		/* The partner (if any) gets the same register. */
		irn = pmap_get(partners, alloc_nodes[i]);
			arch_set_irn_register(irn, reg);
			(void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
			DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));

	/* Allocate the non-constrained Projs of the Perm. */
	bitset_clear_all(bs);

	/* Put the colors of all Projs in a bitset. */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *reg = arch_get_irn_register(proj);

			bitset_set(bs, reg->index);

	/* Assign the not yet assigned Projs of the Perm a suitable color. */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *reg = arch_get_irn_register(proj);

		DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));

			col = get_next_free_reg(alloc_env, bs);
			reg = arch_register_for_index(env->cls, col);
			bitset_set(bs, reg->index);
			arch_set_irn_register(proj, reg);
			pset_insert_ptr(alloc_env->pre_colored, proj);
			DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));

	//hungarian_free(bp);
	pmap_destroy(partners);

	/* Release all scratch data allocated on the obstack since entry. */
	obstack_free(env->obst, base);
/**
 * Handle constraint nodes in each basic block.
 * handle_constraints() inserts Perm nodes which perm
 * over all values live at the constrained node right in front
 * of the constrained node. These Perms signal a constrained node.
 * For further comments, refer to handle_constraints().
 * NOTE(review): the declaration of `irn` and the closing braces appear to
 * be elided from this excerpt.
 */
static void constraints(ir_node *bl, void *data)
	be_chordal_alloc_env_t *env = data;

	/*
	 * Start silent in the start block.
	 * The silence remains until the first barrier is seen.
	 * Each other block is begun loud.
	 */
	int silent = bl == get_irg_start_block(get_irn_irg(bl));

	/*
	 * If the block is the start block search the barrier and
	 * start handling constraints from there.
	 */
	for(irn = sched_first(bl); !sched_is_end(irn);) {
		irn = handle_constraints(env, irn, &silent);
/**
 * Annotate the register pressure to the nodes and compute
 * the liveness intervals.
 * @param block The block to do it for.
 * @param env_ptr The environment.
 *
 * NOTE(review): declarations of `irn`, `i`, `n`, `elm` and the `step`
 * counter (and its increments) appear to be elided from this excerpt, as do
 * several closing braces and the phi-node guard mentioned below.
 */
static void pressure(ir_node *block, void *env_ptr)
/* Convenience macro for a def */
#define border_def(irn, step, real) \
	border_add(env, head, irn, step, pressure--, 1, real)

/* Convenience macro for a use */
#define border_use(irn, step, real) \
	border_add(env, head, irn, step, ++pressure, 0, real)

	be_chordal_alloc_env_t *alloc_env = env_ptr;
	be_chordal_env_t *env = alloc_env->chordal_env;
	bitset_t *live = alloc_env->live;
	be_lv_t *lv = env->birg->lv;

	unsigned pressure = 0;
	struct list_head *head;

	DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
	bitset_clear_all(live);

	/* Set up the border list in the block info */
	head = obstack_alloc(env->obst, sizeof(*head));
	INIT_LIST_HEAD(head);
	assert(pmap_get(env->border_heads, block) == NULL);
	pmap_insert(env->border_heads, block, head);

	/*
	 * Make final uses of all values live out of the block.
	 * They are necessary to build up real intervals.
	 */
	be_lv_foreach(lv, block, be_lv_state_end, i) {
		ir_node *irn = be_lv_get_irn(lv, block, i);
		if(has_reg_class(env, irn)) {
			DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_idx(irn)));
			bitset_set(live, get_irn_idx(irn));
			border_use(irn, step, 0);

	/*
	 * Determine the last uses of a value inside the block, since they are
	 * relevant for the interval borders.
	 */
	sched_foreach_reverse(block, irn) {
		DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
		DBG((dbg, LEVEL_2, "\tlive: %B\n", live));

		/* Mode-T nodes define their values through their Projs. */
		if (get_irn_mode(irn) == mode_T) {
			const ir_edge_t *edge;

			foreach_out_edge(irn, edge) {
				ir_node *proj = get_edge_src_irn(edge);

				/*
				 * If the node defines some value, which can put into a
				 * register of the current class, make a border for it.
				 */
				if(has_reg_class(env, proj)) {
					int nr = get_irn_idx(proj);

					bitset_clear(live, nr);
					border_def(proj, step, 1);

		/*
		 * If the node defines some value, which can put into a
		 * register of the current class, make a border for it.
		 */
		if(has_reg_class(env, irn)) {
			int nr = get_irn_idx(irn);

			bitset_clear(live, nr);
			border_def(irn, step, 1);

		/*
		 * If the node is no phi node we can examine the uses.
		 */
			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *op = get_irn_n(irn, i);

				if(has_reg_class(env, op)) {
					int nr = get_irn_idx(op);
					const char *msg = "-";

					/* First (i.e. last in schedule order) use opens the interval. */
					if(!bitset_is_set(live, nr)) {
						border_use(op, step, 1);
						bitset_set(live, nr);

					DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));

	/* Close the intervals of everything still live (live-in values). */
	bitset_foreach(live, elm) {
		ir_node *irn = get_idx_irn(env->irg, elm);
		if (be_is_live_in(lv, block, irn))
			border_def(irn, step, 0);
/**
 * Walker: assign registers to the values of one block by sweeping the
 * border list built by pressure(). Dominating blocks were colored before,
 * so live-in values already carry registers.
 * NOTE(review): declarations (`b`, `irn`, `idx`, `col`), several guard
 * bodies, an `else` branch inside the pre-colored check and closing braces
 * appear to be elided from this excerpt.
 */
static void assign(ir_node *block, void *env_ptr)
	be_chordal_alloc_env_t *alloc_env = env_ptr;
	be_chordal_env_t *env = alloc_env->chordal_env;
	bitset_t *live = alloc_env->live;
	bitset_t *colors = alloc_env->colors;
	bitset_t *in_colors = alloc_env->in_colors;
	const arch_env_t *arch_env = env->birg->main_env->arch_env;
	struct list_head *head = get_block_border_head(env, block);
	be_lv_t *lv = env->birg->lv;

	bitset_clear_all(colors);
	bitset_clear_all(live);
	bitset_clear_all(in_colors);

	DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
	DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
	list_for_each_entry(border_t, b, head, list) {
		DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
			b->irn, get_irn_idx(b->irn)));

	/*
	 * Add initial defs for all values live in.
	 * Since their colors have already been assigned (The dominators were
	 * allocated before), we have to mark their colors as used also.
	 */
	be_lv_foreach(lv, block, be_lv_state_in, idx) {
		irn = be_lv_get_irn(lv, block, idx);
		if(has_reg_class(env, irn)) {
			const arch_register_t *reg = arch_get_irn_register(irn);

			assert(reg && "Node must have been assigned a register");
			col = arch_register_get_index(reg);

			DBG((dbg, LEVEL_4, "%+F has reg %s\n", irn, reg->name));

			/* Mark the color of the live in value as used. */
			bitset_set(colors, col);
			bitset_set(in_colors, col);

			/* Mark the value live in. */
			bitset_set(live, get_irn_idx(irn));

	/*
	 * Mind that the sequence of defs from back to front defines a perfect
	 * elimination order. So, coloring the definitions from first to last
	 * (rest of sentence elided in this excerpt).
	 */
	list_for_each_entry_reverse(border_t, b, head, list) {
		ir_node *irn = b->irn;
		int nr = get_irn_idx(irn);
		int ignore = arch_irn_is(arch_env, irn, ignore);

		/*
		 * Assign a color, if it is a local def. Global defs already have a
		 * color (rest of sentence elided in this excerpt).
		 */
		if(b->is_def && !be_is_live_in(lv, block, irn)) {
			const arch_register_t *reg;

			/* Pre-colored / ignore values keep their register; otherwise
			 * pick the next free admissible color. NOTE(review): the `else`
			 * separating the two cases appears elided here. */
			if(ignore || pset_find_ptr(alloc_env->pre_colored, irn)) {
				reg = arch_get_irn_register(irn);
				assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
				col = get_next_free_reg(alloc_env, colors);
				reg = arch_register_for_index(env->cls, col);
				assert(arch_get_irn_register(irn) == NULL && "This node must not have been assigned a register yet");
				assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");

			bitset_set(colors, col);
			arch_set_irn_register(irn, reg);

			DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n", arch_register_get_name(reg), col, irn));

			assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
			bitset_set(live, nr);

		/* Clear the color upon a use. */
		else if(!b->is_def) {
			const arch_register_t *reg = arch_get_irn_register(irn);

			assert(reg && "Register must have been assigned");
			col = arch_register_get_index(reg);

			if(!arch_register_type_is(reg, ignore)) {
				assert(bitset_is_set(live, nr) && "Cannot have a non live use");

			bitset_clear(colors, col);
			bitset_clear(live, nr);
/**
 * Entry point of the chordal register allocator for one register class:
 * assures liveness, handles register-targeting constraints, computes the
 * pressure/interval borders and finally assigns colors — each as a
 * dominance-tree walk.
 * NOTE(review): declarations of `lv`, `buf` and `plotter` (and apparently
 * an #ifdef DUMP_INTERVALS around the plotting code) are elided from this
 * excerpt.
 */
void be_ra_chordal_color(be_chordal_env_t *chordal_env)
	be_chordal_alloc_env_t env;
	be_irg_t *birg = chordal_env->birg;
	const arch_register_class_t *cls = chordal_env->cls;
	int colors_n = arch_register_class_n_regs(cls);
	ir_graph *irg = chordal_env->irg;

	lv = be_assure_liveness(birg);
	be_liveness_assure_sets(lv);
	be_liveness_assure_chk(lv);

	/* Note: the bitsets live on this function's stack; they are only used
	 * by the walkers invoked below. */
	env.chordal_env = chordal_env;
	env.colors_n = colors_n;
	env.colors = bitset_alloca(colors_n);
	env.tmp_colors = bitset_alloca(colors_n);
	env.in_colors = bitset_alloca(colors_n);
	env.pre_colored = pset_new_ptr_default();

	BE_TIMER_PUSH(t_constr);

	/* Handle register targeting constraints */
	dom_tree_walk_irg(irg, constraints, NULL, &env);

	if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
		snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
		be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);

	BE_TIMER_POP(t_constr);

	env.live = bitset_malloc(get_irg_last_idx(chordal_env->irg));

	/* First, determine the pressure */
	dom_tree_walk_irg(irg, pressure, NULL, &env);

	/* Assign the colors */
	dom_tree_walk_irg(irg, assign, NULL, &env);

	if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
		ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
		plotter = new_plotter_ps(buf);
		draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
		plotter_free(plotter);

	bitset_free(env.live);
	del_pset(env.pre_colored);
/** Module initializer: register the debug module for this allocator. */
void be_init_chordal(void)
	FIRM_DBG_REGISTER(dbg, "firm.be.chordal");

/* Hook be_init_chordal() into libFirm's module constructor mechanism. */
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal);