2 * Chordal register allocation.
3 * @author Sebastian Hack
7 * Copyright (C) Universitaet Karlsruhe
8 * Released under the GPL
30 #include "bipartite.h"
33 #include "irgraph_t.h"
34 #include "irprintf_t.h"
45 #include "besched_t.h"
53 #include "bechordal_t.h"
54 #include "bechordal_draw.h"
56 #define DBG_LEVEL SET_LEVEL_0
57 #define DBG_LEVEL_CHECK SET_LEVEL_0
61 #define DUMP_INTERVALS
/** Working state for one run of the chordal coloring over a register class. */
63 typedef struct _be_chordal_alloc_env_t {
64 be_chordal_env_t *chordal_env; /**< The surrounding chordal environment (class, graph, obstack, ...). */
66 pset *pre_colored; /**< Set of precolored nodes. */
67 bitset_t *live; /**< A liveness bitset. */
68 bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
69 bitset_t *colors; /**< The color mask. */
70 bitset_t *in_colors; /**< Colors used by live in values. */
71 int colors_n; /**< The number of colors. */
72 DEBUG_ONLY(firm_dbg_module_t *constr_dbg;) /**< Debug output for the constraint handler. */
73 } be_chordal_alloc_env_t;
77 /* Make a fourcc for border checking. */
78 #define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
/**
 * Debug helper: assert that every border in a block's border list
 * still carries the BORDER_FOURCC magic (detects corruption).
 */
81 static void check_border_list(struct list_head *head)
84 list_for_each_entry(border_t, x, head, list) {
85 assert(x->magic == BORDER_FOURCC);
/**
 * Debug helper: run check_border_list() on the border list of
 * every block recorded in env->border_heads.
 */
89 static void check_heads(be_chordal_env_t *env)
92 for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
93 /* ir_printf("checking border list of block %+F\n", ent->key); */
94 check_border_list(ent->value);
100 * Add an interval border to a block's list
101 * of interval borders.
102 * @note You always have to create the use before the def.
103 * @param env The environment.
104 * @param head The list head to enqueue the borders.
105 * @param irn The node (value) the border belongs to.
106 * @param pressure The pressure at this point in time.
107 * @param step A time step for the border.
108 * @param is_def Is the border a use or a def.
109 * @return The created border.
111 static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
112 ir_node *irn, unsigned step, unsigned pressure,
113 unsigned is_def, unsigned is_real)
/* Use path: allocate the use border itself ... */
120 b = obstack_alloc(&env->obst, sizeof(*b));
122 /* also allocate the def and tie it to the use. */
123 def = obstack_alloc(&env->obst, sizeof(*def));
124 memset(def, 0, sizeof(*def));
129 * Set the link field of the irn to the def.
130 * This strongly relies on the fact, that the use is always
131 * made before the def.
133 set_irn_link(irn, def);
135 b->magic = BORDER_FOURCC;
136 def->magic = BORDER_FOURCC;
/* Def path: the paired def border was pre-allocated on the use path above. */
140 * If the def is encountered, the use was made and so was
141 * the def node (see the code above). It was placed into the
142 * link field of the irn, so we can get it there.
145 b = get_irn_link(irn);
147 assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered");
/* Fill in the border data and enqueue it into the block's list. */
150 b->pressure = pressure;
152 b->is_real = is_real;
155 list_add_tail(&b->list, head);
156 DBG((env->dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
163 * Check, if an irn is of the register class currently under processing.
164 * @param env The chordal environment.
165 * @param irn The node.
166 * @return 1, if the node is of that register class, 0 if not.
168 static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
170 return arch_irn_has_reg_class(env->birg->main_env->arch_env, irn, -1, env->cls);
171 // return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn); /* NOTE(review): dead alternative kept for reference — consider removing. */
/** True iff the register requirement fetched into (req) for irn is a 'limited' constraint. */
174 #define has_limited_constr(req, irn) \
175 (arch_get_register_req(arch_env, (req), irn, -1) && (req)->type == arch_register_req_type_limited)
/**
 * Find the index of the first register color that is neither set in
 * @p colors nor in the class' ignore set.
 */
177 static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
179 bitset_t *tmp = alloc_env->tmp_colors;
180 bitset_copy(tmp, colors);
181 bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
182 return bitset_next_clear(tmp, 0);
/**
 * Compute into @p bs the decisive admissible register set for a pair of
 * operands: if one operand's set contains the other's, the larger set wins.
 * NOTE(review): branches for a missing o1/o2 appear elided here — confirm
 * against the full source before relying on the exact fall-through behavior.
 */
185 static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
190 bitset_copy(bs, o2->regs);
195 bitset_copy(bs, o1->regs);
199 assert(o1->req.cls == o2->req.cls);
201 if(bitset_contains(o1->regs, o2->regs))
202 bitset_copy(bs, o1->regs);
203 else if(bitset_contains(o2->regs, o1->regs))
204 bitset_copy(bs, o2->regs);
/**
 * Set up a scan environment for the current register class and
 * dissect @p irn into a be_insn_t (operands, constraints, ...).
 */
211 static be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
215 ie.ignore_colors = env->ignore_colors;
216 ie.aenv = env->birg->main_env->arch_env;
217 ie.obst = &env->obst;
219 return be_scan_insn(&ie, irn);
/**
 * Insert Copy nodes for constrained operands of one instruction so the
 * constraints become satisfiable, and return the next instruction to process.
 */
222 static ir_node *prepare_constr_insn(be_chordal_env_t *env, ir_node *irn)
224 be_insn_t *insn = chordal_scan_insn(env, irn);
225 bitset_t *def_constr = bitset_alloca(env->cls->n_regs);
226 bitset_t *tmp = bitset_alloca(env->cls->n_regs);
227 ir_node *bl = get_nodes_block(insn->irn);
/* Unconstrained instructions need no preparation. */
230 if(!insn->has_constraints)
233 /* insert copies for nodes that occur constrained more than once. */
234 for(i = insn->use_start; i < insn->n_ops; ++i) {
235 be_operand_t *op = &insn->ops[i];
237 if(op->has_constraints) {
238 for(j = i + 1; j < insn->n_ops; ++j) {
239 be_operand_t *a_op = &insn->ops[j];
241 if(a_op->carrier == op->carrier && a_op->has_constraints) {
242 ir_node *copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
244 sched_add_before(insn->irn, copy);
245 set_irn_n(insn->irn, a_op->pos, copy);
246 DBG((env->dbg, LEVEL_3, "inserting multiple constr copy %+F for %+F pos %d\n", copy, insn->irn, a_op->pos));
252 /* collect all registers occurring in out constraints. */
253 for(i = 0; i < insn->use_start; ++i) {
254 be_operand_t *op = &insn->ops[i];
255 if(op->has_constraints)
256 bitset_or(def_constr, op->regs);
260 insert copies for all constrained arguments living through the node
261 and being constrained to a register which also occurs in out constraints.
263 for(i = insn->use_start; i < insn->n_ops; ++i) {
264 be_operand_t *op = &insn->ops[i];
266 bitset_copy(tmp, op->regs);
267 bitset_and(tmp, def_constr);
/* A copy is needed iff all three conditions below hold: */
271 1) the operand is constrained.
272 2) lives through the node.
273 3) is constrained to a register occurring in out constraints.
275 if(op->has_constraints && values_interfere(env->lv, insn->irn, op->carrier) && bitset_popcnt(tmp) > 0) {
277 only create the copy if the operand is no copy.
278 this is necessary since the assure constraints phase inserts
279 Copies and Keeps for operands which must be different from the results.
280 Additional copies here would destroy this.
282 if(!be_is_Copy(op->carrier)) {
283 ir_node *copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
285 sched_add_before(insn->irn, copy);
286 set_irn_n(insn->irn, op->pos, copy);
287 DBG((env->dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n", copy, insn->irn, op->pos));
288 be_liveness_update(env->lv, op->carrier);
294 obstack_free(&env->obst, insn); /* NOTE(review): use-after-free — insn is freed here but dereferenced on the next line; next_insn should be saved into a local before the free. */
295 return insn->next_insn;
/** Block walker: run prepare_constr_insn() over every scheduled node of @p bl. */
298 static void pre_spill_prepare_constr_walker(ir_node *bl, void *data)
300 be_chordal_env_t *env = data;
/* prepare_constr_insn() returns the next instruction, advancing the loop. */
302 for(irn = sched_first(bl); !sched_is_end(irn);) {
303 irn = prepare_constr_insn(env, irn);
/** Public entry: prepare constrained instructions in all blocks before spilling. */
307 void be_pre_spill_prepare_constr(be_chordal_env_t *cenv) {
308 irg_block_walk_graph(cenv->irg, pre_spill_prepare_constr_walker, NULL, (void *) cenv);
/**
 * Pair each out operand of @p insn with a compatible in operand so both
 * can be assigned the same register; a partner with the smallest combined
 * register set is preferred (most constrained first).
 */
311 static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t *insn)
313 const be_chordal_env_t *env = alloc_env->chordal_env;
315 int n_uses = be_insn_n_uses(insn);
316 int n_defs = be_insn_n_defs(insn);
317 bitset_t *bs = bitset_alloca(env->cls->n_regs);
318 int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));
323 For each out operand, try to find an in operand which can be assigned the
324 same register as the out operand.
326 for (j = 0; j < insn->use_start; ++j) {
/* sentinel: larger than any possible combined popcount */
328 int smallest_n_regs = 2 * env->cls->n_regs + 1;
329 be_operand_t *out_op = &insn->ops[j];
331 /* Try to find an in operand which has ... */
332 for(i = insn->use_start; i < insn->n_ops; ++i) {
334 const be_operand_t *op = &insn->ops[i];
/* only unpaired, non-interfering candidates qualify */
336 if (! values_interfere(env->lv, op->irn, op->carrier) && ! op->partner) {
337 bitset_clear_all(bs);
338 bitset_copy(bs, op->regs);
339 bitset_and(bs, out_op->regs);
340 n_total = bitset_popcnt(op->regs) + bitset_popcnt(out_op->regs);
/* register sets overlap and this pairing is tighter than the best so far */
342 if (bitset_popcnt(bs) > 0 && n_total < smallest_n_regs) {
344 smallest_n_regs = n_total;
350 be_operand_t *partner = &insn->ops[smallest];
351 out_op->partner = partner;
352 partner->partner = out_op;
/**
 * Prepare a constrained instruction: copy interfering constrained inputs,
 * insert a Perm in front of the instruction, re-scan the insn (its inputs
 * are now Projs of the Perm), and propagate the input constraints to the
 * Perm's outputs. Returns the inserted Perm.
 */
358 static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env, be_insn_t **the_insn)
360 be_chordal_env_t *env = alloc_env->chordal_env;
361 const arch_env_t *aenv = env->birg->main_env->arch_env;
362 be_insn_t *insn = *the_insn;
363 ir_node *bl = get_nodes_block(insn->irn);
364 ir_node *copy = NULL;
365 ir_node *perm = NULL;
366 bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
367 bitset_t *bs = bitset_alloca(env->cls->n_regs);
368 DEBUG_ONLY(firm_dbg_module_t *dbg = alloc_env->constr_dbg;)
372 assert(insn->has_constraints && "only do this for constrained nodes");
375 Collect all registers that occur in output constraints.
376 This is necessary, since if the insn has one of these as an input constraint
377 and the corresponding operand interferes with the insn, the operand must
380 for(i = 0; i < insn->use_start; ++i) {
381 be_operand_t *op = &insn->ops[i];
382 if(op->has_constraints)
383 bitset_or(out_constr, op->regs);
387 Now, figure out which input operand must be copied since it has input
388 constraints which are also output constraints.
395 for(i = insn->use_start; i < insn->n_ops; ++i) {
396 be_operand_t *op = &insn->ops[i];
397 if(op->has_constraints && (values_interfere(env->lv, op->carrier, insn->irn) || arch_irn_is(aenv, op->carrier, ignore))) {
398 bitset_copy(bs, op->regs);
399 bitset_and(bs, out_constr);
402 The operand (interfering with the node) has input constraints
403 which also occur as output constraints, so insert a copy.
405 if(bitset_popcnt(bs) > 0) {
406 copy = be_new_Copy(op->req.cls, env->irg, bl, op->carrier);
408 sched_add_before(insn->irn, copy);
409 set_irn_n(insn->irn, op->pos, op->carrier); /* NOTE(review): looks wrong — the freshly created 'copy' is scheduled but the input is re-set to op->carrier, leaving the copy unused; presumably this should pass 'copy'. Confirm against upstream. */
411 DBG((dbg, LEVEL_2, "adding copy for interfering and constrained op %+F\n", op->carrier));
418 Make the Perm, recompute liveness and re-scan the insn since the
419 in operands are now the Projs of the Perm.
421 perm = insert_Perm_after(aenv, env->lv, env->cls, env->dom_front, sched_prev(insn->irn));
423 /* Registers are propagated by insert_Perm_after(). Clean them here! */
425 const ir_edge_t *edge;
427 foreach_out_edge(perm, edge) {
428 ir_node *proj = get_edge_src_irn(edge);
429 arch_set_irn_register(aenv, proj, NULL);
433 We also have to re-build the insn since the input operands are now the Projs of
434 the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
435 the live sets may change.
437 // be_liveness_recompute(env->lv);
438 obstack_free(&env->obst, insn);
439 *the_insn = insn = chordal_scan_insn(env, insn->irn);
442 Copy the input constraints of the insn to the Perm as output
443 constraints. Succeeding phases (coalescing will need that).
445 for(i = insn->use_start; i < insn->n_ops; ++i) {
446 be_operand_t *op = &insn->ops[i];
447 ir_node *proj = op->carrier;
449 Note that the predecessor must not be a Proj of the Perm,
450 since ignore-nodes are not Perm'ed.
452 if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
453 be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), &op->req);
/**
 * Handle one (possibly constrained) instruction: record precolored operands,
 * and for constrained instructions build a bipartite matching between
 * operands (plus the Perm's Projs) and admissible register colors, then
 * assign the matched registers. Returns the next instruction to process.
 */
461 static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env, ir_node *irn, int *silent)
463 be_chordal_env_t *env = alloc_env->chordal_env;
464 void *base = obstack_base(&env->obst);
465 be_insn_t *insn = chordal_scan_insn(env, irn);
466 ir_node *res = insn->next_insn;
467 int be_silent = *silent;
/* Remember already precolored result values. */
469 if(insn->pre_colored) {
471 for(i = 0; i < insn->use_start; ++i)
472 pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);
476 If the current node is a barrier toggle the silent flag.
477 If we are in the start block, we ought to be silent at the beginning,
478 so the toggling activates the constraint handling but skips the barrier.
479 If we are in the end block we handle the in requirements of the barrier
480 and set the rest to silent.
482 if(be_is_Barrier(irn))
489 Perms inserted before the constraint handling phase are considered to be
490 correctly precolored. These Perms arise during the ABI handling phase.
492 if(insn->has_constraints) {
493 const arch_env_t *aenv = env->birg->main_env->arch_env;
494 int n_regs = env->cls->n_regs;
495 bitset_t *bs = bitset_alloca(n_regs);
496 ir_node **alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
497 bipartite_t *bp = bipartite_new(n_regs, n_regs);
498 int *assignment = alloca(n_regs * sizeof(assignment[0]));
499 pmap *partners = pmap_create();
500 DEBUG_ONLY(firm_dbg_module_t *dbg = alloc_env->constr_dbg;)
504 const ir_edge_t *edge;
505 ir_node *perm = NULL;
508 prepare the constraint handling of this node.
509 Perms are constructed and Copies are created for constrained values
510 interfering with the instruction.
512 perm = pre_process_constraints(alloc_env, &insn);
514 /* find suitable in operands to the out operands of the node. */
515 pair_up_operands(alloc_env, insn);
518 look at the in/out operands and add each operand (and its possible partner)
519 to a bipartite graph (left: nodes with partners, right: admissible colors).
521 for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
522 be_operand_t *op = &insn->ops[i];
525 If the operand has no partner or the partner has not been marked
526 for allocation, determine the admissible registers and mark it
527 for allocation by associating the node and its partner with the
528 set of admissible registers via a bipartite graph.
530 if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
532 pmap_insert(partners, op->carrier, op->partner ? op->partner->carrier : NULL);
533 alloc_nodes[n_alloc] = op->carrier;
535 DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier, op->partner ? op->partner->carrier : NULL));
537 bitset_clear_all(bs);
538 get_decisive_partner_regs(bs, op, op->partner);
540 DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier, bs));
/* one bipartite edge per admissible color */
542 bitset_foreach(bs, col)
543 bipartite_add(bp, n_alloc, col);
550 Put all nodes which live by the constrained instruction also to the
551 allocation bipartite graph. They are considered unconstrained.
554 foreach_out_edge(perm, edge) {
555 ir_node *proj = get_edge_src_irn(edge);
557 assert(is_Proj(proj));
559 if(values_interfere(env->lv, proj, irn) && !pmap_contains(partners, proj)) {
560 assert(n_alloc < n_regs);
561 alloc_nodes[n_alloc] = proj;
562 pmap_insert(partners, proj, NULL);
/* unconstrained: all non-ignore registers of the class are admissible */
564 bitset_clear_all(bs);
565 arch_put_non_ignore_regs(aenv, env->cls, bs);
566 bitset_foreach(bs, col)
567 bipartite_add(bp, n_alloc, col);
574 /* Compute a valid register allocation. */
575 bipartite_matching(bp, assignment);
577 /* Assign colors obtained from the matching. */
578 for(i = 0; i < n_alloc; ++i) {
579 const arch_register_t *reg;
583 assert(assignment[i] >= 0 && "there must have been a register assigned");
584 reg = arch_register_for_index(env->cls, assignment[i]);
/* assign the same register to the node and (if present) its partner */
586 nodes[0] = alloc_nodes[i];
587 nodes[1] = pmap_get(partners, alloc_nodes[i]);
589 for(j = 0; j < 2; ++j) {
593 arch_set_irn_register(aenv, nodes[j], reg);
594 pset_hinsert_ptr(alloc_env->pre_colored, nodes[j]);
595 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", nodes[j], reg->name));
600 /* Allocate the non-constrained Projs of the Perm. */
603 bitset_clear_all(bs);
605 /* Put the colors of all Projs in a bitset. */
606 foreach_out_edge(perm, edge) {
607 ir_node *proj = get_edge_src_irn(edge);
608 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
611 bitset_set(bs, reg->index);
614 /* Assign the not yet assigned Projs of the Perm a suitable color. */
615 foreach_out_edge(perm, edge) {
616 ir_node *proj = get_edge_src_irn(edge);
617 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
619 DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));
622 col = get_next_free_reg(alloc_env, bs);
623 reg = arch_register_for_index(env->cls, col);
624 bitset_set(bs, reg->index);
625 arch_set_irn_register(aenv, proj, reg);
626 pset_insert_ptr(alloc_env->pre_colored, proj);
627 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));
633 pmap_destroy(partners);
/* release everything allocated on the obstack during this call */
637 obstack_free(&env->obst, base);
642 * Handle constraint nodes in each basic block.
643 * handle_constraints() inserts Perm nodes which perm
644 * over all values live at the constrained node right in front
645 * of the constrained node. These Perms signal a constrained node.
646 * For further comments, refer to handle_constraints().
/** Dominance walker callback: apply handle_constraints() to each node of @p bl. */
648 static void constraints(ir_node *bl, void *data)
650 be_chordal_alloc_env_t *env = data;
653 Start silent in the start block.
654 The silence remains until the first barrier is seen.
655 Each other block is begun loud.
657 int silent = bl == get_irg_start_block(get_irn_irg(bl));
661 If the block is the start block search the barrier and
662 start handling constraints from there.
/* handle_constraints() returns the next instruction, advancing the loop. */
665 for(irn = sched_first(bl); !sched_is_end(irn);) {
666 irn = handle_constraints(env, irn, &silent);
671 * Annotate the register pressure to the nodes and compute
672 * the liveness intervals.
673 * @param block The block to do it for.
674 * @param env_ptr The environment.
676 static void pressure(ir_node *block, void *env_ptr)
678 /* Convenience macro for a def */
679 #define border_def(irn, step, real) \
680 border_add(env, head, irn, step, pressure--, 1, real)
682 /* Convenience macro for a use */
683 #define border_use(irn, step, real) \
684 border_add(env, head, irn, step, ++pressure, 0, real)
686 be_chordal_alloc_env_t *alloc_env = env_ptr;
687 be_chordal_env_t *env = alloc_env->chordal_env;
688 bitset_t *live = alloc_env->live;
690 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
694 unsigned pressure = 0;
695 struct list_head *head;
696 pset *live_in = be_lv_pset_put_in(env->lv, block, pset_new_ptr_default());
697 pset *live_end = be_lv_pset_put_end(env->lv, block, pset_new_ptr_default());
699 DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
700 bitset_clear_all(live);
702 /* Set up the border list in the block info */
703 head = obstack_alloc(&env->obst, sizeof(*head));
704 INIT_LIST_HEAD(head);
705 assert(pmap_get(env->border_heads, block) == NULL);
706 pmap_insert(env->border_heads, block, head);
709 * Make final uses of all values live out of the block.
710 * They are necessary to build up real intervals.
712 foreach_pset(live_end, irn) {
713 if(has_reg_class(env, irn)) {
714 DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_idx(irn)));
715 bitset_set(live, get_irn_idx(irn));
716 border_use(irn, step, 0);
722 * Determine the last uses of a value inside the block, since they are
723 * relevant for the interval borders.
725 sched_foreach_reverse(block, irn) {
726 DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
727 DBG((dbg, LEVEL_2, "\tlive: %B\n", live));
730 * If the node defines some value, which can put into a
731 * register of the current class, make a border for it.
733 if(has_reg_class(env, irn)) {
734 int nr = get_irn_idx(irn);
736 bitset_clear(live, nr);
737 border_def(irn, step, 1);
741 * If the node is no phi node we can examine the uses.
744 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
745 ir_node *op = get_irn_n(irn, i);
747 if(has_reg_class(env, op)) {
748 int nr = get_irn_idx(op);
749 const char *msg = "-";
/* first (reverse-order) encounter of a value == its last use in the block */
751 if(!bitset_is_set(live, nr)) {
752 border_use(op, step, 1);
753 bitset_set(live, nr);
757 DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));
765 * Add initial defs for all values live in.
767 foreach_pset(live_in, irn) {
768 if(has_reg_class(env, irn)) {
770 /* Mark the value live in. */
771 bitset_set(live, get_irn_idx(irn));
774 border_def(irn, step, 0);
/**
 * Dominance walker callback: assign register colors to the values of
 * @p block by scanning its interval borders in perfect elimination order.
 */
782 static void assign(ir_node *block, void *env_ptr)
784 be_chordal_alloc_env_t *alloc_env = env_ptr;
785 be_chordal_env_t *env = alloc_env->chordal_env;
786 bitset_t *live = alloc_env->live;
787 bitset_t *colors = alloc_env->colors;
788 bitset_t *in_colors = alloc_env->in_colors;
789 const arch_env_t *arch_env = env->birg->main_env->arch_env;
790 struct list_head *head = get_block_border_head(env, block);
791 pset *live_in = be_lv_pset_put_in(env->lv, block, pset_new_ptr_default());
795 DEBUG_ONLY(firm_dbg_module_t *dbg = env->dbg;)
797 bitset_clear_all(colors);
798 bitset_clear_all(live);
799 bitset_clear_all(in_colors);
801 DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
802 DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
803 list_for_each_entry(border_t, b, head, list) {
804 DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
805 b->irn, get_irn_idx(b->irn)));
809 * Add initial defs for all values live in.
810 * Since their colors have already been assigned (The dominators were
811 * allocated before), we have to mark their colors as used also.
813 foreach_pset(live_in, irn) {
814 if(has_reg_class(env, irn)) {
815 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
818 assert(reg && "Node must have been assigned a register");
819 col = arch_register_get_index(reg);
821 DBG((dbg, LEVEL_4, "%+F has reg %s\n", irn, reg->name));
823 /* Mark the color of the live in value as used. */
824 bitset_set(colors, col);
825 bitset_set(in_colors, col);
827 /* Mark the value live in. */
828 bitset_set(live, get_irn_idx(irn));
833 * Mind that the sequence
834 * of defs from back to front defines a perfect
835 * elimination order. So, coloring the definitions from first to last
838 list_for_each_entry_reverse(border_t, b, head, list) {
839 ir_node *irn = b->irn;
840 int nr = get_irn_idx(irn);
841 int ignore = arch_irn_is(arch_env, irn, ignore);
844 * Assign a color, if it is a local def. Global defs already have a
847 if(b->is_def && !be_is_live_in(env->lv, block, irn)) {
848 const arch_register_t *reg;
/* pre-colored / ignore nodes keep their register; everyone else gets the next free one */
851 if(pset_find_ptr(alloc_env->pre_colored, irn) || ignore) {
852 reg = arch_get_irn_register(arch_env, irn);
854 assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
858 col = get_next_free_reg(alloc_env, colors);
859 reg = arch_register_for_index(env->cls, col);
860 assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
861 assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");
864 bitset_set(colors, col);
865 arch_set_irn_register(arch_env, irn, reg);
867 DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n", arch_register_get_name(reg), col, irn));
869 assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
870 bitset_set(live, nr);
873 /* Clear the color upon a use. */
874 else if(!b->is_def) {
875 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
878 assert(reg && "Register must have been assigned");
880 col = arch_register_get_index(reg);
881 assert(bitset_is_set(live, nr) && "Cannot have a non live use");
883 bitset_clear(colors, col);
884 bitset_clear(live, nr);
/**
 * Driver for the chordal coloring: handle constraints, compute pressure
 * and interval borders, then assign colors, walking the dominance tree.
 */
891 void be_ra_chordal_color(be_chordal_env_t *chordal_env)
893 be_chordal_alloc_env_t env;
896 int colors_n = arch_register_class_n_regs(chordal_env->cls);
897 ir_graph *irg = chordal_env->irg;
902 env.chordal_env = chordal_env;
903 env.colors_n = colors_n;
/* NOTE(review): bitset_alloca in a callee-visible struct is fine here only
 * because env stays on this function's stack for the whole run. */
904 env.colors = bitset_alloca(colors_n);
905 env.tmp_colors = bitset_alloca(colors_n);
906 env.in_colors = bitset_alloca(colors_n);
907 env.pre_colored = pset_new_ptr_default();
908 FIRM_DBG_REGISTER(env.constr_dbg, "firm.be.chordal.constr");
911 /* Handle register targeting constraints */
912 dom_tree_walk_irg(irg, constraints, NULL, &env);
914 if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
915 snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
916 be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);
/* sized by node indices, not colors: one bit per IR node */
920 env.live = bitset_malloc(get_irg_last_idx(chordal_env->irg));
922 /* First, determine the pressure */
923 dom_tree_walk_irg(irg, pressure, NULL, &env);
925 /* Assign the colors */
926 dom_tree_walk_irg(irg, assign, NULL, &env);
928 be_numbering_done(irg);
930 if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
932 ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
933 plotter = new_plotter_ps(buf);
934 draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
935 plotter_free(plotter);
938 bitset_free(env.live);
939 del_pset(env.pre_colored);