2 * Chordal register allocation.
3 * @author Sebastian Hack
6 * Copyright (C) Universitaet Karlsruhe
7 * Released under the GPL
29 #include "bipartite.h"
32 #include "irgraph_t.h"
33 #include "irprintf_t.h"
43 #include "besched_t.h"
50 #include "bechordal_t.h"
51 #include "bechordal_draw.h"
53 #define DBG_LEVEL SET_LEVEL_0
54 #define DBG_LEVEL_CHECK SET_LEVEL_0
58 #define MAX(x, y) ((x) > (y) ? (x) : (y))
59 #define MIN(x, y) ((x) < (y) ? (x) : (y))
61 #define DUMP_INTERVALS
/*
 * Per-run working state for the chordal coloring phases (constraint
 * handling, pressure computation, color assignment).
 * NOTE(review): this listing is an excerpt; at least one member line
 * (original line 65) is missing between chordal_env and constr_dbg.
 */
63 typedef struct _be_chordal_alloc_env_t {
64 be_chordal_env_t *chordal_env;
66 firm_dbg_module_t *constr_dbg; /**< Debug output for the constraint handler. */
67 pset *pre_colored; /**< Set of precolored nodes. */
68 bitset_t *live; /**< A liveness bitset. */
69 bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
70 bitset_t *colors; /**< The color mask. */
71 bitset_t *in_colors; /**< Colors used by live in values. */
72 bitset_t *ignore_regs; /**< A bitset of all ignore registers in the current class. */
73 int colors_n; /**< The number of colors. */
74 } be_chordal_alloc_env_t;
78 /* Make a fourcc for border checking. */
79 #define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
/*
 * Debug helper: walk one block's border list and assert that every
 * border still carries the BORDER_FOURCC magic (i.e. the list has not
 * been corrupted).
 * @param head The list head of the block's border list.
 */
81 static void check_border_list(struct list_head *head)
84 list_for_each_entry(border_t, x, head, list) {
85 assert(x->magic == BORDER_FOURCC);
/*
 * Debug helper: run check_border_list() on the border list of every
 * block recorded in env->border_heads.
 * @param env The chordal environment holding the block -> border-list map.
 */
89 static void check_heads(be_chordal_env_t *env)
92 for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
93 /* ir_printf("checking border list of block %+F\n", ent->key); */
94 check_border_list(ent->value);
100 * Add an interval border to a block's list
101 * of interval borders.
102 * @note You always have to create the use before the def.
103 * @param env The environment.
104 * @param head The list head to enqueue the borders.
105 * @param irn The node (value) the border belongs to.
106 * @param step A time step for the border.
107 * @param pressure The pressure at this point in time.
108 * @param is_def Is the border a use or a def.
109 * @return The created border.
/*
 * Allocate a border on the environment's obstack and enqueue it at the
 * tail of the block's border list.
 * NOTE(review): this listing is an excerpt — the if/else that selects
 * between the use path (allocate border + paired def, stash the def in
 * the node's link field) and the def path (retrieve that def from the
 * link field) is only partially visible here.
 */
111 static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
112 ir_node *irn, unsigned step, unsigned pressure,
113 unsigned is_def, unsigned is_real)
120 b = obstack_alloc(&env->obst, sizeof(*b));
122 /* also allocate the def and tie it to the use. */
123 def = obstack_alloc(&env->obst, sizeof(*def));
124 memset(def, 0, sizeof(*def));
129 * Set the link field of the irn to the def.
130 * This strongly relies on the fact, that the use is always
131 * made before the def.
133 set_irn_link(irn, def);
135 b->magic = BORDER_FOURCC;
136 def->magic = BORDER_FOURCC;
140 * If the def is encountered, the use was made and so was
141 * the def node (see the code above). It was placed into the
142 * link field of the irn, so we can get it there.
145 b = get_irn_link(irn);
/* The magic check catches a def arriving before its use (or a stale link). */
147 assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered");
150 b->pressure = pressure;
152 b->is_real = is_real;
155 list_add_tail(&b->list, head);
156 DBG((env->dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
163 * Check, if an irn is of the register class currently under processing.
164 * @param env The chordal environment.
165 * @param irn The node.
166 * @return 1, if the node is of that register class, 0 if not.
168 static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
/* Old implementation kept for reference; the current check also honors
 * the backend's "consider in register allocation" predicate. */
170 // return arch_irn_has_reg_class(env->main_env->arch_env, irn, -1, env->cls);
171 return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
/* True iff irn has a register requirement and it is of the "limited"
 * kind (i.e. restricted to a subset of the class's registers).
 * Relies on a variable named arch_env being in scope at the use site. */
174 #define has_limited_constr(req, irn) \
175 (arch_get_register_req(arch_env, (req), irn, -1) && (req)->type == arch_register_req_type_limited)
/*
 * Find the lowest register index that is free, i.e. neither set in
 * @p colors nor marked as an ignore register for the current class.
 * @param alloc_env The allocation environment (supplies tmp_colors and ignore_regs).
 * @param colors The bitset of colors currently in use.
 * @return The index of the first clear bit in (colors | ignore_regs).
 */
177 static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
179 bitset_t *tmp = alloc_env->tmp_colors;
180 bitset_copy(tmp, colors);
181 bitset_or(tmp, alloc_env->ignore_regs);
182 return bitset_next_clear(tmp, 0);
185 typedef struct _operand_t operand_t;
/* NOTE(review): excerpt — the bodies of struct _operand_t and the insn
 * struct are only partially visible; the fields below belong to two
 * different structs (operand, then insn). */
193 arch_register_req_t req; /* register requirement of this operand */
194 unsigned has_constraints : 1; /* operand has a "limited" requirement */
203 unsigned in_constraints : 1; /* some input operand is constrained */
204 unsigned out_constraints : 1; /* some output operand is constrained */
205 unsigned has_constraints : 1; /* in_constraints | out_constraints */
206 unsigned pre_colored : 1; /* all defs already carry a register */
/* Operands are stored defs-first: [0, use_start) are defs,
 * [use_start, n_ops) are uses. */
209 #define insn_n_defs(insn) ((insn)->use_start)
210 #define insn_n_uses(insn) ((insn)->n_ops - (insn)->use_start)
/*
 * Build an insn_t descriptor for a scheduled node: collect its def
 * operands (the Projs following a mode_T node, or the node itself),
 * then its use operands, record which are constrained, and compute the
 * admissible-register bitset for every operand.
 * @param env The chordal environment.
 * @param irn The scheduled node to scan.
 * @param obst The obstack the insn and its operand array are allocated on.
 * @return The newly built insn descriptor.
 * NOTE(review): excerpt — several statements (declarations, operand
 * field setup, brace lines) are not visible here.
 */
212 static insn_t *scan_insn(be_chordal_env_t *env, ir_node *irn, struct obstack *obst)
214 const arch_env_t *arch_env = env->birg->main_env->arch_env;
220 insn = obstack_alloc(obst, sizeof(insn[0]));
221 memset(insn, 0, sizeof(insn[0]));
224 insn->next_insn = sched_next(irn);
225 if(get_irn_mode(irn) == mode_T) {
/* A mode_T node defines its results via the Projs scheduled right after it. */
228 for(p = sched_next(irn); is_Proj(p); p = sched_next(p)) {
229 if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, p)) {
230 arch_get_register_req(arch_env, &o.req, p, -1);
/* Defs get negative positions so they are distinguishable from use positions. */
233 o.pos = -(get_Proj_proj(p) + 1);
235 o.has_constraints = arch_register_req_is(&o.req, limited);
236 obstack_grow(obst, &o, sizeof(o));
238 insn->out_constraints |= o.has_constraints;
239 pre_colored += arch_get_irn_register(arch_env, p) != NULL;
/* Single-result node: the node itself is the (only) def operand. */
246 else if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, irn)) {
247 arch_get_register_req(arch_env, &o.req, irn, -1);
252 o.has_constraints = arch_register_req_is(&o.req, limited);
253 obstack_grow(obst, &o, sizeof(o));
255 insn->out_constraints |= o.has_constraints;
256 pre_colored += arch_get_irn_register(arch_env, irn) != NULL;
259 insn->pre_colored = pre_colored == insn->n_ops && insn->n_ops > 0;
/* Everything grown so far was a def; uses start here. */
260 insn->use_start = insn->n_ops;
262 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
263 ir_node *op = get_irn_n(irn, i);
265 if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, op)) {
/* Query the requirement of irn's i-th *input* slot (not of op itself). */
266 arch_get_register_req(arch_env, &o.req, irn, i);
271 o.has_constraints = arch_register_req_is(&o.req, limited);
272 obstack_grow(obst, &o, sizeof(o));
274 insn->in_constraints |= o.has_constraints;
278 insn->has_constraints = insn->in_constraints | insn->out_constraints;
279 insn->ops = obstack_finish(obst);
281 /* Compute the admissible registers bitsets. */
282 for(i = 0; i < insn->n_ops; ++i) {
283 operand_t *op = &insn->ops[i];
285 assert(op->req.cls == env->cls);
286 op->regs = bitset_obstack_alloc(obst, env->cls->n_regs);
/* Limited requirements fill in their own subset; otherwise all
 * non-ignore registers of the class are admissible. */
288 if(arch_register_req_is(&op->req, limited))
289 op->req.limited(op->req.limited_env, op->regs);
291 arch_put_non_ignore_regs(env->birg->main_env->arch_env, env->cls, op->regs);
/*
 * Of two operands that are candidates for pairing, copy into @p bs the
 * admissible-register set of the more constrained one: if one operand's
 * set is a subset of the other's, the subset is decisive.
 * NOTE(review): excerpt — the branches guarding the early copies at
 * original lines 302/307 (presumably the NULL-partner cases) and the
 * return statements are not visible here.
 */
297 static bitset_t *get_decisive_partner_regs(bitset_t *bs, const operand_t *o1, const operand_t *o2)
302 bitset_copy(bs, o2->regs);
307 bitset_copy(bs, o1->regs);
311 assert(o1->req.cls == o2->req.cls);
313 if(bitset_contains(o1->regs, o2->regs))
314 bitset_copy(bs, o1->regs);
315 else if(bitset_contains(o2->regs, o1->regs))
316 bitset_copy(bs, o2->regs);
/*
 * Pair each def operand of @p insn with a compatible use operand via
 * bipartite matching, so that paired def/use can later be assigned the
 * same register. Pairing is recorded in the operands' partner fields.
 * @param alloc_env The allocation environment.
 * @param insn The instruction descriptor whose operands get paired.
 */
323 static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, insn_t *insn)
325 const be_chordal_env_t *env = alloc_env->chordal_env;
326 const arch_env_t *aenv = env->birg->main_env->arch_env;
327 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
329 int n_uses = insn_n_uses(insn);
330 int n_defs = insn_n_defs(insn);
331 int max_pairs = MIN(n_uses, n_defs);
332 bitset_t *bs = bitset_alloca(env->cls->n_regs);
/* Left side of the bipartite graph: defs; right side: uses. */
333 bipartite_t *bp = bipartite_new(n_defs, n_uses);
334 int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));
339 For each out operand, try to find an in operand which can be assigned the
340 same register as the out operand.
342 for(j = 0; j < insn->use_start; ++j) {
343 operand_t *out_op = &insn->ops[j];
345 /* Try to find an in operand which has ... */
346 for(i = insn->use_start; i < insn->n_ops; ++i) {
347 const operand_t *op = &insn->ops[i];
350 The in operand can only be paired with a def, if the node defining the
351 operand's value does not interfere with the instruction itself. That
352 would mean, that it is live at the instruction, so no result of the instruction
353 can have the same register as the operand.
355 Furthermore, two operands can be paired, if the admissible registers
356 of one are a subset of the other's. We record the operand whose constraints
357 count in the decisive array.
359 if(!values_interfere(op->irn, op->carrier)) {
360 if(get_decisive_partner_regs(bs, out_op, op))
361 bipartite_add(bp, j, i - insn->use_start);
366 /* Compute the pairing. */
367 bipartite_matching(bp, pairing);
368 for(i = 0; i < insn->use_start; ++i) {
/* pairing[i] is the matched use index relative to use_start (or negative if unmatched). */
369 int p = pairing[i] + insn->use_start;
371 if(p >= insn->use_start) {
372 insn->ops[i].partner = &insn->ops[p];
373 insn->ops[p].partner = &insn->ops[i];
/*
 * Prepare a constrained instruction for register assignment:
 *  1. insert Copies for input operands whose constraints collide with
 *     output constraints while the operand interferes with the insn,
 *  2. insert a Perm before the insn, clear the Projs' registers,
 *  3. recompute liveness, re-scan the insn (its inputs are now Projs
 *     of the Perm) and propagate the input constraints onto the Perm's
 *     corresponding outputs.
 * @param alloc_env The allocation environment.
 * @param the_insn In/out: the insn descriptor; replaced by the re-scanned one.
 * @return The inserted Perm node (NULL semantics in the non-Perm case
 *         are not visible in this excerpt).
 */
381 static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env, insn_t **the_insn)
383 be_chordal_env_t *env = alloc_env->chordal_env;
384 const arch_env_t *aenv = env->birg->main_env->arch_env;
385 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
386 insn_t *insn = *the_insn;
387 ir_node *bl = get_nodes_block(insn->irn);
388 ir_node *copy = NULL;
389 ir_node *perm = NULL;
390 bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
391 bitset_t *bs = bitset_alloca(env->cls->n_regs);
395 assert(insn->has_constraints && "only do this for constrained nodes");
398 Collect all registers that occur in output constraints.
399 This is necessary, since if the insn has one of these as an input constraint
400 and the corresponding operand interferes with the insn, the operand must
403 for(i = 0; i < insn->use_start; ++i) {
404 operand_t *op = &insn->ops[i];
405 if(op->has_constraints)
406 bitset_or(out_constr, op->regs);
410 Now, figure out which input operand must be copied since it has input
411 constraints which are also output constraints.
413 for(i = insn->use_start; i < insn->n_ops; ++i) {
414 operand_t *op = &insn->ops[i];
415 if(op->has_constraints && (values_interfere(op->carrier, insn->irn) || arch_irn_is(aenv, op->carrier, ignore))) {
416 bitset_copy(bs, op->regs);
417 bitset_and(bs, out_constr);
420 The operand (interfering with the node) has input constraints
421 which also occur as output constraints, so insert a copy.
423 if(bitset_popcnt(bs) > 0) {
424 copy = be_new_Copy(op->req.cls, env->irg, bl, op->carrier);
425 insn->ops[i].carrier = copy;
426 sched_add_before(insn->irn, copy);
428 DBG((dbg, LEVEL_2, "adding copy for interfering and constrained op %+F\n", op->carrier));
434 Make the Perm, recompute liveness and re-scan the insn since the
435 in operands are now the Projs of the Perm.
437 perm = insert_Perm_after(aenv, env->cls, env->dom_front, sched_prev(insn->irn));
439 /* Registers are propagated by insert_Perm_after(). Clean them here! */
441 const ir_edge_t *edge;
443 foreach_out_edge(perm, edge) {
444 ir_node *proj = get_edge_src_irn(edge);
445 arch_set_irn_register(aenv, proj, NULL);
449 We also have to re-build the insn since the input operands are now the Projs of
450 the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
451 the live sets may change.
453 be_liveness(env->irg);
/* The old insn descriptor lived on env->obst; free it before re-scanning. */
454 obstack_free(&env->obst, insn);
455 *the_insn = insn = scan_insn(env, insn->irn, &env->obst);
458 Copy the input constraints of the insn to the Perm as output
459 constraints. Succeeding phases (coalescing will need that).
461 for(i = insn->use_start; i < insn->n_ops; ++i) {
462 operand_t *op = &insn->ops[i];
463 ir_node *proj = op->carrier;
465 Note that the predecessor must not be a Proj of the Perm,
466 since ignore-nodes are not Perm'ed.
468 if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
469 be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), &op->req);
/*
 * Handle register constraints for a single scheduled node: scan it,
 * record pre-colored defs, and — if it is constrained — pre-process it
 * (Copies + Perm), pair defs with uses, compute a valid assignment via
 * bipartite matching, and color both the constrained operands and the
 * remaining Projs of the inserted Perm.
 * @param alloc_env The allocation environment.
 * @param irn The node to handle.
 * @return The next node to process in the schedule (insn->next_insn).
 * NOTE(review): excerpt — several declarations and control-flow lines
 * (e.g. the handling after the early-out at original line 490) are not
 * visible here.
 */
477 static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env, ir_node *irn)
479 be_chordal_env_t *env = alloc_env->chordal_env;
/* Remember the obstack base so all temporary insn data can be freed at once below. */
480 void *base = obstack_base(&env->obst);
481 insn_t *insn = scan_insn(env, irn, &env->obst);
482 ir_node *res = insn->next_insn;
484 if(insn->pre_colored) {
486 for(i = 0; i < insn->use_start; ++i)
487 pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);
490 if(be_is_Perm(irn) || be_is_RegParams(irn) || (be_is_Barrier(irn) && !insn->in_constraints))
494 Perms inserted before the constraint handling phase are considered to be
495 correctly precolored. These Perms arise during the ABI handling phase.
497 if(insn->has_constraints) {
498 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
499 const arch_env_t *aenv = env->birg->main_env->arch_env;
500 int n_regs = env->cls->n_regs;
501 bitset_t *bs = bitset_alloca(n_regs);
502 bitset_t *non_ignore = bitset_alloca(n_regs);
503 ir_node **alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
/* Left side: nodes needing a register; right side: the register indices. */
504 bipartite_t *bp = bipartite_new(n_regs, n_regs);
505 int *assignment = alloca(n_regs * sizeof(assignment[0]));
/* Maps each allocated node to its pairing partner (or NULL). */
506 pmap *partners = pmap_create();
510 const ir_edge_t *edge;
511 ir_node *perm = NULL;
514 prepare the constraint handling of this node.
515 Perms are constructed and Copies are created for constrained values
516 interfering with the instruction.
518 perm = pre_process_constraints(alloc_env, &insn);
520 /* find suitable in operands to the out operands of the node. */
521 pair_up_operands(alloc_env, insn);
524 look at the in/out operands and add each operand (and its possible partner)
525 to a bipartite graph (left: nodes with partners, right: admissible colors).
527 for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
528 operand_t *op = &insn->ops[i];
531 If the operand has no partner or the partner has not been marked
532 for allocation, determine the admissible registers and mark it
533 for allocation by associating the node and its partner with the
534 set of admissible registers via a bipartite graph.
536 if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
538 pmap_insert(partners, op->carrier, op->partner ? op->partner->carrier : NULL);
539 alloc_nodes[n_alloc] = op->carrier;
541 DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier, op->partner ? op->partner->carrier : NULL));
543 bitset_clear_all(bs);
544 get_decisive_partner_regs(bs, op, op->partner);
546 DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier, bs));
548 bitset_foreach(bs, col)
549 bipartite_add(bp, n_alloc, col);
556 Put all nodes which live by the constrained instruction also to the
557 allocation bipartite graph. They are considered unconstrained.
560 foreach_out_edge(perm, edge) {
561 ir_node *proj = get_edge_src_irn(edge);
563 assert(is_Proj(proj));
/* Only Perm results that are live across the constrained insn compete for registers here. */
565 if(values_interfere(proj, irn)) {
566 assert(n_alloc < n_regs);
567 alloc_nodes[n_alloc] = proj;
568 pmap_insert(partners, proj, NULL);
570 bitset_clear_all(bs);
571 arch_put_non_ignore_regs(aenv, env->cls, bs);
572 bitset_foreach(bs, col)
573 bipartite_add(bp, n_alloc, col);
580 /* Compute a valid register allocation. */
581 bipartite_matching(bp, assignment);
583 /* Assign colors obtained from the matching. */
584 for(i = 0; i < n_alloc; ++i) {
585 const arch_register_t *reg;
589 assert(assignment[i] >= 0 && "there must have been a register assigned");
590 reg = arch_register_for_index(env->cls, assignment[i]);
/* Both the node and its partner (if any) receive the same register. */
592 nodes[0] = alloc_nodes[i];
593 nodes[1] = pmap_get(partners, alloc_nodes[i]);
595 for(j = 0; j < 2; ++j) {
599 arch_set_irn_register(aenv, nodes[j], reg);
600 pset_hinsert_ptr(alloc_env->pre_colored, nodes[j]);
601 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", nodes[j], reg->name));
606 /* Allocate the non-constrained Projs of the Perm. */
609 bitset_clear_all(bs);
611 /* Put the colors of all Projs in a bitset. */
612 foreach_out_edge(perm, edge) {
613 ir_node *proj = get_edge_src_irn(edge);
614 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
617 bitset_set(bs, reg->index);
620 /* Assign the not yet assigned Projs of the Perm a suitable color. */
621 foreach_out_edge(perm, edge) {
622 ir_node *proj = get_edge_src_irn(edge);
623 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
625 DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));
628 col = get_next_free_reg(alloc_env, bs);
629 reg = arch_register_for_index(env->cls, col);
630 bitset_set(bs, reg->index);
631 arch_set_irn_register(aenv, proj, reg);
632 pset_insert_ptr(alloc_env->pre_colored, proj);
633 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));
638 pmap_destroy(partners);
/* Release everything scan_insn() and friends put on the obstack since `base`. */
642 obstack_free(&env->obst, base);
647 * Handle constraint nodes in each basic block.
648 * be_insert_constr_perms() inserts Perm nodes which perm
649 * over all values live at the constrained node right in front
650 * of the constrained node. These Perms signal a constrained node.
651 * For further comments, refer to handle_constraints().
/*
 * Block walker: run the constraint handler over every scheduled node of
 * the block. handle_constraints() returns the next node to visit, so
 * the loop advances via its return value rather than sched_next().
 * @param bl The basic block.
 * @param data The be_chordal_alloc_env_t (passed as walker data).
 */
653 static void constraints(ir_node *bl, void *data)
655 firm_dbg_module_t *dbg = firm_dbg_register("firm.be.chordal.constr");
656 be_chordal_alloc_env_t *env = data;
657 arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
660 for(irn = sched_first(bl); !sched_is_end(irn);) {
661 irn = handle_constraints(env, irn);
666 * Annotate the register pressure to the nodes and compute
667 * the liveness intervals.
668 * @param block The block to do it for.
669 * @param env_ptr The environment.
/*
 * Block walker: compute register pressure and interval borders for one
 * block by scanning the schedule bottom-up. Values live at the block
 * end get an artificial final use; each def/last-use inside the block
 * creates a border; values live into the block get an artificial def.
 * NOTE(review): excerpt — some declarations (step, i, n, irn) and the
 * pset cleanup at the end are not visible here.
 */
671 static void pressure(ir_node *block, void *env_ptr)
673 /* Convenience macro for a def */
674 #define border_def(irn, step, real) \
675 border_add(env, head, irn, step, pressure--, 1, real)
677 /* Convenience macro for a use */
678 #define border_use(irn, step, real) \
679 border_add(env, head, irn, step, ++pressure, 0, real)
681 be_chordal_alloc_env_t *alloc_env = env_ptr;
682 be_chordal_env_t *env = alloc_env->chordal_env;
683 const arch_env_t *arch_env = env->birg->main_env->arch_env;
684 bitset_t *live = alloc_env->live;
685 firm_dbg_module_t *dbg = env->dbg;
690 unsigned pressure = 0;
691 struct list_head *head;
692 pset *live_in = put_live_in(block, pset_new_ptr_default());
693 pset *live_end = put_live_end(block, pset_new_ptr_default());
695 DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
696 bitset_clear_all(live);
698 /* Set up the border list in the block info */
699 head = obstack_alloc(&env->obst, sizeof(*head));
700 INIT_LIST_HEAD(head);
/* Each block must get exactly one border list. */
701 assert(pmap_get(env->border_heads, block) == NULL);
702 pmap_insert(env->border_heads, block, head);
705 * Make final uses of all values live out of the block.
706 * They are necessary to build up real intervals.
708 for(irn = pset_first(live_end); irn; irn = pset_next(live_end)) {
709 if(has_reg_class(env, irn)) {
710 DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_graph_nr(irn)));
711 bitset_set(live, get_irn_graph_nr(irn));
/* is_real = 0: artificial use marking liveness at the block end. */
712 border_use(irn, step, 0);
718 * Determine the last uses of a value inside the block, since they are
719 * relevant for the interval borders.
721 sched_foreach_reverse(block, irn) {
722 DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
723 DBG((dbg, LEVEL_2, "\tlive: %b\n", live));
726 * If the node defines some value, which can put into a
727 * register of the current class, make a border for it.
729 if(has_reg_class(env, irn)) {
730 int nr = get_irn_graph_nr(irn);
732 bitset_clear(live, nr);
733 border_def(irn, step, 1);
737 * If the node is no phi node we can examine the uses.
740 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
741 ir_node *op = get_irn_n(irn, i);
743 if(has_reg_class(env, op)) {
744 int nr = get_irn_graph_nr(op);
746 DBG((dbg, LEVEL_4, "\t\tpos: %d, use: %+F\n", i, op));
/* Walking bottom-up: the first use we see of a not-yet-live value is its last use. */
748 if(!bitset_is_set(live, nr)) {
749 border_use(op, step, 1);
750 bitset_set(live, nr);
759 * Add initial defs for all values live in.
761 for(irn = pset_first(live_in); irn; irn = pset_next(live_in)) {
762 if(has_reg_class(env, irn)) {
764 /* Mark the value live in. */
765 bitset_set(live, get_irn_graph_nr(irn));
/* is_real = 0: artificial def opening the interval at the block entry. */
768 border_def(irn, step, 0);
/*
 * Block walker: assign registers within one block by walking the border
 * list from front to back (i.e. schedule order). Because the borders
 * were built along a perfect elimination order of the chordal
 * interference graph, greedily picking the next free color is optimal.
 * NOTE(review): excerpt — some declarations and the pset cleanup at the
 * end are not visible here.
 */
777 static void assign(ir_node *block, void *env_ptr)
779 be_chordal_alloc_env_t *alloc_env = env_ptr;
780 be_chordal_env_t *env = alloc_env->chordal_env;
781 firm_dbg_module_t *dbg = env->dbg;
782 bitset_t *live = alloc_env->live;
783 bitset_t *colors = alloc_env->colors;
784 bitset_t *in_colors = alloc_env->in_colors;
785 const arch_env_t *arch_env = env->birg->main_env->arch_env;
789 struct list_head *head = get_block_border_head(env, block);
790 pset *live_in = put_live_in(block, pset_new_ptr_default());
792 bitset_clear_all(colors);
793 bitset_clear_all(live);
794 bitset_clear_all(in_colors);
796 DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
797 DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
798 list_for_each_entry(border_t, b, head, list) {
799 DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
800 b->irn, get_irn_graph_nr(b->irn)));
804 * Add initial defs for all values live in.
805 * Since their colors have already been assigned (The dominators were
806 * allocated before), we have to mark their colors as used also.
808 for(irn = pset_first(live_in); irn; irn = pset_next(live_in)) {
809 if(has_reg_class(env, irn)) {
810 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
813 assert(reg && "Node must have been assigned a register");
814 col = arch_register_get_index(reg);
816 /* Mark the color of the live in value as used. */
817 bitset_set(colors, col);
818 bitset_set(in_colors, col);
820 /* Mark the value live in. */
821 bitset_set(live, get_irn_graph_nr(irn));
826 * Mind that the sequence
827 * of defs from back to front defines a perfect
828 * elimination order. So, coloring the definitions from first to last
831 list_for_each_entry_reverse(border_t, b, head, list) {
832 ir_node *irn = b->irn;
833 int nr = get_irn_graph_nr(irn);
836 * Assign a color, if it is a local def. Global defs already have a
839 if(b->is_def && !is_live_in(block, irn)) {
840 const arch_register_t *reg;
/* Pre-colored nodes (constraint handling) keep their register;
 * otherwise grab the lowest free non-ignore color. */
843 if(pset_find_ptr(alloc_env->pre_colored, irn)) {
844 reg = arch_get_irn_register(arch_env, irn);
846 assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
850 col = get_next_free_reg(alloc_env, colors);
851 reg = arch_register_for_index(env->cls, col);
852 assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
855 bitset_set(colors, col);
857 assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");
858 arch_set_irn_register(arch_env, irn, reg);
860 DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n",
861 arch_register_get_name(reg), col, irn));
863 assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
864 bitset_set(live, nr);
867 /* Clear the color upon a use. */
868 else if(!b->is_def) {
869 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
872 assert(reg && "Register must have been assigned");
874 col = arch_register_get_index(reg);
875 assert(bitset_is_set(live, nr) && "Cannot have a non live use");
/* A use border is the end of the interval: the color becomes free again. */
877 bitset_clear(colors, col);
878 bitset_clear(live, nr);
/*
 * Driver for chordal register allocation of one register class:
 * set up the per-run environment, collect ignore registers, handle
 * register-targeting constraints, compute pressure/borders, and assign
 * colors — each step as a dominator-tree walk so dominators are colored
 * before their dominated blocks.
 * @param chordal_env The chordal environment for the current class/irg.
 * NOTE(review): excerpt — declarations (i, buf, plotter), the
 * numbering setup matching be_numbering_done(), and the free of
 * env.live are not visible here.
 */
885 void be_ra_chordal_color(be_chordal_env_t *chordal_env)
887 be_chordal_alloc_env_t env;
891 int colors_n = arch_register_class_n_regs(chordal_env->cls);
892 ir_graph *irg = chordal_env->irg;
/* Dominance information is required for the dominator-tree walks below. */
895 if(get_irg_dom_state(irg) != dom_consistent)
898 env.chordal_env = chordal_env;
899 env.colors_n = colors_n;
900 env.colors = bitset_alloca(colors_n);
901 env.tmp_colors = bitset_alloca(colors_n);
902 env.in_colors = bitset_alloca(colors_n);
903 env.ignore_regs = bitset_alloca(colors_n);
904 env.pre_colored = pset_new_ptr_default();
905 env.constr_dbg = firm_dbg_register("firm.be.chordal.constr");
907 for(i = 0; i < colors_n; ++i)
908 if(arch_register_type_is(&chordal_env->cls->regs[i], ignore))
909 bitset_set(env.ignore_regs, i);
911 /* Handle register targeting constraints */
912 dom_tree_walk_irg(irg, constraints, NULL, &env);
914 if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
915 snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
916 be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);
/* Liveness bitset must be sized after constraint handling, which may add nodes. */
920 env.live = bitset_malloc(get_graph_node_count(chordal_env->irg));
922 /* First, determine the pressure */
923 dom_tree_walk_irg(irg, pressure, NULL, &env);
925 /* Assign the colors */
926 dom_tree_walk_irg(irg, assign, NULL, &env);
928 be_numbering_done(irg);
930 if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
932 ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
933 plotter = new_plotter_ps(buf);
934 draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
935 plotter_free(plotter);
938 del_pset(env.pre_colored);