2 * Chordal register allocation.
3 * @author Sebastian Hack
6 * Copyright (C) Universitaet Karlsruhe
7 * Released under the GPL
29 #include "bipartite.h"
32 #include "irgraph_t.h"
33 #include "irprintf_t.h"
43 #include "besched_t.h"
50 #include "bechordal_t.h"
51 #include "bechordal_draw.h"
53 #define DBG_LEVEL SET_LEVEL_0
54 #define DBG_LEVEL_CHECK SET_LEVEL_0
58 #define MAX(x, y) ((x) > (y) ? (x) : (y))
59 #define MIN(x, y) ((x) < (y) ? (x) : (y))
61 #define DUMP_INTERVALS
/* Per-run working state of the chordal allocator; one instance per register
 * class being colored (set up in be_ra_chordal_color()). */
63 typedef struct _be_chordal_alloc_env_t {
64 be_chordal_env_t *chordal_env;
66 firm_dbg_module_t *constr_dbg; /**< Debug output for the constraint handler. */
67 pset *pre_colored; /**< Set of precolored nodes. */
68 bitset_t *live; /**< A liveness bitset. */
69 bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
70 bitset_t *colors; /**< The color mask. */
71 bitset_t *in_colors; /**< Colors used by live in values. */
72 bitset_t *ignore_regs; /**< A bitset of all ignore registers in the current class. */
73 int colors_n; /**< The number of colors. */
74 } be_chordal_alloc_env_t;
78 /* Make a fourcc for border checking. */
79 #define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
/* Debug helper: assert that every border on a block's border list still
 * carries the BORDER_FOURCC magic (catches corrupted/overwritten borders). */
81 static void check_border_list(struct list_head *head)
84 list_for_each_entry(border_t, x, head, list) {
85 assert(x->magic == BORDER_FOURCC);
/* Debug helper: run check_border_list() on the border list of every block
 * recorded in env->border_heads. */
89 static void check_heads(be_chordal_env_t *env)
92 for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
93 /* ir_printf("checking border list of block %+F\n", ent->key); */
94 check_border_list(ent->value);
100 * Add an interval border to a block's list of interval borders.
101 * Since blocks are scanned bottom-up, the use border of a value is
101 * created first; the matching def border is allocated at the same time
102 * and parked in the node's link field until the def is encountered.
102 * @note You always have to create the use before the def.
103 * @param env The environment.
104 * @param head The list head to enqueue the borders.
105 * @param irn The node (value) the border belongs to.
107 * @param step A time step for the border.
106 * @param pressure The pressure at this point in time.
108 * @param is_def Is the border a use or a def.
108 * @param is_real 1 for a border at an actual instruction; 0 for a
108 *        bookkeeping border at a block boundary (live-in def / live-out
108 *        use) -- see the border_def()/border_use() call sites in pressure().
109 * @return The created border.
111 static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
112 ir_node *irn, unsigned step, unsigned pressure,
113 unsigned is_def, unsigned is_real)
120 b = obstack_alloc(&env->obst, sizeof(*b));
122 /* also allocate the def and tie it to the use. */
123 def = obstack_alloc(&env->obst, sizeof(*def));
124 memset(def, 0, sizeof(*def));
129 * Set the link field of the irn to the def.
130 * This strongly relies on the fact, that the use is always
131 * made before the def.
133 set_irn_link(irn, def);
135 b->magic = BORDER_FOURCC;
136 def->magic = BORDER_FOURCC;
140 * If the def is encountered, the use was made and so was the
141 * the def node (see the code above). It was placed into the
142 * link field of the irn, so we can get it there.
145 b = get_irn_link(irn);
147 assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered");
150 b->pressure = pressure;
152 b->is_real = is_real;
/* Borders are appended tail-first; together with the reverse schedule walk
 * this yields the list order assign() relies on. */
155 list_add_tail(&b->list, head);
156 DBG((env->dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
163 * Check, if an irn is of the register class currently under processing.
164 * @param env The chordal environment.
165 * @param irn The node.
166 * @return 1, if the node is of that register class, 0 if not.
168 static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
170 // return arch_irn_has_reg_class(env->main_env->arch_env, irn, -1, env->cls);
171 return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
174 #define has_limited_constr(req, irn) \
175 (arch_get_register_req(arch_env, (req), irn, -1) && (req)->type == arch_register_req_type_limited)
/* Return the index of the first register that is neither set in `colors`
 * nor an ignore register of the current class. Works on a scratch copy so
 * neither input bitset is modified. */
177 static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
179 bitset_t *tmp = alloc_env->tmp_colors;
180 bitset_copy(tmp, colors);
181 bitset_or(tmp, alloc_env->ignore_regs);
182 return bitset_next_clear(tmp, 0);
185 typedef struct _operand_t operand_t;
193 arch_register_req_t req;
194 unsigned has_constraints : 1;
203 unsigned in_constraints : 1;
204 unsigned out_constraints : 1;
205 unsigned has_constraints : 1;
206 unsigned pre_colored : 1;
209 #define insn_n_defs(insn) ((insn)->use_start)
210 #define insn_n_uses(insn) ((insn)->n_ops - (insn)->use_start)
/* Build an insn_t describing irn for the constraint handler: collect all
 * defs (for mode_T nodes these are the scheduled Proj successors) followed
 * by all uses belonging to the current register class, record per-operand
 * `limited' constraints, and compute each operand's admissible register set.
 * The insn and its operand array are allocated on `obst`. */
212 static insn_t *scan_insn(be_chordal_alloc_env_t *alloc_env, ir_node *irn, struct obstack *obst)
214 const be_chordal_env_t *env = alloc_env->chordal_env;
215 const arch_env_t *arch_env = env->birg->main_env->arch_env;
221 insn = obstack_alloc(obst, sizeof(insn[0]));
222 memset(insn, 0, sizeof(insn[0]));
225 insn->next_insn = sched_next(irn);
226 if(get_irn_mode(irn) == mode_T) {
/* mode_T: the real results are the Projs scheduled directly after irn. */
229 for(p = sched_next(irn); is_Proj(p); p = sched_next(p)) {
230 if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, p)) {
231 arch_get_register_req(arch_env, &o.req, p, -1);
/* Negative pos encodes "output operand #proj" (1-based, negated). */
234 o.pos = -(get_Proj_proj(p) + 1);
236 o.has_constraints = arch_register_req_is(&o.req, limited);
237 obstack_grow(obst, &o, sizeof(o));
239 insn->out_constraints |= o.has_constraints;
240 pre_colored += arch_get_irn_register(arch_env, p) != NULL;
247 else if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, irn)) {
/* Single-result node: irn itself is the only def. */
248 arch_get_register_req(arch_env, &o.req, irn, -1);
253 o.has_constraints = arch_register_req_is(&o.req, limited);
254 obstack_grow(obst, &o, sizeof(o));
256 insn->out_constraints |= o.has_constraints;
257 pre_colored += arch_get_irn_register(arch_env, irn) != NULL;
/* pre_colored holds only if every collected def already has a register. */
260 insn->pre_colored = pre_colored == insn->n_ops && insn->n_ops > 0;
261 insn->use_start = insn->n_ops;
263 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
264 ir_node *op = get_irn_n(irn, i);
266 if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, op)) {
/* Input requirement is queried on irn at position i, not on op itself. */
267 arch_get_register_req(arch_env, &o.req, irn, i);
272 o.has_constraints = arch_register_req_is(&o.req, limited);
273 obstack_grow(obst, &o, sizeof(o));
275 insn->in_constraints |= o.has_constraints;
279 insn->has_constraints = insn->in_constraints | insn->out_constraints;
280 insn->ops = obstack_finish(obst);
282 /* Compute the admissible registers bitsets. */
283 for(i = 0; i < insn->n_ops; ++i) {
284 operand_t *op = &insn->ops[i];
286 assert(op->req.cls == env->cls);
287 op->regs = bitset_obstack_alloc(obst, env->cls->n_regs);
289 if(arch_register_req_is(&op->req, limited))
290 op->req.limited(op->req.limited_env, op->regs);
292 arch_put_non_ignore_regs(env->birg->main_env->arch_env, env->cls, op->regs);
/* Copy into bs the "decisive" admissible register set of the operand pair
 * (o1, o2): if one operand's set contains the other's, the smaller (more
 * constrained) set wins. Used as a predicate by pair_up_operands() and
 * handle_constraints().
 * NOTE(review): the early branches and the return value for the
 * non-containment case are not visible in this view -- presumably NULL is
 * returned when neither set contains the other; confirm against full source. */
298 static bitset_t *get_decisive_partner_regs(bitset_t *bs, const operand_t *o1, const operand_t *o2)
303 bitset_copy(bs, o2->regs);
308 bitset_copy(bs, o1->regs);
312 assert(o1->req.cls == o2->req.cls);
314 if(bitset_contains(o1->regs, o2->regs))
315 bitset_copy(bs, o1->regs);
316 else if(bitset_contains(o2->regs, o1->regs))
317 bitset_copy(bs, o2->regs);
/* Pair each def (out operand) of insn with a compatible use (in operand) so
 * both can later receive the same register. Compatibility: the use's value
 * must not interfere with the instruction, and one operand's admissible set
 * must contain the other's. A maximum pairing is computed via bipartite
 * matching; paired operands point at each other through ->partner. */
324 static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, insn_t *insn)
326 const be_chordal_env_t *env = alloc_env->chordal_env;
327 const arch_env_t *aenv = env->birg->main_env->arch_env;
328 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
330 int n_uses = insn_n_uses(insn);
331 int n_defs = insn_n_defs(insn);
332 int max_pairs = MIN(n_uses, n_defs);
333 bitset_t *bs = bitset_alloca(env->cls->n_regs);
334 bipartite_t *bp = bipartite_new(n_defs, n_uses);
335 int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));
340 For each out operand, try to find an in operand which can be assigned the
341 same register as the out operand.
343 for(j = 0; j < insn->use_start; ++j) {
344 operand_t *out_op = &insn->ops[j];
346 /* Try to find an in operand which has ... */
347 for(i = insn->use_start; i < insn->n_ops; ++i) {
348 const operand_t *op = &insn->ops[i];
351 The in operand can only be paired with a def, if the node defining the
352 operand's value does not interfere with the instruction itself. That
353 would mean, that it is live at the instruction, so no result of the instruction
354 can have the same register as the operand.
356 Furthermore, two operands can be paired, if the admissible registers
357 of one are a subset of the other's. We record the operand whose constraints
358 count in the decisive array.
360 if(!values_interfere(op->irn, op->carrier)) {
361 if(get_decisive_partner_regs(bs, out_op, op))
362 bipartite_add(bp, j, i - insn->use_start);
367 /* Compute the pairing. */
368 bipartite_matching(bp, pairing);
/* pairing[i] < 0 means "unmatched"; the +use_start shift maps a matched
 * use index back into the operand array. */
369 for(i = 0; i < insn->use_start; ++i) {
370 int p = pairing[i] + insn->use_start;
372 if(p >= insn->use_start) {
373 insn->ops[i].partner = &insn->ops[p];
374 insn->ops[p].partner = &insn->ops[i];
/* Prepare a constrained instruction for register assignment:
 *  1. Insert Copies for in operands that interfere with the instruction (or
 *     are ignore nodes) and whose input constraints collide with some output
 *     constraint.
 *  2. Insert a Perm over all values live before the instruction, clear the
 *     registers insert_Perm_after() propagated onto its Projs, recompute
 *     liveness and re-scan the insn (the uses are now Projs of the Perm).
 *  3. Mirror the insn's input constraints onto the Perm's out positions for
 *     the benefit of later coalescing.
 * Returns the inserted Perm; *the_insn is replaced by the re-scanned insn. */
382 static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env, insn_t **the_insn)
384 be_chordal_env_t *env = alloc_env->chordal_env;
385 const arch_env_t *aenv = env->birg->main_env->arch_env;
386 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
387 insn_t *insn = *the_insn;
388 ir_node *bl = get_nodes_block(insn->irn);
389 ir_node *copy = NULL;
390 ir_node *perm = NULL;
391 bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
392 bitset_t *bs = bitset_alloca(env->cls->n_regs);
396 assert(insn->has_constraints && "only do this for constrained nodes");
399 Collect all registers that occur in output constraints.
400 This is necessary, since if the insn has one of these as an input constraint
401 and the corresponding operand interferes with the insn, the operand must
404 for(i = 0; i < insn->use_start; ++i) {
405 operand_t *op = &insn->ops[i];
406 if(op->has_constraints)
407 bitset_or(out_constr, op->regs);
411 Now, figure out which input operand must be copied since it has input
412 constraints which are also output constraints.
414 for(i = insn->use_start; i < insn->n_ops; ++i) {
415 operand_t *op = &insn->ops[i];
416 if(op->has_constraints && (values_interfere(op->carrier, insn->irn) || arch_irn_is(aenv, op->carrier, ignore))) {
417 bitset_copy(bs, op->regs);
418 bitset_and(bs, out_constr);
421 The operand (interfering with the node) has input constraints
422 which also occur as output constraints, so insert a copy.
424 if(bitset_popcnt(bs) > 0) {
425 copy = be_new_Copy(op->req.cls, env->irg, bl, op->carrier);
426 insn->ops[i].carrier = copy;
427 sched_add_before(insn->irn, copy);
429 DBG((dbg, LEVEL_2, "adding copy for interfering and constrained op %+F\n", op->carrier));
435 Make the Perm, recompute liveness and re-scan the insn since the
436 in operands are now the Projs of the Perm.
438 perm = insert_Perm_after(aenv, env->cls, env->dom_front, sched_prev(insn->irn));
440 /* Registers are propagated by insert_Perm_after(). Clean them here! */
442 const ir_edge_t *edge;
444 foreach_out_edge(perm, edge) {
445 ir_node *proj = get_edge_src_irn(edge);
446 arch_set_irn_register(aenv, proj, NULL);
450 We also have to re-build the insn since the input operands are now the Projs of
451 the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
452 the live sets may change.
454 be_liveness(env->irg);
/* Safe only because insn was the last allocation on env->obst
 * (see handle_constraints() / scan_insn()). */
455 obstack_free(&env->obst, insn);
456 *the_insn = insn = scan_insn(alloc_env, insn->irn, &env->obst);
459 Copy the input constraints of the insn to the Perm as output
460 constraints. Succeeding phases (coalescing will need that).
462 for(i = insn->use_start; i < insn->n_ops; ++i) {
463 operand_t *op = &insn->ops[i];
464 ir_node *proj = op->carrier;
466 Note that the predecessor must not be a Proj of the Perm,
467 since ignore-nodes are not Perm'ed.
469 if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
470 be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), &op->req);
/* Process one scheduled node: if it carries register constraints, insert the
 * necessary Perm/Copies (pre_process_constraints), pair defs with uses, and
 * compute a valid register assignment for all constrained values and all
 * Perm Projs via bipartite matching. Assigned nodes are recorded in
 * alloc_env->pre_colored so assign() keeps their colors.
 * Returns the next instruction to process (insn->next_insn, captured before
 * any rebuilding), so the caller's walk skips Projs of mode_T nodes. */
478 static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env, ir_node *irn)
480 be_chordal_env_t *env = alloc_env->chordal_env;
481 void *base = obstack_base(&env->obst);
482 insn_t *insn = scan_insn(alloc_env, irn, &env->obst);
483 ir_node *res = insn->next_insn;
485 if(insn->pre_colored) {
/* All defs already carry registers: just record them as precolored. */
487 for(i = 0; i < insn->use_start; ++i)
488 pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);
491 if(be_is_Perm(irn) || be_is_RegParams(irn) || (be_is_Barrier(irn) && !insn->in_constraints))
495 Perms inserted before the constraint handling phase are considered to be
496 correctly precolored. These Perms arise during the ABI handling phase.
498 if(insn->has_constraints) {
499 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
500 const arch_env_t *aenv = env->birg->main_env->arch_env;
501 int n_regs = env->cls->n_regs;
502 bitset_t *bs = bitset_alloca(n_regs);
503 bitset_t *non_ignore = bitset_alloca(n_regs);
504 ir_node **alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
505 bipartite_t *bp = bipartite_new(n_regs, n_regs);
506 int *assignment = alloca(n_regs * sizeof(assignment[0]));
507 pmap *partners = pmap_create();
511 const ir_edge_t *edge;
512 ir_node *perm = NULL;
515 prepare the constraint handling of this node.
516 Perms are constructed and Copies are created for constrained values
517 interfering with the instruction.
519 perm = pre_process_constraints(alloc_env, &insn);
521 /* find suitable in operands to the out operands of the node. */
522 pair_up_operands(alloc_env, insn);
525 look at the in/out operands and add each operand (and its possible partner)
526 to a bipartite graph (left: nodes with partners, right: admissible colors).
528 for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
529 operand_t *op = &insn->ops[i];
532 If the operand has no partner or the partner has not been marked
533 for allocation, determine the admissible registers and mark it
534 for allocation by associating the node and its partner with the
535 set of admissible registers via a bipartite graph.
537 if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
539 pmap_insert(partners, op->carrier, op->partner ? op->partner->carrier : NULL);
540 alloc_nodes[n_alloc] = op->carrier;
542 DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier, op->partner ? op->partner->carrier : NULL));
544 bitset_clear_all(bs);
545 get_decisive_partner_regs(bs, op, op->partner);
547 DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier, bs));
549 bitset_foreach(bs, col)
550 bipartite_add(bp, n_alloc, col);
557 Put all nodes which live by the constrained instruction also to the
558 allocation bipartite graph. They are considered unconstrained.
561 foreach_out_edge(perm, edge) {
562 ir_node *proj = get_edge_src_irn(edge);
564 assert(is_Proj(proj));
566 if(values_interfere(proj, irn)) {
567 assert(n_alloc < n_regs);
568 alloc_nodes[n_alloc] = proj;
569 pmap_insert(partners, proj, NULL);
571 bitset_clear_all(bs);
/* Unconstrained live-through values may take any non-ignore register. */
572 arch_put_non_ignore_regs(aenv, env->cls, bs);
573 bitset_foreach(bs, col)
574 bipartite_add(bp, n_alloc, col);
581 /* Compute a valid register allocation. */
582 bipartite_matching(bp, assignment);
584 /* Assign colors obtained from the matching. */
585 for(i = 0; i < n_alloc; ++i) {
586 const arch_register_t *reg;
590 assert(assignment[i] >= 0 && "there must have been a register assigned");
591 reg = arch_register_for_index(env->cls, assignment[i]);
/* nodes[1] is the paired partner (may be NULL); both get the same reg. */
593 nodes[0] = alloc_nodes[i];
594 nodes[1] = pmap_get(partners, alloc_nodes[i]);
596 for(j = 0; j < 2; ++j) {
600 arch_set_irn_register(aenv, nodes[j], reg);
601 pset_hinsert_ptr(alloc_env->pre_colored, nodes[j]);
602 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", nodes[j], reg->name));
607 /* Allocate the non-constrained Projs of the Perm. */
610 bitset_clear_all(bs);
612 /* Put the colors of all Projs in a bitset. */
613 foreach_out_edge(perm, edge) {
614 ir_node *proj = get_edge_src_irn(edge);
615 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
618 bitset_set(bs, reg->index);
621 /* Assign the not yet assigned Projs of the Perm a suitable color. */
622 foreach_out_edge(perm, edge) {
623 ir_node *proj = get_edge_src_irn(edge);
624 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
626 DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));
629 col = get_next_free_reg(alloc_env, bs);
630 reg = arch_register_for_index(env->cls, col);
631 bitset_set(bs, reg->index);
632 arch_set_irn_register(aenv, proj, reg);
633 pset_insert_ptr(alloc_env->pre_colored, proj);
634 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));
639 pmap_destroy(partners);
/* Release everything allocated on the obstack since entry (insn, ops,
 * admissible-register bitsets). */
643 obstack_free(&env->obst, base);
648 * Handle constraint nodes in each basic block.
649 * be_insert_constr_perms() inserts Perm nodes which perm
650 * over all values live at the constrained node right in front
651 * of the constrained node. These Perms signal a constrained node.
652 * For further comments, refer to handle_constraints_at_perm().
654 static void constraints(ir_node *bl, void *data)
656 firm_dbg_module_t *dbg = firm_dbg_register("firm.be.chordal.constr");
657 be_chordal_alloc_env_t *env = data;
658 arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
/* The loop advances via handle_constraints()'s return value, which is the
 * next real instruction -- this skips the Projs of mode_T nodes. */
661 for(irn = sched_first(bl); !sched_is_end(irn);) {
662 irn = handle_constraints(env, irn);
667 * Annotate the register pressure to the nodes and compute
668 * the liveness intervals.
669 * Walks the block's schedule bottom-up, emitting a use border at each last
669 * use and a def border at each definition (of the current register class).
670 * @param block The block to do it for.
671 * @param env_ptr The environment (a be_chordal_alloc_env_t).
672 static void pressure(ir_node *block, void *env_ptr)
674 /* Convenience macro for a def */
675 #define border_def(irn, step, real) \
676 border_add(env, head, irn, step, pressure--, 1, real)
678 /* Convenience macro for a use */
679 #define border_use(irn, step, real) \
680 border_add(env, head, irn, step, ++pressure, 0, real)
682 be_chordal_alloc_env_t *alloc_env = env_ptr;
683 be_chordal_env_t *env = alloc_env->chordal_env;
684 const arch_env_t *arch_env = env->birg->main_env->arch_env;
685 bitset_t *live = alloc_env->live;
686 firm_dbg_module_t *dbg = env->dbg;
691 unsigned pressure = 0;
692 struct list_head *head;
693 pset *live_in = put_live_in(block, pset_new_ptr_default());
694 pset *live_end = put_live_end(block, pset_new_ptr_default());
696 DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
697 bitset_clear_all(live);
699 /* Set up the border list in the block info */
700 head = obstack_alloc(&env->obst, sizeof(*head));
701 INIT_LIST_HEAD(head);
702 assert(pmap_get(env->border_heads, block) == NULL);
703 pmap_insert(env->border_heads, block, head);
706 * Make final uses of all values live out of the block.
707 * They are necessary to build up real intervals.
709 for(irn = pset_first(live_end); irn; irn = pset_next(live_end)) {
710 if(has_reg_class(env, irn)) {
711 DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_graph_nr(irn)));
712 bitset_set(live, get_irn_graph_nr(irn));
/* is_real = 0: this is a block-boundary border, not an actual use. */
713 border_use(irn, step, 0);
719 * Determine the last uses of a value inside the block, since they are
720 * relevant for the interval borders.
722 sched_foreach_reverse(block, irn) {
723 DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
724 DBG((dbg, LEVEL_2, "\tlive: %b\n", live));
727 * If the node defines some value, which can put into a
728 * register of the current class, make a border for it.
730 if(has_reg_class(env, irn)) {
731 int nr = get_irn_graph_nr(irn);
733 bitset_clear(live, nr);
734 border_def(irn, step, 1);
738 * If the node is no phi node we can examine the uses.
741 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
742 ir_node *op = get_irn_n(irn, i);
744 if(has_reg_class(env, op)) {
745 int nr = get_irn_graph_nr(op);
747 DBG((dbg, LEVEL_4, "\t\tpos: %d, use: %+F\n", i, op));
/* Walking bottom-up, the first use we see of a not-yet-live value
 * is its last use in this block -- only that one gets a border. */
749 if(!bitset_is_set(live, nr)) {
750 border_use(op, step, 1);
751 bitset_set(live, nr);
760 * Add initial defs for all values live in.
762 for(irn = pset_first(live_in); irn; irn = pset_next(live_in)) {
763 if(has_reg_class(env, irn)) {
765 /* Mark the value live in. */
766 bitset_set(live, get_irn_graph_nr(irn));
/* is_real = 0: boundary border closing the interval at block entry. */
769 border_def(irn, step, 0);
/* Assign registers to all values of the current class in `block` by scanning
 * the border list front-to-back (= top of block downwards). The reversed
 * def order along the dominance walk forms a perfect elimination order of
 * the chordal interference graph, so greedy first-free coloring is optimal.
 * Live-in values keep the register chosen in the dominating block; precolored
 * values (from constraint handling) keep theirs. */
778 static void assign(ir_node *block, void *env_ptr)
780 be_chordal_alloc_env_t *alloc_env = env_ptr;
781 be_chordal_env_t *env = alloc_env->chordal_env;
782 firm_dbg_module_t *dbg = env->dbg;
783 bitset_t *live = alloc_env->live;
784 bitset_t *colors = alloc_env->colors;
785 bitset_t *in_colors = alloc_env->in_colors;
786 const arch_env_t *arch_env = env->birg->main_env->arch_env;
790 struct list_head *head = get_block_border_head(env, block);
791 pset *live_in = put_live_in(block, pset_new_ptr_default());
793 bitset_clear_all(colors);
794 bitset_clear_all(live);
795 bitset_clear_all(in_colors);
797 DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
798 DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
799 list_for_each_entry(border_t, b, head, list) {
800 DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
801 b->irn, get_irn_graph_nr(b->irn)));
805 * Add initial defs for all values live in.
806 * Since their colors have already been assigned (The dominators were
807 * allocated before), we have to mark their colors as used also.
809 for(irn = pset_first(live_in); irn; irn = pset_next(live_in)) {
810 if(has_reg_class(env, irn)) {
811 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
814 assert(reg && "Node must have been assigned a register");
815 col = arch_register_get_index(reg);
817 /* Mark the color of the live in value as used. */
818 bitset_set(colors, col);
819 bitset_set(in_colors, col);
821 /* Mark the value live in. */
822 bitset_set(live, get_irn_graph_nr(irn));
827 * Mind that the sequence
828 * of defs from back to front defines a perfect
829 * elimination order. So, coloring the definitions from first to last
832 list_for_each_entry_reverse(border_t, b, head, list) {
833 ir_node *irn = b->irn;
834 int nr = get_irn_graph_nr(irn);
837 * Assign a color, if it is a local def. Global defs already have a
840 if(b->is_def && !is_live_in(block, irn)) {
841 const arch_register_t *reg;
844 if(pset_find_ptr(alloc_env->pre_colored, irn)) {
/* Constraint handling already fixed this node's register. */
845 reg = arch_get_irn_register(arch_env, irn);
847 assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
851 col = get_next_free_reg(alloc_env, colors);
852 reg = arch_register_for_index(env->cls, col);
853 assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
854 assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");
857 bitset_set(colors, col);
858 arch_set_irn_register(arch_env, irn, reg);
860 DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n",
861 arch_register_get_name(reg), col, irn));
863 assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
864 bitset_set(live, nr);
867 /* Clear the color upon a use. */
868 else if(!b->is_def) {
869 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
872 assert(reg && "Register must have been assigned");
874 col = arch_register_get_index(reg);
875 assert(bitset_is_set(live, nr) && "Cannot have a non live use");
/* A use border marks the value's last use: its register is free again. */
877 bitset_clear(colors, col);
878 bitset_clear(live, nr);
/* Entry point of the chordal register allocator for one register class:
 * set up the allocation environment, handle register-targeting constraints,
 * compute pressure/intervals and assign colors via dominance-order walks,
 * optionally dumping the constrained graph and the interval tree. */
885 void be_ra_chordal_color(be_chordal_env_t *chordal_env)
887 be_chordal_alloc_env_t env;
891 int colors_n = arch_register_class_n_regs(chordal_env->cls);
892 ir_graph *irg = chordal_env->irg;
895 if(get_irg_dom_state(irg) != dom_consistent)
898 env.chordal_env = chordal_env;
899 env.colors_n = colors_n;
900 env.colors = bitset_alloca(colors_n);
901 env.tmp_colors = bitset_alloca(colors_n);
902 env.in_colors = bitset_alloca(colors_n);
903 env.ignore_regs = bitset_alloca(colors_n);
904 env.pre_colored = pset_new_ptr_default();
905 env.constr_dbg = firm_dbg_register("firm.be.chordal.constr");
/* Cache which registers of the class may never be allocated. */
907 for(i = 0; i < colors_n; ++i)
908 if(arch_register_type_is(&chordal_env->cls->regs[i], ignore))
909 bitset_set(env.ignore_regs, i);
911 /* Handle register targeting constraints */
912 dom_tree_walk_irg(irg, constraints, NULL, &env);
914 if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
915 snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
916 be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);
/* NOTE(review): heap-allocated; the matching free is not visible in this
 * view -- confirm it is released after the walks. */
920 env.live = bitset_malloc(get_graph_node_count(chordal_env->irg));
922 /* First, determine the pressure */
923 dom_tree_walk_irg(irg, pressure, NULL, &env);
925 /* Assign the colors */
926 dom_tree_walk_irg(irg, assign, NULL, &env);
928 be_numbering_done(irg);
930 if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
932 ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
933 plotter = new_plotter_ps(buf);
934 draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
935 plotter_free(plotter);
938 del_pset(env.pre_colored);