2 * Chordal register allocation.
3 * @author Sebastian Hack
6 * Copyright (C) Universitaet Karlsruhe
7 * Released under the GPL
29 #include "bipartite.h"
32 #include "irgraph_t.h"
33 #include "irprintf_t.h"
43 #include "besched_t.h"
50 #include "bechordal_t.h"
51 #include "bechordal_draw.h"
53 #define DBG_LEVEL SET_LEVEL_0
54 #define DBG_LEVEL_CHECK SET_LEVEL_0
/* NOTE(review): classic min/max macros — each argument is evaluated twice,
 * so never pass expressions with side effects (e.g. MAX(i++, j)). */
58 #define MAX(x, y) ((x) > (y) ? (x) : (y))
59 #define MIN(x, y) ((x) < (y) ? (x) : (y))
61 #define DUMP_INTERVALS
/**
 * Environment of one chordal allocation run: bundles the chordal
 * environment with per-register-class scratch bitsets and bookkeeping.
 */
63 typedef struct _be_chordal_alloc_env_t {
64 be_chordal_env_t *chordal_env;
66 firm_dbg_module_t *constr_dbg; /**< Debug output for the constraint handler. */
67 pset *pre_colored; /**< Set of precolored nodes. */
68 bitset_t *live; /**< A liveness bitset. */
69 bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
70 bitset_t *colors; /**< The color mask. */
71 bitset_t *in_colors; /**< Colors used by live in values. */
72 bitset_t *ignore_regs; /**< A bitset of all ignore registers in the current class. */
73 int colors_n; /**< The number of colors. */
74 } be_chordal_alloc_env_t;
78 /* Make a fourcc for border checking. */
79 #define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
/**
 * Debug helper: walk a block's border list and assert that every entry
 * still carries the BORDER_FOURCC magic (catches corrupted border_t's).
 */
81 static void check_border_list(struct list_head *head)
84 list_for_each_entry(border_t, x, head, list) {
85 assert(x->magic == BORDER_FOURCC);
/**
 * Debug helper: run check_border_list() on the border list of every
 * block recorded in env->border_heads.
 */
89 static void check_heads(be_chordal_env_t *env)
92 for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
93 /* ir_printf("checking border list of block %+F\n", ent->key); */
94 check_border_list(ent->value);
100 * Add an interval border to the list of a block's list
101 * of interval borders.
102 * @note You always have to create the use before the def.
103 * @param env The environment.
104 * @param head The list head to enqueue the borders.
105 * @param irn The node (value) the border belongs to.
106 * @param pressure The pressure at this point in time.
107 * @param step A time step for the border.
108 * @param is_def Is the border a use or a def.
 * @param is_real Stored into the border's is_real flag; presumably
 *                distinguishes real uses/defs from artificial block
 *                boundary borders — TODO confirm with callers.
109 * @return The created border.
111 static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
112 ir_node *irn, unsigned step, unsigned pressure,
113 unsigned is_def, unsigned is_real)
/* Use case: allocate the use border on the env obstack ... */
120 b = obstack_alloc(&env->obst, sizeof(*b));
122 /* also allocate the def and tie it to the use. */
123 def = obstack_alloc(&env->obst, sizeof(*def));
124 memset(def, 0, sizeof(*def));
129 * Set the link field of the irn to the def.
130 * This strongly relies on the fact, that the use is always
131 * made before the def.
133 set_irn_link(irn, def);
135 b->magic = BORDER_FOURCC;
136 def->magic = BORDER_FOURCC;
140 * If the def is encountered, the use was made and so was
141 * the def node (see the code above). It was placed into the
142 * link field of the irn, so we can get it there.
145 b = get_irn_link(irn);
147 assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered");
/* Fill in the remaining border fields and enqueue it. */
150 b->pressure = pressure;
152 b->is_real = is_real;
155 list_add_tail(&b->list, head);
156 DBG((env->dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
163 * Check, if an irn is of the register class currently under processing.
164 * @param env The chordal environment.
165 * @param irn The node.
166 * @return 1, if the node is of that register class, 0 if not.
168 static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
170 // return arch_irn_has_reg_class(env->main_env->arch_env, irn, -1, env->cls);
171 return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
/* Nonzero iff the register requirement of irn at position -1 is of the
 * "limited" type. NOTE: relies on a variable named `arch_env` being in
 * scope at the expansion site, and evaluates (req) twice. */
174 #define has_limited_constr(req, irn) \
175 (arch_get_register_req(arch_env, (req), irn, -1) && (req)->type == arch_register_req_type_limited)
/**
 * Find the lowest color that is neither in use nor an ignore register.
 * @param alloc_env Allocation environment (supplies tmp_colors scratch
 *                  bitset and the ignore_regs mask).
 * @param colors    Bitset of colors currently in use.
 * @return Index of the first clear bit in (colors | ignore_regs).
 */
177 static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
179 bitset_t *tmp = alloc_env->tmp_colors;
180 bitset_copy(tmp, colors);
181 bitset_or(tmp, alloc_env->ignore_regs);
182 return bitset_next_clear(tmp, 0);
185 typedef struct _operand_t operand_t;
/* --- operand_t fields (partial view) --- */
193 arch_register_req_t req; /**< Register requirement of this operand. */
194 unsigned has_constraints : 1; /**< Operand carries a "limited" register constraint. */
/* --- insn_t flags (partial view) --- */
203 unsigned in_constraints : 1; /**< Some input operand is constrained. */
204 unsigned out_constraints : 1; /**< Some output operand is constrained. */
205 unsigned has_constraints : 1; /**< in_constraints | out_constraints. */
206 unsigned pre_colored : 1; /**< All defs already carry a register. */
/* Defs occupy ops[0 .. use_start-1]; uses occupy ops[use_start .. n_ops-1]. */
209 #define insn_n_defs(insn) ((insn)->use_start)
210 #define insn_n_uses(insn) ((insn)->n_ops - (insn)->use_start)
/**
 * Build an insn_t description of an instruction: collect its def operands
 * (the Projs of a mode_T node, or the node itself) followed by its use
 * operands, record constraint flags, and compute each operand's bitset of
 * admissible registers.
 * @param alloc_env The allocation environment.
 * @param irn       The instruction to scan.
 * @param obst      Obstack used for the insn and its operand array.
 * @return The freshly built insn (allocated on @p obst).
 */
212 static insn_t *scan_insn(be_chordal_alloc_env_t *alloc_env, ir_node *irn, struct obstack *obst)
214 const be_chordal_env_t *env = alloc_env->chordal_env;
215 const arch_env_t *arch_env = env->birg->main_env->arch_env;
221 insn = obstack_alloc(obst, sizeof(insn[0]));
222 memset(insn, 0, sizeof(insn[0]));
225 insn->next_insn = sched_next(irn);
/* mode_T node: the results are the scheduled Projs following irn. */
226 if(get_irn_mode(irn) == mode_T) {
229 for(p = sched_next(irn); is_Proj(p); p = sched_next(p)) {
230 if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, p)) {
231 arch_get_register_req(arch_env, &o.req, p, -1);
/* Defs are encoded with negative positions: -(proj_nr + 1). */
234 o.pos = -(get_Proj_proj(p) + 1);
236 o.has_constraints = arch_register_req_is(&o.req, limited);
237 obstack_grow(obst, &o, sizeof(o));
239 insn->out_constraints |= o.has_constraints;
240 pre_colored += arch_get_irn_register(arch_env, p) != NULL;
/* Single-result node: the node itself is the def. */
247 else if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, irn)) {
248 arch_get_register_req(arch_env, &o.req, irn, -1);
253 o.has_constraints = arch_register_req_is(&o.req, limited);
254 obstack_grow(obst, &o, sizeof(o));
256 insn->out_constraints |= o.has_constraints;
257 pre_colored += arch_get_irn_register(arch_env, irn) != NULL;
260 insn->pre_colored = pre_colored == insn->n_ops && insn->n_ops > 0;
/* Everything grown so far were defs; the uses start here. */
261 insn->use_start = insn->n_ops;
263 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
264 ir_node *op = get_irn_n(irn, i);
266 if(arch_irn_consider_in_reg_alloc(arch_env, env->cls, op)) {
267 arch_get_register_req(arch_env, &o.req, irn, i);
272 o.has_constraints = arch_register_req_is(&o.req, limited);
273 obstack_grow(obst, &o, sizeof(o));
275 insn->in_constraints |= o.has_constraints;
279 insn->has_constraints = insn->in_constraints | insn->out_constraints;
280 insn->ops = obstack_finish(obst);
282 /* Compute the admissible registers bitsets. */
283 for(i = 0; i < insn->n_ops; ++i) {
284 operand_t *op = &insn->ops[i];
286 assert(op->req.cls == env->cls);
287 op->regs = bitset_obstack_alloc(obst, env->cls->n_regs);
/* Limited requirement: query the allowed set, then strip ignore regs. */
289 if(arch_register_req_is(&op->req, limited)) {
290 op->req.limited(op->req.limited_env, op->regs);
291 bitset_andnot(op->regs, alloc_env->ignore_regs);
/* Unconstrained: any non-ignore register of the class is admissible. */
294 arch_put_non_ignore_regs(env->birg->main_env->arch_env, env->cls, op->regs);
/**
 * Determine the "decisive" admissible-register set for a pair of
 * operands: when one operand's register set contains the other's, the
 * smaller (more constrained) set wins and is copied into @p bs.
 * NOTE(review): the direction of bitset_contains(a, b) (a subset-of b, or
 * b subset-of a) is not visible here — confirm against the bitset API.
 * @param bs Output bitset receiving the decisive set.
 * @param o1 First operand (may decide).
 * @param o2 Second operand (may decide).
 * @return Presumably bs on success, NULL if the sets are incomparable —
 *         TODO confirm; the return statements are outside this view.
 */
300 static bitset_t *get_decisive_partner_regs(bitset_t *bs, const operand_t *o1, const operand_t *o2)
305 bitset_copy(bs, o2->regs);
310 bitset_copy(bs, o1->regs);
314 assert(o1->req.cls == o2->req.cls);
316 if(bitset_contains(o1->regs, o2->regs))
317 bitset_copy(bs, o1->regs);
318 else if(bitset_contains(o2->regs, o1->regs))
319 bitset_copy(bs, o2->regs);
/**
 * Pair each def (out operand) of an instruction with a compatible use
 * (in operand) via bipartite matching, so both can later receive the
 * same register. Matched operands point to each other via ->partner.
 * @param alloc_env The allocation environment.
 * @param insn      The scanned instruction whose operands get paired.
 */
326 static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, insn_t *insn)
328 const be_chordal_env_t *env = alloc_env->chordal_env;
329 const arch_env_t *aenv = env->birg->main_env->arch_env;
330 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
332 int n_uses = insn_n_uses(insn);
333 int n_defs = insn_n_defs(insn);
334 int max_pairs = MIN(n_uses, n_defs);
335 bitset_t *bs = bitset_alloca(env->cls->n_regs);
/* Bipartite graph: left side = defs, right side = uses. */
336 bipartite_t *bp = bipartite_new(n_defs, n_uses);
337 int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));
342 For each out operand, try to find an in operand which can be assigned the
343 same register as the out operand.
345 for(j = 0; j < insn->use_start; ++j) {
346 operand_t *out_op = &insn->ops[j];
348 /* Try to find an in operand which has ... */
349 for(i = insn->use_start; i < insn->n_ops; ++i) {
350 const operand_t *op = &insn->ops[i];
353 The in operand can only be paired with a def, if the node defining the
354 operand's value does not interfere with the instruction itself. That
355 would mean, that it is live at the instruction, so no result of the instruction
356 can have the same register as the operand.
358 Furthermore, two operands can be paired, if the admissible registers
359 of one are a subset of the other's. We record the operand whose constraints
360 count in the decisive array.
362 if(!values_interfere(op->irn, op->carrier)) {
363 if(get_decisive_partner_regs(bs, out_op, op))
364 bipartite_add(bp, j, i - insn->use_start);
369 /* Compute the pairing. */
370 bipartite_matching(bp, pairing);
/* Wire up the partner links for each matched def/use pair. */
371 for(i = 0; i < insn->use_start; ++i) {
372 int p = pairing[i] + insn->use_start;
374 if(p >= insn->use_start) {
375 insn->ops[i].partner = &insn->ops[p];
376 insn->ops[p].partner = &insn->ops[i];
/**
 * Prepare a constrained instruction for register assignment:
 *  - insert Copies for input operands that interfere with the insn and
 *    whose input constraints overlap the output constraints,
 *  - insert a Perm before the insn (live values become Projs of it),
 *  - recompute liveness and re-scan the insn,
 *  - propagate the input constraints to the Perm as output constraints.
 * @param alloc_env The allocation environment.
 * @param the_insn  In/out: the scanned insn; replaced by a re-scan after
 *                  the Perm insertion.
 * @return The inserted Perm node.
 */
384 static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env, insn_t **the_insn)
386 be_chordal_env_t *env = alloc_env->chordal_env;
387 const arch_env_t *aenv = env->birg->main_env->arch_env;
388 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
389 insn_t *insn = *the_insn;
390 ir_node *bl = get_nodes_block(insn->irn);
391 ir_node *copy = NULL;
392 ir_node *perm = NULL;
393 bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
394 bitset_t *bs = bitset_alloca(env->cls->n_regs);
398 assert(insn->has_constraints && "only do this for constrained nodes");
401 Collect all registers that occur in output constraints.
402 This is necessary, since if the insn has one of these as an input constraint
403 and the corresponding operand interferes with the insn, the operand must
406 for(i = 0; i < insn->use_start; ++i) {
407 operand_t *op = &insn->ops[i];
408 if(op->has_constraints)
409 bitset_or(out_constr, op->regs);
413 Now, figure out which input operand must be copied since it has input
414 constraints which are also output constraints.
416 for(i = insn->use_start; i < insn->n_ops; ++i) {
417 operand_t *op = &insn->ops[i];
418 if(op->has_constraints && (values_interfere(op->carrier, insn->irn) || arch_irn_is(aenv, op->carrier, ignore))) {
419 bitset_copy(bs, op->regs);
420 bitset_and(bs, out_constr);
423 The operand (interfering with the node) has input constraints
424 which also occur as output constraints, so insert a copy.
426 if(bitset_popcnt(bs) > 0) {
427 copy = be_new_Copy(op->req.cls, env->irg, bl, op->carrier);
428 insn->ops[i].carrier = copy;
429 sched_add_before(insn->irn, copy);
431 DBG((dbg, LEVEL_2, "adding copy for interfering and constrained op %+F\n", op->carrier));
437 Make the Perm, recompute liveness and re-scan the insn since the
438 in operands are now the Projs of the Perm.
440 perm = insert_Perm_after(aenv, env->cls, env->dom_front, sched_prev(insn->irn));
442 /* Registers are propagated by insert_Perm_after(). Clean them here! */
444 const ir_edge_t *edge;
446 foreach_out_edge(perm, edge) {
447 ir_node *proj = get_edge_src_irn(edge);
448 arch_set_irn_register(aenv, proj, NULL);
452 We also have to re-build the insn since the input operands are now the Projs of
453 the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
454 the live sets may change.
456 be_liveness(env->irg);
457 obstack_free(&env->obst, insn);
458 *the_insn = insn = scan_insn(alloc_env, insn->irn, &env->obst);
461 Copy the input constraints of the insn to the Perm as output
462 constraints. Succeeding phases (coalescing will need that).
464 for(i = insn->use_start; i < insn->n_ops; ++i) {
465 operand_t *op = &insn->ops[i];
466 ir_node *proj = op->carrier;
468 Note that the predecessor must not be a Proj of the Perm,
469 since ignore-nodes are not Perm'ed.
471 if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
472 be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), &op->req);
/**
 * Handle the register constraints of one instruction: pre-process it
 * (Copies + Perm), pair defs with uses, pre-color the constrained
 * operands by bipartite matching, and finally give the remaining Projs
 * of the Perm the leftover free registers.
 * @param alloc_env The allocation environment.
 * @param irn       The instruction to process.
 * @return The next instruction to process (insn->next_insn as captured
 *         before any rewriting).
 */
480 static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env, ir_node *irn)
482 be_chordal_env_t *env = alloc_env->chordal_env;
/* Remember the obstack watermark; everything grown below is freed at the end. */
483 void *base = obstack_base(&env->obst);
484 insn_t *insn = scan_insn(alloc_env, irn, &env->obst);
485 ir_node *res = insn->next_insn;
/* Fully pre-colored insn: just record its defs as pre-colored. */
487 if(insn->pre_colored) {
489 for(i = 0; i < insn->use_start; ++i)
490 pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);
493 if(be_is_Perm(irn) || be_is_RegParams(irn) || (be_is_Barrier(irn) && !insn->in_constraints))
497 Perms inserted before the constraint handling phase are considered to be
498 correctly precolored. These Perms arise during the ABI handling phase.
500 if(insn->has_constraints) {
501 firm_dbg_module_t *dbg = alloc_env->constr_dbg;
502 const arch_env_t *aenv = env->birg->main_env->arch_env;
503 int n_regs = env->cls->n_regs;
504 bitset_t *bs = bitset_alloca(n_regs);
505 bitset_t *non_ignore = bitset_alloca(n_regs);
506 ir_node **alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
/* Bipartite graph: left = nodes to allocate, right = colors. */
507 bipartite_t *bp = bipartite_new(n_regs, n_regs);
508 int *assignment = alloca(n_regs * sizeof(assignment[0]));
/* Maps each allocated node to its paired partner (or NULL). */
509 pmap *partners = pmap_create();
513 const ir_edge_t *edge;
514 ir_node *perm = NULL;
517 prepare the constraint handling of this node.
518 Perms are constructed and Copies are created for constrained values
519 interfering with the instruction.
521 perm = pre_process_constraints(alloc_env, &insn);
523 /* find suitable in operands to the out operands of the node. */
524 pair_up_operands(alloc_env, insn);
527 look at the in/out operands and add each operand (and its possible partner)
528 to a bipartite graph (left: nodes with partners, right: admissible colors).
530 for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
531 operand_t *op = &insn->ops[i];
534 If the operand has no partner or the partner has not been marked
535 for allocation, determine the admissible registers and mark it
536 for allocation by associating the node and its partner with the
537 set of admissible registers via a bipartite graph.
539 if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
541 pmap_insert(partners, op->carrier, op->partner ? op->partner->carrier : NULL);
542 alloc_nodes[n_alloc] = op->carrier;
544 DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier, op->partner ? op->partner->carrier : NULL));
546 bitset_clear_all(bs);
547 get_decisive_partner_regs(bs, op, op->partner);
549 DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier, bs));
551 bitset_foreach(bs, col)
552 bipartite_add(bp, n_alloc, col);
559 Put all nodes which live by the constrained instruction also to the
560 allocation bipartite graph. They are considered unconstrained.
563 foreach_out_edge(perm, edge) {
564 ir_node *proj = get_edge_src_irn(edge);
566 assert(is_Proj(proj));
568 if(values_interfere(proj, irn)) {
569 assert(n_alloc < n_regs);
570 alloc_nodes[n_alloc] = proj;
571 pmap_insert(partners, proj, NULL);
/* Unconstrained: any non-ignore register of the class is admissible. */
573 bitset_clear_all(bs);
574 arch_put_non_ignore_regs(aenv, env->cls, bs);
575 bitset_foreach(bs, col)
576 bipartite_add(bp, n_alloc, col);
583 /* Compute a valid register allocation. */
584 bipartite_matching(bp, assignment);
586 /* Assign colors obtained from the matching. */
587 for(i = 0; i < n_alloc; ++i) {
588 const arch_register_t *reg;
592 assert(assignment[i] >= 0 && "there must have been a register assigned");
593 reg = arch_register_for_index(env->cls, assignment[i]);
/* Both the node and its partner (if any) get the same register. */
595 nodes[0] = alloc_nodes[i];
596 nodes[1] = pmap_get(partners, alloc_nodes[i]);
598 for(j = 0; j < 2; ++j) {
602 arch_set_irn_register(aenv, nodes[j], reg);
603 pset_hinsert_ptr(alloc_env->pre_colored, nodes[j]);
604 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", nodes[j], reg->name));
609 /* Allocate the non-constrained Projs of the Perm. */
612 bitset_clear_all(bs);
614 /* Put the colors of all Projs in a bitset. */
615 foreach_out_edge(perm, edge) {
616 ir_node *proj = get_edge_src_irn(edge);
617 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
620 bitset_set(bs, reg->index);
623 /* Assign the not yet assigned Projs of the Perm a suitable color. */
624 foreach_out_edge(perm, edge) {
625 ir_node *proj = get_edge_src_irn(edge);
626 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
628 DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));
631 col = get_next_free_reg(alloc_env, bs);
632 reg = arch_register_for_index(env->cls, col);
633 bitset_set(bs, reg->index);
634 arch_set_irn_register(aenv, proj, reg);
635 pset_insert_ptr(alloc_env->pre_colored, proj);
636 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));
641 pmap_destroy(partners);
/* Release everything grown on the obstack during this call. */
645 obstack_free(&env->obst, base);
650 * Handle constraint nodes in each basic block.
651 * be_insert_constr_perms() inserts Perm nodes which perm
652 * over all values live at the constrained node right in front
653 * of the constrained node. These Perms signal a constrained node.
654 * For further comments, refer to handle_constraints_at_perm().
656 static void constraints(ir_node *bl, void *data)
658 firm_dbg_module_t *dbg = firm_dbg_register("firm.be.chordal.constr");
659 be_chordal_alloc_env_t *env = data;
660 arch_env_t *arch_env = env->chordal_env->birg->main_env->arch_env;
/* Note: the loop advances via handle_constraints()' return value (the
 * insn following the processed one), not via sched_next(). */
663 for(irn = sched_first(bl); !sched_is_end(irn);) {
664 irn = handle_constraints(env, irn);
669 * Annotate the register pressure to the nodes and compute
670 * the liveness intervals (as border lists per block).
671 * @param block The block to do it for.
672 * @param env_ptr The environment (a be_chordal_alloc_env_t).
674 static void pressure(ir_node *block, void *env_ptr)
676 /* Convenience macro for a def (decrements pressure) */
677 #define border_def(irn, step, real) \
678 border_add(env, head, irn, step, pressure--, 1, real)
680 /* Convenience macro for a use (increments pressure) */
681 #define border_use(irn, step, real) \
682 border_add(env, head, irn, step, ++pressure, 0, real)
684 be_chordal_alloc_env_t *alloc_env = env_ptr;
685 be_chordal_env_t *env = alloc_env->chordal_env;
686 const arch_env_t *arch_env = env->birg->main_env->arch_env;
687 bitset_t *live = alloc_env->live;
688 firm_dbg_module_t *dbg = env->dbg;
693 unsigned pressure = 0;
694 struct list_head *head;
695 pset *live_in = put_live_in(block, pset_new_ptr_default());
696 pset *live_end = put_live_end(block, pset_new_ptr_default());
698 DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
699 bitset_clear_all(live);
701 /* Set up the border list in the block info */
702 head = obstack_alloc(&env->obst, sizeof(*head));
703 INIT_LIST_HEAD(head);
704 assert(pmap_get(env->border_heads, block) == NULL);
705 pmap_insert(env->border_heads, block, head);
708 * Make final uses of all values live out of the block.
709 * They are necessary to build up real intervals.
711 for(irn = pset_first(live_end); irn; irn = pset_next(live_end)) {
712 if(has_reg_class(env, irn)) {
713 DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_graph_nr(irn)));
714 bitset_set(live, get_irn_graph_nr(irn));
/* is_real = 0: artificial use at the block end, not a real user. */
715 border_use(irn, step, 0);
721 * Determine the last uses of a value inside the block, since they are
722 * relevant for the interval borders.
724 sched_foreach_reverse(block, irn) {
725 DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
726 DBG((dbg, LEVEL_2, "\tlive: %b\n", live));
729 * If the node defines some value, which can put into a
730 * register of the current class, make a border for it.
732 if(has_reg_class(env, irn)) {
733 int nr = get_irn_graph_nr(irn);
735 bitset_clear(live, nr);
736 border_def(irn, step, 1);
740 * If the node is no phi node we can examine the uses.
743 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
744 ir_node *op = get_irn_n(irn, i);
746 if(has_reg_class(env, op)) {
747 int nr = get_irn_graph_nr(op);
749 DBG((dbg, LEVEL_4, "\t\tpos: %d, use: %+F\n", i, op));
/* Only the last (first seen in reverse order) use makes a border. */
751 if(!bitset_is_set(live, nr)) {
752 border_use(op, step, 1);
753 bitset_set(live, nr);
762 * Add initial defs for all values live in.
764 for(irn = pset_first(live_in); irn; irn = pset_next(live_in)) {
765 if(has_reg_class(env, irn)) {
767 /* Mark the value live in. */
768 bitset_set(live, get_irn_graph_nr(irn));
/* is_real = 0: artificial def at the block start. */
771 border_def(irn, step, 0);
/**
 * Assign registers (colors) to the values of one block by scanning the
 * block's border list: local defs get a free color, uses release it.
 * Since blocks are visited in dominance order, live-in values already
 * carry a register and only need their colors marked as occupied.
 * @param block   The block to color.
 * @param env_ptr The environment (a be_chordal_alloc_env_t).
 */
780 static void assign(ir_node *block, void *env_ptr)
782 be_chordal_alloc_env_t *alloc_env = env_ptr;
783 be_chordal_env_t *env = alloc_env->chordal_env;
784 firm_dbg_module_t *dbg = env->dbg;
785 bitset_t *live = alloc_env->live;
786 bitset_t *colors = alloc_env->colors;
787 bitset_t *in_colors = alloc_env->in_colors;
788 const arch_env_t *arch_env = env->birg->main_env->arch_env;
792 struct list_head *head = get_block_border_head(env, block);
793 pset *live_in = put_live_in(block, pset_new_ptr_default());
795 bitset_clear_all(colors);
796 bitset_clear_all(live);
797 bitset_clear_all(in_colors);
799 DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
800 DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
801 list_for_each_entry(border_t, b, head, list) {
802 DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
803 b->irn, get_irn_graph_nr(b->irn)));
807 * Add initial defs for all values live in.
808 * Since their colors have already been assigned (The dominators were
809 * allocated before), we have to mark their colors as used also.
811 for(irn = pset_first(live_in); irn; irn = pset_next(live_in)) {
812 if(has_reg_class(env, irn)) {
813 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
816 assert(reg && "Node must have been assigned a register");
817 col = arch_register_get_index(reg);
819 /* Mark the color of the live in value as used. */
820 bitset_set(colors, col);
821 bitset_set(in_colors, col);
823 /* Mark the value live in. */
824 bitset_set(live, get_irn_graph_nr(irn));
829 * Mind that the sequence
830 * of defs from back to front defines a perfect
831 * elimination order. So, coloring the definitions from first to last
834 list_for_each_entry_reverse(border_t, b, head, list) {
835 ir_node *irn = b->irn;
836 int nr = get_irn_graph_nr(irn);
839 * Assign a color, if it is a local def. Global defs already have a
842 if(b->is_def && !is_live_in(block, irn)) {
843 const arch_register_t *reg;
/* Pre-colored (constraint-handled) values keep their register. */
846 if(pset_find_ptr(alloc_env->pre_colored, irn)) {
847 reg = arch_get_irn_register(arch_env, irn);
849 assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
/* Otherwise pick the lowest free non-ignore color. */
853 col = get_next_free_reg(alloc_env, colors);
854 reg = arch_register_for_index(env->cls, col);
855 assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
858 bitset_set(colors, col);
860 assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");
861 arch_set_irn_register(arch_env, irn, reg);
863 DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n",
864 arch_register_get_name(reg), col, irn));
866 assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
867 bitset_set(live, nr);
870 /* Clear the color upon a use. */
871 else if(!b->is_def) {
872 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
875 assert(reg && "Register must have been assigned");
877 col = arch_register_get_index(reg);
878 assert(bitset_is_set(live, nr) && "Cannot have a non live use");
880 bitset_clear(colors, col);
881 bitset_clear(live, nr);
/**
 * Entry point of the chordal register allocator for one register class:
 * set up the allocation environment, handle register constraints, compute
 * pressure/intervals, and assign colors by dominance-order block walks.
 * @param chordal_env The chordal environment (class, irg, options).
 */
888 void be_ra_chordal_color(be_chordal_env_t *chordal_env)
890 be_chordal_alloc_env_t env;
894 int colors_n = arch_register_class_n_regs(chordal_env->cls);
895 ir_graph *irg = chordal_env->irg;
/* The dominator-order walks below require consistent dominance info. */
898 if(get_irg_dom_state(irg) != dom_consistent)
901 env.chordal_env = chordal_env;
902 env.colors_n = colors_n;
903 env.colors = bitset_alloca(colors_n);
904 env.tmp_colors = bitset_alloca(colors_n);
905 env.in_colors = bitset_alloca(colors_n);
906 env.ignore_regs = bitset_alloca(colors_n);
907 env.pre_colored = pset_new_ptr_default();
908 env.constr_dbg = firm_dbg_register("firm.be.chordal.constr");
/* Collect the class' ignore registers into a mask once up front. */
910 for(i = 0; i < colors_n; ++i)
911 if(arch_register_type_is(&chordal_env->cls->regs[i], ignore))
912 bitset_set(env.ignore_regs, i);
914 /* Handle register targeting constraints */
915 dom_tree_walk_irg(irg, constraints, NULL, &env);
917 if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
918 snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
919 be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);
/* NOTE(review): env.live is heap-allocated here; a matching free is not
 * visible in this view — confirm it is released further down. */
923 env.live = bitset_malloc(get_graph_node_count(chordal_env->irg));
925 /* First, determine the pressure */
926 dom_tree_walk_irg(irg, pressure, NULL, &env);
928 /* Assign the colors */
929 dom_tree_walk_irg(irg, assign, NULL, &env);
931 be_numbering_done(irg);
933 if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
935 ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
936 plotter = new_plotter_ps(buf);
937 draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
938 plotter_free(plotter);
941 del_pset(env.pre_colored);