2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * Chordal register allocation.
22 * @author Sebastian Hack
26 * Copyright (C) Universitaet Karlsruhe
27 * Released under the GPL
39 #include "raw_bitset.h"
41 #include "bipartite.h"
42 #include "hungarian.h"
45 #include "irgraph_t.h"
46 #include "irprintf_t.h"
56 #include "besched_t.h"
63 #include "bestatevent.h"
66 #include "bechordal_t.h"
67 #include "bechordal_draw.h"
/* Debug module handle; only present in debug builds (DEBUG_ONLY). */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Enable dumping of interval trees (used by the draw/plot support below). */
#define DUMP_INTERVALS
/**
 * Per-register-class state for the chordal coloring phase.
 * One instance is set up in be_ra_chordal_color() and threaded through
 * the dominance-tree walkers (constraints, pressure, assign).
 */
typedef struct _be_chordal_alloc_env_t {
	be_chordal_env_t *chordal_env;   /**< The enclosing chordal environment. */
	pset *pre_colored;               /**< Set of precolored nodes. */
	bitset_t *live;                  /**< A liveness bitset. */
	bitset_t *tmp_colors;            /**< An auxiliary bitset which is as long as the number of colors in the class. */
	bitset_t *colors;                /**< The color mask. */
	bitset_t *in_colors;             /**< Colors used by live in values. */
	int colors_n;                    /**< The number of colors. */
} be_chordal_alloc_env_t;
/* Make a fourcc for border checking: magic value stored in each border_t
 * (debug builds only) so list corruption can be asserted on. */
#define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
/**
 * Debug helper: walk one block's border list and assert that every
 * border still carries the BORDER_FOURCC magic (list not corrupted).
 * @param head  Head of the block's border list.
 */
static void check_border_list(struct list_head *head)
	list_for_each_entry(border_t, x, head, list) {
		assert(x->magic == BORDER_FOURCC);
/**
 * Debug helper: run check_border_list() on the border list of every
 * block recorded in env->border_heads (a pmap block -> list head).
 * @param env  The chordal environment holding the border head map.
 */
static void check_heads(be_chordal_env_t *env)
	for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
		/* ir_printf("checking border list of block %+F\n", ent->key); */
		check_border_list(ent->value);
/**
 * Add an interval border to the list of a block's list
 * of interval border.
 * @note You always have to create the use before the def.
 * @param env      The environment.
 * @param head     The list head to enqueue the borders.
 * @param irn      The node (value) the border belongs to.
 * @param step     A time step for the border.
 * @param pressure The pressure at this point in time.
 * @param is_def   Is the border a use or a def.
 * @param is_real  Whether this border is a "real" occurrence (vs. a
 *                 block-boundary pseudo use/def) — see pressure().
 * @return The created border.
 */
static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
			ir_node *irn, unsigned step, unsigned pressure,
			unsigned is_def, unsigned is_real)
	/* Use case: allocate the use border on the obstack ... */
	b = obstack_alloc(env->obst, sizeof(*b));

	/* also allocate the def and tie it to the use. */
	def = obstack_alloc(env->obst, sizeof(*def));
	memset(def, 0, sizeof(*def));

	/*
	 * Set the link field of the irn to the def.
	 * This strongly relies on the fact, that the use is always
	 * made before the def.
	 */
	set_irn_link(irn, def);

	DEBUG_ONLY(b->magic = BORDER_FOURCC);
	DEBUG_ONLY(def->magic = BORDER_FOURCC);

	/*
	 * Def case: if the def is encountered, the use was made and so was the
	 * the def node (see the code above). It was placed into the
	 * link field of the irn, so we can get it there.
	 */
	b = get_irn_link(irn);
	assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered");

	/* Record the current pressure and realness on the border. */
	b->pressure = pressure;
	b->is_real = is_real;

	/* Enqueue at the tail so the list stays in schedule order. */
	list_add_tail(&b->list, head);
	DBG((dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
/**
 * Check, if an irn is of the register class currently under processing.
 * @param env The chordal environment.
 * @param irn The node.
 * @return 1, if the node is of that register class, 0 if not.
 */
static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
	return arch_irn_has_reg_class(env->birg->main_env->arch_env, irn, -1, env->cls);
	// return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
/* True iff operand @p irn of the current node has a register requirement
 * of type "limited" (i.e. is constrained to a subset of registers).
 * NOTE: expects an `arch_env` variable in the expansion scope. */
#define has_limited_constr(req, irn) \
	(arch_get_register_req(arch_env, (req), irn, -1) && (req)->type == arch_register_req_type_limited)
/**
 * Find the index of the next free (clear) color, skipping both the
 * colors already set in @p colors and the class's ignore colors.
 * @param alloc_env  The allocation environment (provides tmp_colors).
 * @param colors     Bitset of colors currently in use.
 * @return Index of the first color usable for allocation.
 */
static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
	bitset_t *tmp = alloc_env->tmp_colors;
	bitset_copy(tmp, colors);
	bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
	return bitset_next_clear(tmp, 0);
/**
 * Compute the "decisive" admissible register set for a pair of partnered
 * operands: if one operand's set contains the other's, the smaller
 * (contained) set decides; otherwise one of the sets is taken as-is.
 * The result is written into @p bs and also returned.
 * @param bs  Destination bitset (sized to the class's register count).
 * @param o1  First operand (may decide alone if o2 is absent).
 * @param o2  Partner operand (may be NULL in the elided branches — TODO confirm).
 */
static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
	bitset_copy(bs, o2->regs);
	bitset_copy(bs, o1->regs);
	/* Both operands must agree on the register class (or have none). */
	assert(o1->req->cls == o2->req->cls || ! o1->req->cls || ! o2->req->cls);
	if(bitset_contains(o1->regs, o2->regs))
		bitset_copy(bs, o1->regs);
	else if(bitset_contains(o2->regs, o1->regs))
		bitset_copy(bs, o2->regs);
/**
 * Scan one scheduled node into a be_insn_t description, using this
 * chordal environment's ignore colors and arch environment.
 * @param env  The chordal environment.
 * @param irn  The node to scan.
 * @return The freshly built instruction descriptor (obstack-allocated).
 */
static be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
	ie.ignore_colors = env->ignore_colors;
	ie.aenv = env->birg->main_env->arch_env;
	return be_scan_insn(&ie, irn);
/**
 * Insert Copy nodes around a (possibly constrained) instruction so that
 * the later constraint-handling/coloring phases can always satisfy the
 * register requirements:
 *  1) copies for ignore-register arguments whose fixed register is not
 *     admissible for the operand,
 *  2) copies for values used constrained more than once by the same insn,
 *  3) copies for constrained arguments that live through the insn and are
 *     constrained to a register also occurring in an out constraint.
 * @param env  The chordal environment.
 * @param irn  The instruction to prepare.
 * @return The next instruction in the schedule to process.
 */
static ir_node *prepare_constr_insn(be_chordal_env_t *env, ir_node *irn)
	const arch_env_t *aenv = env->birg->main_env->arch_env;
	bitset_t *tmp = bitset_alloca(env->cls->n_regs);
	bitset_t *def_constr = bitset_alloca(env->cls->n_regs);
	ir_node *bl = get_nodes_block(irn);
	be_lv_t *lv = env->birg->lv;

	/* Copy pinned/ignore arguments whose fixed register conflicts with
	 * this operand's "limited" requirement. */
	for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
		ir_node *op = get_irn_n(irn, i);
		const arch_register_t *reg;
		const arch_register_req_t *req;

		/* Only operands of the class being allocated are relevant. */
		if (arch_get_irn_reg_class(aenv, irn, i) != env->cls)

		reg = arch_get_irn_register(aenv, op);

		/* Only ignore (fixed) registers can conflict here. */
		if (reg == NULL || !arch_register_type_is(reg, ignore))
		if(arch_register_type_is(reg, joker))

		req = arch_get_register_req(aenv, irn, i);
		if (!arch_register_req_is(req, limited))

		/* The fixed register is admissible — nothing to do. */
		if (rbitset_is_set(req->limited, reg->index))

		copy = be_new_Copy(env->cls, env->irg, bl, op);
		be_stat_ev("constr_copy", 1);
		sched_add_before(irn, copy);
		set_irn_n(irn, i, copy);
		DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, irn, i));

	insn = chordal_scan_insn(env, irn);

	if(!insn->has_constraints)

	/* insert copies for nodes that occur constrained more than once. */
	for(i = insn->use_start; i < insn->n_ops; ++i) {
		be_operand_t *op = &insn->ops[i];

		if(!op->has_constraints)

		for(j = i + 1; j < insn->n_ops; ++j) {
			be_operand_t *a_op = &insn->ops[j];

			if(a_op->carrier != op->carrier || !a_op->has_constraints)

			copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
			be_stat_ev("constr_copy", 1);
			sched_add_before(insn->irn, copy);
			set_irn_n(insn->irn, a_op->pos, copy);
			DBG((dbg, LEVEL_3, "inserting multiple constr copy %+F for %+F pos %d\n", copy, insn->irn, a_op->pos));

	/* collect all registers occuring in out constraints. */
	for(i = 0; i < insn->use_start; ++i) {
		be_operand_t *op = &insn->ops[i];
		if(op->has_constraints)
			bitset_or(def_constr, op->regs);

	/*
	 * insert copies for all constrained arguments living through the node
	 * and being constrained to a register which also occurs in out constraints.
	 */
	for(i = insn->use_start; i < insn->n_ops; ++i) {
		be_operand_t *op = &insn->ops[i];

		bitset_copy(tmp, op->regs);
		bitset_and(tmp, def_constr);

		/*
		 * Copy only needed if all of the following hold:
		 * 1) the operand is constrained.
		 * 2) lives through the node.
		 * 3) is constrained to a register occuring in out constraints.
		 */
		if(!op->has_constraints ||
				!values_interfere(lv, insn->irn, op->carrier) ||
				bitset_popcnt(tmp) == 0)

		/*
		 * only create the copy if the operand is no copy.
		 * this is necessary since the assure constraints phase inserts
		 * Copies and Keeps for operands which must be different from the results.
		 * Additional copies here would destroy this.
		 */
		if(be_is_Copy(op->carrier))

		copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
		sched_add_before(insn->irn, copy);
		set_irn_n(insn->irn, op->pos, copy);
		DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n", copy, insn->irn, op->pos));
		be_liveness_update(lv, op->carrier);

	/* NOTE(review): insn is released to the obstack here, yet
	 * insn->next_insn is read afterwards — this looks like a read through
	 * a freed object (obstack_free frees the object and everything
	 * allocated after it). Verify, and if so hoist next_insn into a local
	 * before the free. */
	obstack_free(env->obst, insn);
	return insn->next_insn;
/**
 * Block walker: run prepare_constr_insn() over every scheduled node of
 * a block; prepare_constr_insn() returns the next node so the loop body
 * advances correctly past inserted copies.
 * @param bl    The block being walked.
 * @param data  The be_chordal_env_t (walker closure).
 */
static void pre_spill_prepare_constr_walker(ir_node *bl, void *data)
	be_chordal_env_t *env = data;
	for(irn = sched_first(bl); !sched_is_end(irn);) {
		irn = prepare_constr_insn(env, irn);
/**
 * Public entry point: walk all blocks of the graph and insert the
 * constraint-preparation copies (see prepare_constr_insn()).
 * @param cenv  The chordal environment (also passed as walker data).
 */
void be_pre_spill_prepare_constr(be_chordal_env_t *cenv) {
	irg_block_walk_graph(cenv->irg, pre_spill_prepare_constr_walker, NULL, (void *) cenv);
/**
 * Pair each out operand of @p insn with an in operand that may be
 * assigned the same register (their admissible sets overlap and the in
 * value does not live through the insn). Preference is given to the
 * pair with the smallest combined register sets (most constrained first).
 * On success the two operands point at each other via ->partner.
 * @param alloc_env  The allocation environment.
 * @param insn       The scanned instruction.
 */
static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t *insn)
	const be_chordal_env_t *env = alloc_env->chordal_env;
	int n_uses = be_insn_n_uses(insn);
	int n_defs = be_insn_n_defs(insn);
	bitset_t *bs = bitset_alloca(env->cls->n_regs);
	int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));
	be_lv_t *lv = env->birg->lv;

	/*
	 * For each out operand, try to find an in operand which can be assigned the
	 * same register as the out operand.
	 */
	for (j = 0; j < insn->use_start; ++j) {
		/* Sentinel: larger than any possible combined popcount. */
		int smallest_n_regs = 2 * env->cls->n_regs + 1;
		be_operand_t *out_op = &insn->ops[j];

		/* Try to find an in operand which has ... */
		for(i = insn->use_start; i < insn->n_ops; ++i) {
			const be_operand_t *op = &insn->ops[i];

			/* Already paired with another out operand. */
			if (op->partner != NULL)
			/* In value lives through the insn — cannot share a register. */
			if (values_interfere(lv, op->irn, op->carrier))

			bitset_clear_all(bs);
			bitset_copy(bs, op->regs);
			bitset_and(bs, out_op->regs);
			n_total = bitset_popcnt(op->regs) + bitset_popcnt(out_op->regs);

			/* Overlapping sets and tighter than the best so far? */
			if (bitset_popcnt(bs) > 0 && n_total < smallest_n_regs) {
				smallest_n_regs = n_total;

		/* Wire up the chosen pair (if any was found). */
		be_operand_t *partner = &insn->ops[smallest];
		out_op->partner = partner;
		partner->partner = out_op;
/**
 * Prepare a constrained instruction for register assignment:
 * insert a Perm over the live values right before it, clear the
 * Perm-propagated registers on the Projs, re-scan the instruction
 * (its inputs are now Projs of the Perm), and copy the in constraints
 * onto the matching Perm out positions for later coalescing.
 * @param alloc_env  The allocation environment.
 * @param the_insn   In/out: the scanned insn; replaced by the re-scan.
 * @return The inserted Perm (or NULL — TODO confirm from elided lines).
 */
static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env,
				be_insn_t **the_insn)
	be_chordal_env_t *env = alloc_env->chordal_env;
	const arch_env_t *aenv = env->birg->main_env->arch_env;
	be_insn_t *insn = *the_insn;
	ir_node *perm = NULL;
	bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
	const ir_edge_t *edge;

	assert(insn->has_constraints && "only do this for constrained nodes");

	/*
	 * Collect all registers that occur in output constraints.
	 * This is necessary, since if the insn has one of these as an input constraint
	 * and the corresponding operand interferes with the insn, the operand must
	 * be copied.
	 */
	for(i = 0; i < insn->use_start; ++i) {
		be_operand_t *op = &insn->ops[i];
		if(op->has_constraints)
			bitset_or(out_constr, op->regs);

	/*
	 * Make the Perm, recompute liveness and re-scan the insn since the
	 * in operands are now the Projs of the Perm.
	 */
	perm = insert_Perm_after(env->birg, env->cls, sched_prev(insn->irn));

	/* Registers are propagated by insert_Perm_after(). Clean them here! */
	be_stat_ev("constr_perm", get_irn_arity(perm));
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		arch_set_irn_register(aenv, proj, NULL);

	/*
	 * We also have to re-build the insn since the input operands are now the Projs of
	 * the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
	 * the live sets may change.
	 */
	// be_liveness_recompute(lv);
	/* NOTE(review): insn is freed here but insn->irn is read on the next
	 * line — same suspected use-after-free pattern as in
	 * prepare_constr_insn(); verify and hoist insn->irn into a local. */
	obstack_free(env->obst, insn);
	*the_insn = insn = chordal_scan_insn(env, insn->irn);

	/*
	 * Copy the input constraints of the insn to the Perm as output
	 * constraints. Succeeding phases (coalescing) will need that.
	 */
	for(i = insn->use_start; i < insn->n_ops; ++i) {
		be_operand_t *op = &insn->ops[i];
		ir_node *proj = op->carrier;
		/*
		 * Note that the predecessor must not be a Proj of the Perm,
		 * since ignore-nodes are not Perm'ed.
		 */
		if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
			be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), op->req);
/**
 * Handle the register constraints of one instruction.
 * Builds a bipartite matching problem (Hungarian method) between the
 * constrained operands (plus their partners and the Perm's live-through
 * Projs) and the admissible registers, solves it, and pre-colors the
 * involved nodes with the resulting assignment. Barrier nodes toggle
 * the @p silent flag (constraint handling is suppressed while silent).
 * @param alloc_env  The allocation environment.
 * @param irn        The instruction to handle.
 * @param silent     In/out: suppression flag, toggled at Barriers.
 * @return The next instruction in the schedule to process.
 */
static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env, ir_node *irn, int *silent)
	const arch_env_t *aenv;
	ir_node **alloc_nodes;
	hungarian_problem_t *bp;
	const ir_edge_t *edge;
	ir_node *perm = NULL;
	be_chordal_env_t *env = alloc_env->chordal_env;
	void *base = obstack_base(env->obst);
	be_insn_t *insn = chordal_scan_insn(env, irn);
	ir_node *res = insn->next_insn;
	int be_silent = *silent;
	be_lv_t *lv = env->birg->lv;

	/* Remember already pre-colored defs so assign() will not re-color them. */
	if(insn->pre_colored) {
		for(i = 0; i < insn->use_start; ++i)
			pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);

	/*
	 * If the current node is a barrier toggle the silent flag.
	 * If we are in the start block, we are ought to be silent at the beginning,
	 * so the toggling activates the constraint handling but skips the barrier.
	 * If we are in the end block we handle the in requirements of the barrier
	 * and set the rest to silent.
	 */
	if(be_is_Barrier(irn))

	/*
	 * Perms inserted before the constraint handling phase are considered to be
	 * correctly precolored. These Perms arise during the ABI handling phase.
	 */
	if(!insn->has_constraints)

	aenv = env->birg->main_env->arch_env;
	n_regs = env->cls->n_regs;
	bs = bitset_alloca(n_regs);
	alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
	/* Perfect matching: every left node must receive a register. */
	bp = hungarian_new(n_regs, n_regs, 2, HUNGARIAN_MATCH_PERFECT);
	// bipartite_t *bp = bipartite_new(n_regs, n_regs);
	assignment = alloca(n_regs * sizeof(assignment[0]));
	partners = pmap_create();

	/*
	 * prepare the constraint handling of this node.
	 * Perms are constructed and Copies are created for constrained values
	 * interfering with the instruction.
	 */
	perm = pre_process_constraints(alloc_env, &insn);

	/* find suitable in operands to the out operands of the node. */
	pair_up_operands(alloc_env, insn);

	/*
	 * look at the in/out operands and add each operand (and its possible partner)
	 * to a bipartite graph (left: nodes with partners, right: admissible colors).
	 */
	for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
		be_operand_t *op = &insn->ops[i];

		/*
		 * If the operand has no partner or the partner has not been marked
		 * for allocation, determine the admissible registers and mark it
		 * for allocation by associating the node and its partner with the
		 * set of admissible registers via a bipartite graph.
		 */
		if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
			pmap_insert(partners, op->carrier, op->partner ? op->partner->carrier : NULL);
			alloc_nodes[n_alloc] = op->carrier;

			DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier, op->partner ? op->partner->carrier : NULL));

			bitset_clear_all(bs);
			get_decisive_partner_regs(bs, op, op->partner);

			DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier, bs));

			/* One unit-cost edge per admissible color. */
			bitset_foreach(bs, col) {
				hungarian_add(bp, n_alloc, col, 1);
				// bipartite_add(bp, n_alloc, col);

	/*
	 * Put all nodes which live through the constrained instruction also to the
	 * allocation bipartite graph. They are considered unconstrained.
	 */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);

		assert(is_Proj(proj));

		/* Skip Projs that die at the insn or are already in the matching. */
		if(!values_interfere(lv, proj, irn) || pmap_contains(partners, proj))

		assert(n_alloc < n_regs);
		alloc_nodes[n_alloc] = proj;
		pmap_insert(partners, proj, NULL);

		/* Unconstrained: any non-ignore, allocatable color is admissible. */
		bitset_clear_all(bs);
		arch_put_non_ignore_regs(aenv, env->cls, bs);
		bitset_andnot(bs, env->ignore_colors);
		bitset_foreach(bs, col) {
			hungarian_add(bp, n_alloc, col, 1);
			// bipartite_add(bp, n_alloc, col);

	/* Compute a valid register allocation. */
	hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
	match_res = hungarian_solve(bp, assignment, &cost, 1);
	assert(match_res == 0 && "matching failed");
	//bipartite_matching(bp, assignment);

	/* Assign colors obtained from the matching. */
	for(i = 0; i < n_alloc; ++i) {
		const arch_register_t *reg;

		assert(assignment[i] >= 0 && "there must have been a register assigned");
		reg = arch_register_for_index(env->cls, assignment[i]);

		/* Color both the node and its partner (if any) identically. */
		nodes[0] = alloc_nodes[i];
		nodes[1] = pmap_get(partners, alloc_nodes[i]);

		for(j = 0; j < 2; ++j) {
			arch_set_irn_register(aenv, nodes[j], reg);
			(void) pset_hinsert_ptr(alloc_env->pre_colored, nodes[j]);
			DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", nodes[j], reg->name));

	/* Allocate the non-constrained Projs of the Perm. */
	bitset_clear_all(bs);

	/* Put the colors of all Projs in a bitset. */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *reg = arch_get_irn_register(aenv, proj);

		bitset_set(bs, reg->index);

	/* Assign the not yet assigned Projs of the Perm a suitable color. */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *reg = arch_get_irn_register(aenv, proj);

		DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));

		col = get_next_free_reg(alloc_env, bs);
		reg = arch_register_for_index(env->cls, col);
		bitset_set(bs, reg->index);
		arch_set_irn_register(aenv, proj, reg);
		pset_insert_ptr(alloc_env->pre_colored, proj);
		DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));

	//bipartite_free(bp);
	pmap_destroy(partners);

	/* Release everything allocated on the obstack during this call. */
	obstack_free(env->obst, base);
/**
 * Handle constraint nodes in each basic block.
 * handle_constraints() inserts Perm nodes which perm
 * over all values live at the constrained node right in front
 * of the constrained node. These Perms signal a constrained node.
 * For further comments, refer to handle_constraints().
 * @param bl    The block being walked (dominance-tree walker).
 * @param data  The be_chordal_alloc_env_t (walker closure).
 */
static void constraints(ir_node *bl, void *data)
	be_chordal_alloc_env_t *env = data;
	/*
	 * Start silent in the start block.
	 * The silence remains until the first barrier is seen.
	 * Each other block is begun loud.
	 */
	int silent = bl == get_irg_start_block(get_irn_irg(bl));

	/*
	 * If the block is the start block search the barrier and
	 * start handling constraints from there.
	 */
	for(irn = sched_first(bl); !sched_is_end(irn);) {
		irn = handle_constraints(env, irn, &silent);
/**
 * Annotate the register pressure to the nodes and compute
 * the liveness intervals.
 * Walks the block's schedule backwards, emitting interval borders
 * (def/use pairs) into the block's border list; values live at the
 * block boundaries get non-"real" borders so intervals are closed.
 * @param block   The block to do it for.
 * @param env_ptr The environment (be_chordal_alloc_env_t).
 */
static void pressure(ir_node *block, void *env_ptr)
/* Convenience macro for a def */
#define border_def(irn, step, real) \
	border_add(env, head, irn, step, pressure--, 1, real)

/* Convenience macro for a use */
#define border_use(irn, step, real) \
	border_add(env, head, irn, step, ++pressure, 0, real)

	be_chordal_alloc_env_t *alloc_env = env_ptr;
	be_chordal_env_t *env = alloc_env->chordal_env;
	bitset_t *live = alloc_env->live;
	be_lv_t *lv = env->birg->lv;

	unsigned pressure = 0;
	struct list_head *head;
	pset *live_in = be_lv_pset_put_in(lv, block, pset_new_ptr_default());
	pset *live_end = be_lv_pset_put_end(lv, block, pset_new_ptr_default());

	DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
	bitset_clear_all(live);

	/* Set up the border list in the block info */
	head = obstack_alloc(env->obst, sizeof(*head));
	INIT_LIST_HEAD(head);
	assert(pmap_get(env->border_heads, block) == NULL);
	pmap_insert(env->border_heads, block, head);

	/*
	 * Make final uses of all values live out of the block.
	 * They are necessary to build up real intervals.
	 */
	foreach_pset(live_end, irn) {
		if(has_reg_class(env, irn)) {
			DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_idx(irn)));
			bitset_set(live, get_irn_idx(irn));
			/* Pseudo use (is_real = 0) at the block end. */
			border_use(irn, step, 0);

	/*
	 * Determine the last uses of a value inside the block, since they are
	 * relevant for the interval borders.
	 */
	sched_foreach_reverse(block, irn) {
		DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
		DBG((dbg, LEVEL_2, "\tlive: %B\n", live));

		/*
		 * If the node defines some value, which can put into a
		 * register of the current class, make a border for it.
		 */
		if(has_reg_class(env, irn)) {
			int nr = get_irn_idx(irn);
			bitset_clear(live, nr);
			border_def(irn, step, 1);

		/*
		 * If the node is no phi node we can examine the uses.
		 */
		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
			ir_node *op = get_irn_n(irn, i);

			if(has_reg_class(env, op)) {
				int nr = get_irn_idx(op);
				const char *msg = "-";

				/* First (i.e. last in schedule order) use seen. */
				if(!bitset_is_set(live, nr)) {
					border_use(op, step, 1);
					bitset_set(live, nr);

				DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));

	/*
	 * Add initial defs for all values live in.
	 */
	foreach_pset(live_in, irn) {
		if(has_reg_class(env, irn)) {
			/* Mark the value live in. */
			bitset_set(live, get_irn_idx(irn));
			/* Pseudo def (is_real = 0) at the block start. */
			border_def(irn, step, 0);
/**
 * Assign registers (colors) to the values of one block by scanning its
 * border list. Defs from back to front form a perfect elimination
 * order of the chordal interference graph, so greedy coloring with
 * get_next_free_reg() is optimal. Live-in values keep the color their
 * dominating definition already received.
 * @param block   The block to color.
 * @param env_ptr The environment (be_chordal_alloc_env_t).
 */
static void assign(ir_node *block, void *env_ptr)
	be_chordal_alloc_env_t *alloc_env = env_ptr;
	be_chordal_env_t *env = alloc_env->chordal_env;
	bitset_t *live = alloc_env->live;
	bitset_t *colors = alloc_env->colors;
	bitset_t *in_colors = alloc_env->in_colors;
	const arch_env_t *arch_env = env->birg->main_env->arch_env;
	struct list_head *head = get_block_border_head(env, block);
	be_lv_t *lv = env->birg->lv;
	pset *live_in = be_lv_pset_put_in(lv, block, pset_new_ptr_default());

	bitset_clear_all(colors);
	bitset_clear_all(live);
	bitset_clear_all(in_colors);

	DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
	DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
	list_for_each_entry(border_t, b, head, list) {
		DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
					b->irn, get_irn_idx(b->irn)));

	/*
	 * Add initial defs for all values live in.
	 * Since their colors have already been assigned (The dominators were
	 * allocated before), we have to mark their colors as used also.
	 */
	foreach_pset(live_in, irn) {
		if(has_reg_class(env, irn)) {
			const arch_register_t *reg = arch_get_irn_register(arch_env, irn);

			assert(reg && "Node must have been assigned a register");
			col = arch_register_get_index(reg);
			DBG((dbg, LEVEL_4, "%+F has reg %s\n", irn, reg->name));

			/* Mark the color of the live in value as used. */
			bitset_set(colors, col);
			bitset_set(in_colors, col);

			/* Mark the value live in. */
			bitset_set(live, get_irn_idx(irn));

	/*
	 * Mind that the sequence of defs from back to front defines a perfect
	 * elimination order. So, coloring the definitions from first to last
	 * is optimal here.
	 */
	list_for_each_entry_reverse(border_t, b, head, list) {
		ir_node *irn = b->irn;
		int nr = get_irn_idx(irn);
		int ignore = arch_irn_is(arch_env, irn, ignore);

		/*
		 * Assign a color, if it is a local def. Global defs already have a
		 * color (handled above for the live-in set).
		 */
		if(b->is_def && !be_is_live_in(lv, block, irn)) {
			const arch_register_t *reg;

			/* Pre-colored / ignore nodes keep their existing register. */
			if(ignore || pset_find_ptr(alloc_env->pre_colored, irn)) {
				reg = arch_get_irn_register(arch_env, irn);
				assert(!bitset_is_set(colors, col) && "pre-colored register must be free");

				col = get_next_free_reg(alloc_env, colors);
				reg = arch_register_for_index(env->cls, col);
				assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
				assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");

			bitset_set(colors, col);
			arch_set_irn_register(arch_env, irn, reg);

			DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n", arch_register_get_name(reg), col, irn));

			assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
			bitset_set(live, nr);

		/* Clear the color upon a use. */
		else if(!b->is_def) {
			const arch_register_t *reg = arch_get_irn_register(arch_env, irn);

			assert(reg && "Register must have been assigned");
			col = arch_register_get_index(reg);

			if(!arch_register_type_is(reg, ignore)) {
				assert(bitset_is_set(live, nr) && "Cannot have a non live use");

			bitset_clear(colors, col);
			bitset_clear(live, nr);
/**
 * Main driver of the chordal register allocator for one register class:
 * handles constraints, computes pressure/intervals, and assigns colors,
 * each via a dominance-tree walk. Optionally dumps the graph after
 * constraint handling and plots the interval tree, depending on the
 * environment's dump flags.
 * @param chordal_env  The chordal environment for the current class.
 */
void be_ra_chordal_color(be_chordal_env_t *chordal_env)
	be_chordal_alloc_env_t env;
	be_irg_t *birg = chordal_env->birg;
	const arch_register_class_t *cls = chordal_env->cls;
	int colors_n = arch_register_class_n_regs(cls);
	ir_graph *irg = chordal_env->irg;
	int allocatable_regs = colors_n - be_put_ignore_regs(birg, cls, NULL);

	/* some special classes contain only ignore regs, no work to be done */
	if(allocatable_regs == 0)

	/* Coloring relies on dominance and liveness information. */
	be_assure_dom_front(birg);
	be_assure_liveness(birg);

	env.chordal_env = chordal_env;
	env.colors_n = colors_n;
	env.colors = bitset_alloca(colors_n);
	env.tmp_colors = bitset_alloca(colors_n);
	env.in_colors = bitset_alloca(colors_n);
	env.pre_colored = pset_new_ptr_default();

	/* Handle register targeting constraints */
	dom_tree_walk_irg(irg, constraints, NULL, &env);

	if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
		snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
		be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);

	/* Sized by node index, not by color count — freed below. */
	env.live = bitset_malloc(get_irg_last_idx(chordal_env->irg));

	/* First, determine the pressure */
	dom_tree_walk_irg(irg, pressure, NULL, &env);

	/* Assign the colors */
	dom_tree_walk_irg(irg, assign, NULL, &env);

	if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
		ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
		plotter = new_plotter_ps(buf);
		draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
		plotter_free(plotter);

	bitset_free(env.live);
	del_pset(env.pre_colored);
/**
 * Module constructor: register this file's debug module with the
 * firm debug facility. Invoked via BE_REGISTER_MODULE_CONSTRUCTOR.
 */
void be_init_chordal(void)
	FIRM_DBG_REGISTER(dbg, "firm.be.chordal.constr");

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal);