2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Chordal register allocation.
23 * @author Sebastian Hack
37 #include "raw_bitset.h"
39 #include "bipartite.h"
40 #include "hungarian.h"
43 #include "irgraph_t.h"
44 #include "irprintf_t.h"
55 #include "besched_t.h"
62 #include "bestatevent.h"
64 #include "beintlive_t.h"
66 #include "bechordal_t.h"
67 #include "bechordal_draw.h"
/* Debug module handle for this file; exists only in debug builds (DEBUG_ONLY). */
70 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* Enables dumping of interval trees (see be_ra_chordal_color below). */
74 #define DUMP_INTERVALS
/**
 * Per-run environment for the chordal coloring phase, bundling the
 * chordal environment with the scratch bitsets used by the walkers.
 */
76 typedef struct _be_chordal_alloc_env_t {
77 be_chordal_env_t *chordal_env; /**< The enclosing chordal environment. */
79 pset *pre_colored; /**< Set of precolored nodes. */
80 bitset_t *live; /**< A liveness bitset. */
81 bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
82 bitset_t *colors; /**< The color mask. */
83 bitset_t *in_colors; /**< Colors used by live in values. */
84 int colors_n; /**< The number of colors. */
85 } be_chordal_alloc_env_t;
89 /* Make a fourcc for border checking. */
/* Magic value stored in border_t::magic (debug builds only); verified by
 * check_border_list() to detect corrupted border lists. */
90 #define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
93 static void check_border_list(struct list_head *head)
96 list_for_each_entry(border_t, x, head, list) {
97 assert(x->magic == BORDER_FOURCC);
101 static void check_heads(be_chordal_env_t *env)
104 for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
105 /* ir_printf("checking border list of block %+F\n", ent->key); */
106 check_border_list(ent->value);
112 * Add an interval border to the list of a block's list
113 * of interval border.
114 * @note You always have to create the use before the def.
115 * @param env The environment.
116 * @param head The list head to enqueue the borders.
117 * @param irn The node (value) the border belongs to.
118 * @param pressure The pressure at this point in time.
119 * @param step A time step for the border.
120 * @param is_def Is the border a use or a def.
121 * @return The created border.
123 static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
124 ir_node *irn, unsigned step, unsigned pressure,
125 unsigned is_def, unsigned is_real)
/* NOTE(review): the original if(is_def)/else branch structure is elided in
 * this view; the two obstack allocations below belong to the use case, which
 * pre-allocates the matching def border as well. */
132 b = obstack_alloc(env->obst, sizeof(*b));
134 /* also allocate the def and tie it to the use. */
135 def = obstack_alloc(env->obst, sizeof(*def));
136 memset(def, 0, sizeof(*def));
141 * Set the link field of the irn to the def.
142 * This strongly relies on the fact, that the use is always
143 * made before the def.
145 set_irn_link(irn, def);
/* Magic values are only written in debug builds; checked by check_border_list(). */
147 DEBUG_ONLY(b->magic = BORDER_FOURCC);
148 DEBUG_ONLY(def->magic = BORDER_FOURCC);
152 * If the def is encountered, the use was made and so was the
153 * the def node (see the code above). It was placed into the
154 * link field of the irn, so we can get it there.
157 b = get_irn_link(irn);
159 DEBUG_ONLY(assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered"));
/* Common path: fill in the remaining fields and enqueue the border. */
162 b->pressure = pressure;
164 b->is_real = is_real;
167 list_add_tail(&b->list, head);
168 DBG((dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
175 * Check, if an irn is of the register class currently under processing.
176 * @param env The chordal environment.
177 * @param irn The node.
178 * @return 1, if the node is of that register class, 0 if not.
180 static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
182 return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
185 static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
187 bitset_t *tmp = alloc_env->tmp_colors;
188 bitset_copy(tmp, colors);
189 bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
190 return bitset_next_clear(tmp, 0);
/**
 * Determine the decisive set of admissible registers for a pair of partnered
 * operands: if one operand's register set contains the other's, the smaller
 * (more constrained) set wins.
 * NOTE(review): the guard branches handling a NULL o1/o2 are elided in this
 * view; the two unconditional-looking bitset_copy lines below presumably
 * belong to those branches — confirm against the full source.
 */
193 static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
198 bitset_copy(bs, o2->regs);
203 bitset_copy(bs, o1->regs);
207 assert(o1->req->cls == o2->req->cls || ! o1->req->cls || ! o2->req->cls);
/* Pick the contained (i.e. more restrictive) register set of the two. */
209 if(bitset_contains(o1->regs, o2->regs))
210 bitset_copy(bs, o1->regs);
211 else if(bitset_contains(o2->regs, o1->regs))
212 bitset_copy(bs, o2->regs);
/**
 * Scan an instruction into a be_insn_t using an insn environment derived
 * from the chordal environment.
 * NOTE(review): the declaration of the insn environment (ie) and the
 * initialization of its remaining fields are elided in this view.
 */
219 static be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
223 ie.ignore_colors = env->ignore_colors;
224 ie.aenv = env->birg->main_env->arch_env;
227 return be_scan_insn(&ie, irn);
/**
 * Prepare a (possibly constrained) instruction for register allocation by
 * inserting Copy nodes in three situations:
 *  1) an argument carries an ignore register that violates a limited
 *     input requirement,
 *  2) the same value occurs in several differently-constrained operands,
 *  3) a constrained argument lives through the node and is constrained to a
 *     register that also occurs in an output constraint.
 * @param env The chordal environment.
 * @param irn The instruction to prepare.
 * @return The next instruction to process (insn->next_insn).
 */
230 static ir_node *prepare_constr_insn(be_chordal_env_t *env, ir_node *irn)
232 const be_irg_t *birg = env->birg;
233 const arch_env_t *aenv = birg->main_env->arch_env;
234 bitset_t *tmp = bitset_alloca(env->cls->n_regs);
235 bitset_t *def_constr = bitset_alloca(env->cls->n_regs);
236 ir_node *bl = get_nodes_block(irn);
237 be_lv_t *lv = env->birg->lv;
/* Case 1: copy ignore-register arguments that violate limited input reqs. */
242 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
243 ir_node *op = get_irn_n(irn, i);
245 const arch_register_t *reg;
246 const arch_register_req_t *req;
/* Skip operands of a different register class. */
248 if (arch_get_irn_reg_class(aenv, irn, i) != env->cls)
251 reg = arch_get_irn_register(aenv, op);
253 if (reg == NULL || !arch_register_type_is(reg, ignore))
255 if(arch_register_type_is(reg, joker))
258 req = arch_get_register_req(irn, i);
259 if (!arch_register_req_is(req, limited))
262 if (rbitset_is_set(req->limited, reg->index))
265 copy = be_new_Copy(env->cls, env->irg, bl, op);
266 be_stat_ev("constr_copy", 1);
268 sched_add_before(irn, copy);
269 set_irn_n(irn, i, copy);
270 DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, irn, i));
273 insn = chordal_scan_insn(env, irn);
275 if(!insn->has_constraints)
/* Case 2: */
278 /* insert copies for nodes that occur constrained more than once. */
279 for(i = insn->use_start; i < insn->n_ops; ++i) {
280 be_operand_t *op = &insn->ops[i];
282 if(!op->has_constraints)
285 for(j = i + 1; j < insn->n_ops; ++j) {
287 be_operand_t *a_op = &insn->ops[j];
289 if(a_op->carrier != op->carrier || !a_op->has_constraints)
292 /* if the constraint is the same, no copy is necessary
293 * TODO generalise unequal but overlapping constraints */
294 if (a_op->req == op->req)
297 if (be_is_Copy(get_irn_n(insn->irn, a_op->pos)))
300 copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
301 be_stat_ev("constr_copy", 1);
303 sched_add_before(insn->irn, copy);
304 set_irn_n(insn->irn, a_op->pos, copy);
305 DBG((dbg, LEVEL_3, "inserting multiple constr copy %+F for %+F pos %d\n", copy, insn->irn, a_op->pos));
309 /* collect all registers occurring in out constraints. */
310 for(i = 0; i < insn->use_start; ++i) {
311 be_operand_t *op = &insn->ops[i];
312 if(op->has_constraints)
313 bitset_or(def_constr, op->regs);
/* Case 3: */
317 insert copies for all constrained arguments living through the node
318 and being constrained to a register which also occurs in out constraints.
320 for(i = insn->use_start; i < insn->n_ops; ++i) {
322 be_operand_t *op = &insn->ops[i];
324 bitset_copy(tmp, op->regs);
325 bitset_and(tmp, def_constr);
329 1) the operand is constrained.
330 2) lives through the node.
331 3) is constrained to a register occurring in out constraints.
333 if(!op->has_constraints ||
334 !values_interfere(birg, insn->irn, op->carrier) ||
335 bitset_popcnt(tmp) == 0)
339 only create the copy if the operand is no copy.
340 this is necessary since the assure constraints phase inserts
341 Copies and Keeps for operands which must be different from the
342 results. Additional copies here would destroy this.
344 if (be_is_Copy(get_irn_n(insn->irn, op->pos)))
347 copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
349 sched_add_before(insn->irn, copy);
350 set_irn_n(insn->irn, op->pos, copy);
351 DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n", copy, insn->irn, op->pos));
352 be_liveness_update(lv, op->carrier);
/* NOTE(review): use-after-free — insn is released to the obstack on the next
 * line and then insn->next_insn is read from the freed memory. The next_insn
 * pointer should be saved into a local before the obstack_free call. */
356 obstack_free(env->obst, insn);
357 return insn->next_insn;
360 static void pre_spill_prepare_constr_walker(ir_node *bl, void *data)
362 be_chordal_env_t *env = data;
364 for(irn = sched_first(bl); !sched_is_end(irn);) {
365 irn = prepare_constr_insn(env, irn);
369 void be_pre_spill_prepare_constr(be_chordal_env_t *cenv) {
370 irg_block_walk_graph(cenv->irg, pre_spill_prepare_constr_walker, NULL, (void *) cenv);
/**
 * For every output operand of the instruction, search for a compatible,
 * non-interfering input operand and pair the two up (so both can later be
 * assigned the same register). Among the candidates, the pair with the
 * smallest combined register sets (most constrained) is preferred.
 * @param alloc_env The allocation environment.
 * @param insn      The scanned instruction whose operands are paired.
 */
373 static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t *insn)
375 const be_chordal_env_t *env = alloc_env->chordal_env;
377 int n_uses = be_insn_n_uses(insn);
378 int n_defs = be_insn_n_defs(insn);
379 bitset_t *bs = bitset_alloca(env->cls->n_regs);
380 int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));
385 For each out operand, try to find an in operand which can be assigned the
386 same register as the out operand.
388 for (j = 0; j < insn->use_start; ++j) {
/* Sentinel larger than any reachable combined popcount. */
390 int smallest_n_regs = 2 * env->cls->n_regs + 1;
391 be_operand_t *out_op = &insn->ops[j];
393 /* Try to find an in operand which has ... */
394 for(i = insn->use_start; i < insn->n_ops; ++i) {
396 const be_operand_t *op = &insn->ops[i];
/* Skip operands already paired or interfering with the insn. */
398 if (op->partner != NULL)
400 if (values_interfere(env->birg, op->irn, op->carrier))
403 bitset_clear_all(bs);
404 bitset_copy(bs, op->regs);
405 bitset_and(bs, out_op->regs);
406 n_total = bitset_popcnt(op->regs) + bitset_popcnt(out_op->regs);
/* Remember the candidate with a non-empty intersection and the
 * smallest combined register set seen so far. */
408 if (bitset_popcnt(bs) > 0 && n_total < smallest_n_regs) {
410 smallest_n_regs = n_total;
415 be_operand_t *partner = &insn->ops[smallest];
/* Link every operand carrying the same value to the out operand. */
416 for(i = insn->use_start; i < insn->n_ops; ++i) {
417 if(insn->ops[i].carrier == partner->carrier)
418 insn->ops[i].partner = out_op;
421 out_op->partner = partner;
422 partner->partner = out_op;
/**
 * Pre-process a constrained instruction: insert a Perm over the live values
 * in front of it, clear the registers insert_Perm_after() propagated to the
 * Perm's Projs, re-scan the instruction (its inputs are now those Projs) and
 * copy the input constraints onto the Perm as output constraints for later
 * coalescing.
 * @param alloc_env The allocation environment.
 * @param the_insn  In/out: the scanned instruction; replaced by the re-scan.
 * @return The inserted Perm node (NOTE(review): the elided tail presumably
 *         returns perm — confirm against the full source).
 */
428 static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env,
429 be_insn_t **the_insn)
431 be_chordal_env_t *env = alloc_env->chordal_env;
432 const arch_env_t *aenv = env->birg->main_env->arch_env;
433 be_insn_t *insn = *the_insn;
434 ir_node *perm = NULL;
435 bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
436 const ir_edge_t *edge;
439 assert(insn->has_constraints && "only do this for constrained nodes");
442 Collect all registers that occur in output constraints.
443 This is necessary, since if the insn has one of these as an input constraint
444 and the corresponding operand interferes with the insn, the operand must
447 for(i = 0; i < insn->use_start; ++i) {
448 be_operand_t *op = &insn->ops[i];
449 if(op->has_constraints)
450 bitset_or(out_constr, op->regs);
454 Make the Perm, recompute liveness and re-scan the insn since the
455 in operands are now the Projs of the Perm.
457 perm = insert_Perm_after(env->birg, env->cls, sched_prev(insn->irn));
459 /* Registers are propagated by insert_Perm_after(). Clean them here! */
463 be_stat_ev("constr_perm", get_irn_arity(perm));
464 foreach_out_edge(perm, edge) {
465 ir_node *proj = get_edge_src_irn(edge);
466 arch_set_irn_register(aenv, proj, NULL);
470 We also have to re-build the insn since the input operands are now the Projs of
471 the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
472 the live sets may change.
/* Free the old scan result and re-scan the (now re-wired) instruction. */
474 obstack_free(env->obst, insn);
475 *the_insn = insn = chordal_scan_insn(env, insn->irn);
478 Copy the input constraints of the insn to the Perm as output
479 constraints. Succeeding phases (coalescing) will need that.
481 for(i = insn->use_start; i < insn->n_ops; ++i) {
482 be_operand_t *op = &insn->ops[i];
483 ir_node *proj = op->carrier;
485 Note that the predecessor must not be a Proj of the Perm,
486 since ignore-nodes are not Perm'ed.
488 if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
489 be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), op->req);
/**
 * Handle a (possibly constrained) instruction during the constraint walk:
 * record pre-colored operands, toggle the silent flag at Barriers, and — for
 * constrained instructions — insert a Perm, pair up operands, build a
 * bipartite graph of (node, admissible color) edges, compute a matching and
 * assign the resulting registers to the operands and the Perm's Projs.
 * @param alloc_env The allocation environment.
 * @param irn       The instruction to handle.
 * @param silent    In/out: silence flag toggled at Barrier nodes; while set,
 *                  constraint handling is skipped.
 * @return The next instruction to process (saved before insn is freed).
 */
496 static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env,
497 ir_node *irn, int *silent)
499 const arch_env_t *aenv;
502 ir_node **alloc_nodes;
503 //hungarian_problem_t *bp;
508 const ir_edge_t *edge;
509 ir_node *perm = NULL;
510 //int match_res, cost;
511 be_chordal_env_t *env = alloc_env->chordal_env;
/* base marks the obstack top; everything allocated below is freed at once. */
512 void *base = obstack_base(env->obst);
513 be_insn_t *insn = chordal_scan_insn(env, irn);
/* next_insn is saved here, before insn's memory is released at the end. */
514 ir_node *res = insn->next_insn;
515 int be_silent = *silent;
516 be_irg_t *birg = env->birg;
/* Remember all pre-colored results so assign() treats them as fixed. */
519 if(insn->pre_colored) {
521 for(i = 0; i < insn->use_start; ++i)
522 pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);
526 If the current node is a barrier toggle the silent flag.
527 If we are in the start block, we are ought to be silent at the beginning,
528 so the toggling activates the constraint handling but skips the barrier.
529 If we are in the end block we handle the in requirements of the barrier
530 and set the rest to silent.
532 if(be_is_Barrier(irn))
539 Perms inserted before the constraint handling phase are considered to be
540 correctly precolored. These Perms arise during the ABI handling phase.
542 if(!insn->has_constraints)
545 aenv = env->birg->main_env->arch_env;
546 n_regs = env->cls->n_regs;
547 bs = bitset_alloca(n_regs);
548 alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
549 //bp = hungarian_new(n_regs, n_regs, 2, HUNGARIAN_MATCH_PERFECT);
550 bp = bipartite_new(n_regs, n_regs);
551 assignment = alloca(n_regs * sizeof(assignment[0]));
552 partners = pmap_create();
555 prepare the constraint handling of this node.
556 Perms are constructed and Copies are created for constrained values
557 interfering with the instruction.
559 perm = pre_process_constraints(alloc_env, &insn);
561 /* find suitable in operands to the out operands of the node. */
562 pair_up_operands(alloc_env, insn);
565 look at the in/out operands and add each operand (and its possible partner)
566 to a bipartite graph (left: nodes with partners, right: admissible colors).
568 for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
569 be_operand_t *op = &insn->ops[i];
572 If the operand has no partner or the partner has not been marked
573 for allocation, determine the admissible registers and mark it
574 for allocation by associating the node and its partner with the
575 set of admissible registers via a bipartite graph.
577 if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
578 ir_node *partner = op->partner ? op->partner->carrier : NULL;
581 pmap_insert(partners, op->carrier, partner);
583 pmap_insert(partners, partner, op->carrier);
/* NOTE(review): the loop variable i is reused for this inner scan; the
 * elided lines presumably restore/shadow it — confirm against full source. */
585 /* don't insert a node twice */
586 for(i = 0; i < n_alloc; ++i) {
587 if(alloc_nodes[i] == op->carrier) {
594 alloc_nodes[n_alloc] = op->carrier;
596 DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier,
599 bitset_clear_all(bs);
600 get_decisive_partner_regs(bs, op, op->partner);
602 DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier,
605 bitset_foreach(bs, col) {
606 //hungarian_add(bp, n_alloc, col, 1);
607 bipartite_add(bp, n_alloc, col);
615 Put all nodes which live through the constrained instruction also to the
616 allocation bipartite graph. They are considered unconstrained.
619 foreach_out_edge(perm, edge) {
621 ir_node *proj = get_edge_src_irn(edge);
623 assert(is_Proj(proj));
625 if(!values_interfere(birg, proj, irn) || pmap_contains(partners, proj))
628 /* don't insert a node twice */
629 for(i = 0; i < n_alloc; ++i) {
630 if(alloc_nodes[i] == proj) {
638 assert(n_alloc < n_regs);
640 alloc_nodes[n_alloc] = proj;
641 pmap_insert(partners, proj, NULL);
/* Live-through values may take any allocatable, non-ignore color. */
643 bitset_clear_all(bs);
644 arch_put_non_ignore_regs(env->cls, bs);
645 bitset_andnot(bs, env->ignore_colors);
646 bitset_foreach(bs, col) {
647 //hungarian_add(bp, n_alloc, col, 1);
648 bipartite_add(bp, n_alloc, col);
655 /* Compute a valid register allocation. */
657 hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
658 match_res = hungarian_solve(bp, assignment, &cost, 1);
659 assert(match_res == 0 && "matching failed");
661 bipartite_matching(bp, assignment);
664 /* Assign colors obtained from the matching. */
665 for(i = 0; i < n_alloc; ++i) {
666 const arch_register_t *reg;
669 assert(assignment[i] >= 0 && "there must have been a register assigned");
670 reg = arch_register_for_index(env->cls, assignment[i]);
671 assert(! (reg->type & arch_register_type_ignore));
673 irn = alloc_nodes[i];
675 arch_set_irn_register(aenv, irn, reg);
676 (void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
677 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));
/* The partner (if any) gets the same register as its pair. */
680 irn = pmap_get(partners, alloc_nodes[i]);
682 arch_set_irn_register(aenv, irn, reg);
683 (void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
684 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));
688 /* Allocate the non-constrained Projs of the Perm. */
690 bitset_clear_all(bs);
692 /* Put the colors of all Projs in a bitset. */
693 foreach_out_edge(perm, edge) {
694 ir_node *proj = get_edge_src_irn(edge);
695 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
698 bitset_set(bs, reg->index);
701 /* Assign the not yet assigned Projs of the Perm a suitable color. */
702 foreach_out_edge(perm, edge) {
703 ir_node *proj = get_edge_src_irn(edge);
704 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
706 DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));
709 col = get_next_free_reg(alloc_env, bs);
710 reg = arch_register_for_index(env->cls, col);
711 bitset_set(bs, reg->index);
712 arch_set_irn_register(aenv, proj, reg);
713 pset_insert_ptr(alloc_env->pre_colored, proj);
714 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));
720 //hungarian_free(bp);
721 pmap_destroy(partners);
/* Release everything allocated on the obstack since function entry. */
724 obstack_free(env->obst, base);
729 * Handle constraint nodes in each basic block.
730 * handle_constraints() inserts Perm nodes which perm
731 * over all values live at the constrained node right in front
732 * of the constrained node. These Perms signal a constrained node.
733 * For further comments, refer to handle_constraints().
735 static void constraints(ir_node *bl, void *data)
737 be_chordal_alloc_env_t *env = data;
740 Start silent in the start block.
741 The silence remains until the first barrier is seen.
742 Each other block is begun loud.
/* silent == 1 only for the start block; see the comment above. */
744 int silent = bl == get_irg_start_block(get_irn_irg(bl));
748 If the block is the start block search the barrier and
749 start handling constraints from there.
/* handle_constraints() returns the next instruction, advancing the loop. */
752 for(irn = sched_first(bl); !sched_is_end(irn);) {
753 irn = handle_constraints(env, irn, &silent);
758 * Annotate the register pressure to the nodes and compute
759 * the liveness intervals.
760 * @param block The block to do it for.
761 * @param env_ptr The environment.
763 static void pressure(ir_node *block, void *env_ptr)
765 /* Convenience macro for a def */
766 #define border_def(irn, step, real) \
767 border_add(env, head, irn, step, pressure--, 1, real)
769 /* Convenience macro for a use */
770 #define border_use(irn, step, real) \
771 border_add(env, head, irn, step, ++pressure, 0, real)
773 be_chordal_alloc_env_t *alloc_env = env_ptr;
774 be_chordal_env_t *env = alloc_env->chordal_env;
775 bitset_t *live = alloc_env->live;
777 be_lv_t *lv = env->birg->lv;
782 unsigned pressure = 0;
783 struct list_head *head;
785 DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
786 bitset_clear_all(live);
788 /* Set up the border list in the block info */
789 head = obstack_alloc(env->obst, sizeof(*head));
790 INIT_LIST_HEAD(head);
/* Each block must get its border list exactly once. */
791 assert(pmap_get(env->border_heads, block) == NULL);
792 pmap_insert(env->border_heads, block, head);
795 * Make final uses of all values live out of the block.
796 * They are necessary to build up real intervals.
798 be_lv_foreach(lv, block, be_lv_state_end, i) {
799 ir_node *irn = be_lv_get_irn(lv, block, i);
800 if(has_reg_class(env, irn)) {
801 DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_idx(irn)));
802 bitset_set(live, get_irn_idx(irn));
/* is_real == 0: this use marks the block end, not a real instruction. */
803 border_use(irn, step, 0);
809 * Determine the last uses of a value inside the block, since they are
810 * relevant for the interval borders.
/* Walk the schedule backwards: defs close intervals, first-seen uses open them. */
812 sched_foreach_reverse(block, irn) {
813 DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
814 DBG((dbg, LEVEL_2, "\tlive: %B\n", live));
/* mode_T nodes define their values through Proj nodes. */
816 if (get_irn_mode(irn) == mode_T) {
817 const ir_edge_t *edge;
819 foreach_out_edge(irn, edge) {
820 ir_node *proj = get_edge_src_irn(edge);
823 * If the node defines some value, which can put into a
824 * register of the current class, make a border for it.
826 if(has_reg_class(env, proj)) {
827 int nr = get_irn_idx(proj);
829 bitset_clear(live, nr);
830 border_def(proj, step, 1);
836 * If the node defines some value, which can put into a
837 * register of the current class, make a border for it.
839 if(has_reg_class(env, irn)) {
840 int nr = get_irn_idx(irn);
842 bitset_clear(live, nr);
843 border_def(irn, step, 1);
847 * If the node is no phi node we can examine the uses.
850 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
851 ir_node *op = get_irn_n(irn, i);
853 if(has_reg_class(env, op)) {
854 int nr = get_irn_idx(op);
855 const char *msg = "-";
/* Only the last (first seen in reverse) use creates a border. */
857 if(!bitset_is_set(live, nr)) {
858 border_use(op, step, 1);
859 bitset_set(live, nr);
863 DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));
/* Close intervals of values live into the block with artificial defs. */
870 bitset_foreach(live, elm) {
871 ir_node *irn = get_idx_irn(env->irg, elm);
872 if (be_is_live_in(lv, block, irn))
873 border_def(irn, step, 0);
/**
 * Color the values of one block by walking its border list. Live-in values
 * keep the registers assigned in the dominator; local defs get the next free
 * color (pre-colored/ignore values keep theirs); uses release their color.
 * Relies on the perfect elimination order induced by the border list.
 * @param block   The block to color.
 * @param env_ptr The be_chordal_alloc_env_t (passed as void* by the walker).
 */
877 static void assign(ir_node *block, void *env_ptr)
879 be_chordal_alloc_env_t *alloc_env = env_ptr;
880 be_chordal_env_t *env = alloc_env->chordal_env;
881 bitset_t *live = alloc_env->live;
882 bitset_t *colors = alloc_env->colors;
883 bitset_t *in_colors = alloc_env->in_colors;
884 const arch_env_t *arch_env = env->birg->main_env->arch_env;
885 struct list_head *head = get_block_border_head(env, block);
886 be_lv_t *lv = env->birg->lv;
892 bitset_clear_all(colors);
893 bitset_clear_all(live);
894 bitset_clear_all(in_colors);
896 DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
897 DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
898 list_for_each_entry(border_t, b, head, list) {
899 DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
900 b->irn, get_irn_idx(b->irn)));
904 * Add initial defs for all values live in.
905 * Since their colors have already been assigned (The dominators were
906 * allocated before), we have to mark their colors as used also.
908 be_lv_foreach(lv, block, be_lv_state_in, idx) {
909 irn = be_lv_get_irn(lv, block, idx);
910 if(has_reg_class(env, irn)) {
911 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
914 assert(reg && "Node must have been assigned a register");
915 col = arch_register_get_index(reg);
917 DBG((dbg, LEVEL_4, "%+F has reg %s\n", irn, reg->name));
919 /* Mark the color of the live in value as used. */
920 bitset_set(colors, col);
921 bitset_set(in_colors, col);
923 /* Mark the value live in. */
924 bitset_set(live, get_irn_idx(irn));
929 * Mind that the sequence of defs from back to front defines a perfect
930 * elimination order. So, coloring the definitions from first to last
933 list_for_each_entry_reverse(border_t, b, head, list) {
934 ir_node *irn = b->irn;
935 int nr = get_irn_idx(irn);
936 int ignore = arch_irn_is(arch_env, irn, ignore);
939 * Assign a color, if it is a local def. Global defs already have a
942 if(b->is_def && !be_is_live_in(lv, block, irn)) {
943 const arch_register_t *reg;
/* Pre-colored and ignore values keep their already-fixed register. */
946 if(ignore || pset_find_ptr(alloc_env->pre_colored, irn)) {
947 reg = arch_get_irn_register(arch_env, irn);
949 assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
951 col = get_next_free_reg(alloc_env, colors);
952 reg = arch_register_for_index(env->cls, col);
953 assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
954 assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");
957 bitset_set(colors, col);
958 arch_set_irn_register(arch_env, irn, reg);
960 DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n", arch_register_get_name(reg), col, irn));
962 assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
963 bitset_set(live, nr);
966 /* Clear the color upon a use. */
967 else if(!b->is_def) {
968 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
971 assert(reg && "Register must have been assigned");
973 col = arch_register_get_index(reg);
/* Ignore registers are never tracked in the color mask. */
975 if(!arch_register_type_is(reg, ignore)) {
976 assert(bitset_is_set(live, nr) && "Cannot have a non live use");
980 bitset_clear(colors, col);
981 bitset_clear(live, nr);
/**
 * Driver for chordal register allocation of one register class:
 * assure liveness, handle constraints (dominance-order walk), compute
 * pressure/borders, assign colors, and optionally dump debug graphs.
 * @param chordal_env The chordal environment to color.
 */
986 void be_ra_chordal_color(be_chordal_env_t *chordal_env)
988 be_chordal_alloc_env_t env;
991 be_irg_t *birg = chordal_env->birg;
992 const arch_register_class_t *cls = chordal_env->cls;
994 int colors_n = arch_register_class_n_regs(cls);
995 ir_graph *irg = chordal_env->irg;
/* Liveness info (sets and checks) is required by all following walks. */
997 lv = be_assure_liveness(birg);
998 be_liveness_assure_sets(lv);
999 be_liveness_assure_chk(lv);
1003 env.chordal_env = chordal_env;
1004 env.colors_n = colors_n;
1005 env.colors = bitset_alloca(colors_n);
1006 env.tmp_colors = bitset_alloca(colors_n);
1007 env.in_colors = bitset_alloca(colors_n);
1008 env.pre_colored = pset_new_ptr_default();
1010 BE_TIMER_PUSH(t_constr);
1012 /* Handle register targeting constraints */
1013 dom_tree_walk_irg(irg, constraints, NULL, &env);
1015 if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
1016 snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
1017 be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);
1020 BE_TIMER_POP(t_constr);
/* Sized by node index so every value can be tracked in the live bitset. */
1022 env.live = bitset_malloc(get_irg_last_idx(chordal_env->irg));
1024 /* First, determine the pressure */
1025 dom_tree_walk_irg(irg, pressure, NULL, &env);
1027 /* Assign the colors */
1028 dom_tree_walk_irg(irg, assign, NULL, &env);
1030 if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
1032 ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
1033 plotter = new_plotter_ps(buf);
1034 draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
1035 plotter_free(plotter);
/* Release the heap-allocated live bitset and the pre-colored set. */
1038 bitset_free(env.live);
1039 del_pset(env.pre_colored);
1042 void be_init_chordal(void)
1044 FIRM_DBG_REGISTER(dbg, "firm.be.chordal");
1047 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal);