2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Chordal register allocation.
23 * @author Sebastian Hack
37 #include "raw_bitset.h"
39 #include "bipartite.h"
40 #include "hungarian.h"
43 #include "irgraph_t.h"
44 #include "irprintf_t.h"
56 #include "besched_t.h"
63 #include "bestatevent.h"
65 #include "beintlive_t.h"
67 #include "bechordal_t.h"
68 #include "bechordal_draw.h"
/* Debug output handle for this module (present in debug builds only). */
71 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
75 #define DUMP_INTERVALS
/* Per-pass state shared by the chordal-coloring walkers below. */
77 typedef struct _be_chordal_alloc_env_t {
78 be_chordal_env_t *chordal_env;
80 pset *pre_colored; /**< Set of precolored nodes. */
81 bitset_t *live; /**< A liveness bitset. */
82 bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
83 bitset_t *colors; /**< The color mask. */
84 bitset_t *in_colors; /**< Colors used by live in values. */
85 int colors_n; /**< The number of colors. */
86 } be_chordal_alloc_env_t;
90 /* Make a fourcc for border checking. */
91 #define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
/**
 * Debug helper: assert that every border in a block's border list still
 * carries the BORDER_FOURCC magic (catches corrupted or stale borders).
 * @param head  Head of the block's border list.
 */
94 static void check_border_list(struct list_head *head)
97 	list_for_each_entry(border_t, x, head, list) {
98 		assert(x->magic == BORDER_FOURCC);
/**
 * Debug helper: run check_border_list() on the border list of every block
 * recorded in env->border_heads.
 * @param env  The chordal environment whose border lists are verified.
 */
102 static void check_heads(be_chordal_env_t *env)
105 	for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
106 		/* ir_printf("checking border list of block %+F\n", ent->key); */
107 		check_border_list(ent->value);
113 * Add an interval border to a block's
114 * list of interval borders.
115 * @note You always have to create the use before the def.
116 * @param env The environment.
117 * @param head The list head to enqueue the borders.
118 * @param irn The node (value) the border belongs to.
119 * @param pressure The pressure at this point in time.
120 * @param step A time step for the border.
121 * @param is_def Nonzero if the border is a def, zero if it is a use.
122 * @return The created border.
/* NOTE(review): this listing is elided (the embedded line numbers are
 * non-contiguous), so the if/else that distinguishes the use case from the
 * def case is not fully visible here — confirm against the unelided file. */
124 static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
125 	ir_node *irn, unsigned step, unsigned pressure,
126 	unsigned is_def, unsigned is_real)
	/* Use case: allocate the use border, plus its matching def border,
	 * on the environment's obstack. */
133 		b = obstack_alloc(env->obst, sizeof(*b));
135 		/* also allocate the def and tie it to the use. */
136 		def = obstack_alloc(env->obst, sizeof(*def));
137 		memset(def, 0, sizeof(*def));
142 		* Set the link field of the irn to the def.
143 		* This strongly relies on the fact, that the use is always
144 		* made before the def.
146 		set_irn_link(irn, def);
148 		DEBUG_ONLY(b->magic = BORDER_FOURCC);
149 		DEBUG_ONLY(def->magic = BORDER_FOURCC);
	/* Def case: the matching border was pre-allocated by the earlier use
	 * and stashed in the node's link field — retrieve it from there. */
153 		* If the def is encountered, the use was made and so was the
154 		* the def node (see the code above). It was placed into the
155 		* link field of the irn, so we can get it there.
158 		b = get_irn_link(irn);
160 		assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered");
	/* Fill in the common fields and enqueue the border on the block list. */
163 	b->pressure = pressure;
165 	b->is_real = is_real;
168 	list_add_tail(&b->list, head);
169 	DBG((dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
176 * Check, if an irn is of the register class currently under processing.
177 * @param env The chordal environment.
178 * @param irn The node.
179 * @return 1, if the node is of that register class, 0 if not.
/* Thin forwarder to arch_irn_consider_in_reg_alloc() for the class under
 * processing (see the doxygen comment above). */
181 static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
183 	return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
/* Evaluates to nonzero if querying the register requirement for irn
 * (operand -1, i.e. the result) succeeds and the requirement is of the
 * "limited" kind. Relies on a local `arch_env` being in scope at the
 * expansion site. */
186 #define has_limited_constr(req, irn) \
187 	(arch_get_register_req(arch_env, (req), irn, -1) && (req)->type == arch_register_req_type_limited)
/**
 * Find the index of the first register that is neither currently in use
 * (set in @p colors) nor globally ignored for this class.
 * Uses alloc_env->tmp_colors as scratch space, so @p colors is untouched.
 * @param alloc_env  The allocation environment.
 * @param colors     Bitset of colors currently occupied.
 * @return Index of the first clear (free, allocatable) color.
 */
189 static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
191 	bitset_t *tmp = alloc_env->tmp_colors;
192 	bitset_copy(tmp, colors);
193 	bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
194 	return bitset_next_clear(tmp, 0);
/**
 * Compute into @p bs the "decisive" admissible register set of two paired
 * operands: when one operand's set contains the other's, the contained
 * (smaller) set wins. Both operands must be of the same register class.
 * NOTE(review): this listing is elided — the guards that presumably handle
 * a missing o1/o2 (the unconditional copies at lines 202/207 look like the
 * bodies of such branches) are not visible here; confirm in the full file.
 * @return @p bs (presumably; the return statement is elided — verify).
 */
197 static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
202 		bitset_copy(bs, o2->regs);
207 		bitset_copy(bs, o1->regs);
211 		assert(o1->req->cls == o2->req->cls || ! o1->req->cls || ! o2->req->cls);
213 		if(bitset_contains(o1->regs, o2->regs))
214 			bitset_copy(bs, o1->regs);
215 		else if(bitset_contains(o2->regs, o1->regs))
216 			bitset_copy(bs, o2->regs);
/**
 * Scan one instruction into a be_insn_t using this pass's insn environment
 * (ignore colors and arch env taken from @p env).
 * @param env  The chordal environment.
 * @param irn  The instruction to scan.
 * @return The scanned instruction descriptor from be_scan_insn().
 */
223 static be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
227 	ie.ignore_colors = env->ignore_colors;
228 	ie.aenv = env->birg->main_env->arch_env;
231 	return be_scan_insn(&ie, irn);
/**
 * Prepare a constrained instruction before spilling: insert Copy nodes
 * (a) in front of ignore-register arguments that violate a limited input
 * requirement, (b) for values used constrained more than once, and
 * (c) for constrained arguments that live through the node and whose
 * allowed registers collide with the out constraints.
 * @param env  The chordal environment.
 * @param irn  The instruction to prepare.
 * @return The next instruction to process (insn->next_insn).
 */
234 static ir_node *prepare_constr_insn(be_chordal_env_t *env, ir_node *irn)
236 	const be_irg_t *birg = env->birg;
237 	const arch_env_t *aenv = birg->main_env->arch_env;
238 	bitset_t *tmp = bitset_alloca(env->cls->n_regs);
239 	bitset_t *def_constr = bitset_alloca(env->cls->n_regs);
240 	ir_node *bl = get_nodes_block(irn);
241 	be_lv_t *lv = env->birg->lv;
	/* Walk the arguments: copy any ignore-register argument whose assigned
	 * register violates the instruction's limited input requirement. */
246 	for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
247 		ir_node *op = get_irn_n(irn, i);
249 		const arch_register_t *reg;
250 		const arch_register_req_t *req;
		/* Skip arguments outside the class under processing; the elided
		 * lines presumably `continue` on each of the guards below. */
252 		if (arch_get_irn_reg_class(aenv, irn, i) != env->cls)
255 		reg = arch_get_irn_register(aenv, op);
257 		if (reg == NULL || !arch_register_type_is(reg, ignore))
259 		if(arch_register_type_is(reg, joker))
262 		req = arch_get_register_req(aenv, irn, i);
263 		if (!arch_register_req_is(req, limited))
266 		if (rbitset_is_set(req->limited, reg->index))
		/* The ignore register does not satisfy the limited requirement:
		 * route the argument through a fresh Copy. */
269 		copy = be_new_Copy(env->cls, env->irg, bl, op);
270 		be_stat_ev("constr_copy", 1);
272 		sched_add_before(irn, copy);
273 		set_irn_n(irn, i, copy);
274 		DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, irn, i));
277 	insn = chordal_scan_insn(env, irn);
279 	if(!insn->has_constraints)
282 	/* insert copies for nodes that occur constrained more than once. */
283 	for(i = insn->use_start; i < insn->n_ops; ++i) {
284 		be_operand_t *op = &insn->ops[i];
286 		if(!op->has_constraints)
289 		for(j = i + 1; j < insn->n_ops; ++j) {
291 			be_operand_t *a_op = &insn->ops[j];
293 			if(a_op->carrier != op->carrier || !a_op->has_constraints)
			/* Skip operands already fed by a Copy (e.g. from an earlier
			 * pass); only the later duplicate uses get a new Copy. */
296 			if (be_is_Copy(get_irn_n(insn->irn, a_op->pos)))
299 			copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
300 			be_stat_ev("constr_copy", 1);
302 			sched_add_before(insn->irn, copy);
303 			set_irn_n(insn->irn, a_op->pos, copy);
304 			DBG((dbg, LEVEL_3, "inserting multiple constr copy %+F for %+F pos %d\n", copy, insn->irn, a_op->pos));
308 	/* collect all registers occuring in out constraints. */
309 	for(i = 0; i < insn->use_start; ++i) {
310 		be_operand_t *op = &insn->ops[i];
311 		if(op->has_constraints)
312 			bitset_or(def_constr, op->regs);
316 	insert copies for all constrained arguments living through the node
317 	and being constrained to a register which also occurs in out constraints.
319 	for(i = insn->use_start; i < insn->n_ops; ++i) {
321 		be_operand_t *op = &insn->ops[i];
		/* tmp := registers this use is constrained to AND registers
		 * claimed by out constraints — a non-empty overlap is the hazard. */
323 		bitset_copy(tmp, op->regs);
324 		bitset_and(tmp, def_constr);
328 		1) the operand is constrained.
329 		2) lives through the node.
330 		3) is constrained to a register occuring in out constraints.
332 		if(!op->has_constraints ||
333 		!values_interfere(birg, insn->irn, op->carrier) ||
334 		bitset_popcnt(tmp) == 0)
338 		only create the copy if the operand is no copy.
339 		this is necessary since the assure constraints phase inserts
340 		Copies and Keeps for operands which must be different from the
341 		results. Additional copies here would destroy this.
343 		if (be_is_Copy(get_irn_n(insn->irn, op->pos)))
346 		copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
348 		sched_add_before(insn->irn, copy);
349 		set_irn_n(insn->irn, op->pos, copy);
350 		DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n", copy, insn->irn, op->pos));
351 		be_liveness_update(lv, op->carrier);
	/* NOTE(review): insn->next_insn is read AFTER obstack_free() has
	 * released insn — the pointer should be saved into a local before
	 * freeing. Confirm against the unelided source whether line 355/356
	 * really appear in this order. */
355 	obstack_free(env->obst, insn);
356 	return insn->next_insn;
/**
 * Block walker: run prepare_constr_insn() over every scheduled instruction
 * of @p bl, advancing via the returned next-insn pointer.
 * @param bl    The block being walked.
 * @param data  The be_chordal_env_t (passed as void* by the walker).
 */
359 static void pre_spill_prepare_constr_walker(ir_node *bl, void *data)
361 	be_chordal_env_t *env = data;
363 	for(irn = sched_first(bl); !sched_is_end(irn);) {
364 		irn = prepare_constr_insn(env, irn);
/**
 * Public entry point: apply constraint preparation (Copy insertion) to all
 * blocks of the graph before spilling.
 * @param cenv  The chordal environment of the current register class.
 */
368 void be_pre_spill_prepare_constr(be_chordal_env_t *cenv) {
369 	irg_block_walk_graph(cenv->irg, pre_spill_prepare_constr_walker, NULL, (void *) cenv);
/**
 * For each out operand of @p insn, pick an in operand that may share its
 * register: among non-interfering, unpartnered uses with overlapping
 * admissible register sets, choose the one minimizing the combined set
 * size (the "most constrained" candidate), and link the two via the
 * partner fields.
 * @param alloc_env  The allocation environment.
 * @param insn       The scanned instruction whose operands are paired.
 */
372 static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t *insn)
374 	const be_chordal_env_t *env = alloc_env->chordal_env;
376 	int n_uses = be_insn_n_uses(insn);
377 	int n_defs = be_insn_n_defs(insn);
378 	bitset_t *bs = bitset_alloca(env->cls->n_regs);
379 	int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));
384 	For each out operand, try to find an in operand which can be assigned the
385 	same register as the out operand.
387 	for (j = 0; j < insn->use_start; ++j) {
		/* Sentinel larger than any reachable n_total (max is 2*n_regs). */
389 		int smallest_n_regs = 2 * env->cls->n_regs + 1;
390 		be_operand_t *out_op = &insn->ops[j];
392 		/* Try to find an in operand which has ... */
393 		for(i = insn->use_start; i < insn->n_ops; ++i) {
395 			const be_operand_t *op = &insn->ops[i];
			/* Skip uses already paired or interfering with the insn
			 * (elided lines presumably `continue` on these guards). */
397 			if (op->partner != NULL)
399 			if (values_interfere(env->birg, op->irn, op->carrier))
402 			bitset_clear_all(bs);
403 			bitset_copy(bs, op->regs);
404 			bitset_and(bs, out_op->regs);
405 			n_total = bitset_popcnt(op->regs) + bitset_popcnt(out_op->regs);
			/* Keep the candidate with a non-empty overlap and the
			 * tightest combined register budget. */
407 			if (bitset_popcnt(bs) > 0 && n_total < smallest_n_regs) {
409 				smallest_n_regs = n_total;
			/* A candidate was found: also mark every other use of the
			 * same carrier as partnered with this out operand. */
414 			be_operand_t *partner = &insn->ops[smallest];
415 			for(i = insn->use_start; i < insn->n_ops; ++i) {
416 				if(insn->ops[i].carrier == partner->carrier)
417 					insn->ops[i].partner = out_op;
420 			out_op->partner = partner;
421 			partner->partner = out_op;
/**
 * Insert a Perm in front of a constrained instruction, clear the registers
 * insert_Perm_after() propagated onto the Perm's Projs, re-scan the insn
 * (its inputs are now those Projs), and mirror the input constraints onto
 * the Perm's corresponding output positions for later coalescing.
 * @param alloc_env  The allocation environment.
 * @param the_insn   In/out: the scanned insn; replaced by the re-scan.
 * @return The created Perm node.
 */
427 static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env,
428 	be_insn_t **the_insn)
430 	be_chordal_env_t *env = alloc_env->chordal_env;
431 	const arch_env_t *aenv = env->birg->main_env->arch_env;
432 	be_insn_t *insn = *the_insn;
433 	ir_node *perm = NULL;
434 	bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
435 	const ir_edge_t *edge;
438 	assert(insn->has_constraints && "only do this for constrained nodes");
441 	Collect all registers that occur in output constraints.
442 	This is necessary, since if the insn has one of these as an input constraint
443 	and the corresponding operand interferes with the insn, the operand must
446 	for(i = 0; i < insn->use_start; ++i) {
447 		be_operand_t *op = &insn->ops[i];
448 		if(op->has_constraints)
449 			bitset_or(out_constr, op->regs);
453 	Make the Perm, recompute liveness and re-scan the insn since the
454 	in operands are now the Projs of the Perm.
456 	perm = insert_Perm_after(env->birg, env->cls, sched_prev(insn->irn));
458 	/* Registers are propagated by insert_Perm_after(). Clean them here! */
462 	be_stat_ev("constr_perm", get_irn_arity(perm));
463 	foreach_out_edge(perm, edge) {
464 		ir_node *proj = get_edge_src_irn(edge);
465 		arch_set_irn_register(aenv, proj, NULL);
469 	We also have to re-build the insn since the input operands are now the Projs of
470 	the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
471 	the live sets may change.
	/* The old insn descriptor is obsolete — free it and re-scan. */
473 	obstack_free(env->obst, insn);
474 	*the_insn = insn = chordal_scan_insn(env, insn->irn);
477 	Copy the input constraints of the insn to the Perm as output
478 	constraints. Succeeding phases (coalescing) will need that.
480 	for(i = insn->use_start; i < insn->n_ops; ++i) {
481 		be_operand_t *op = &insn->ops[i];
482 		ir_node *proj = op->carrier;
484 		Note that the predecessor must not be a Proj of the Perm,
485 		since ignore-nodes are not Perm'ed.
487 		if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
488 			be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), op->req);
/**
 * Handle one (possibly constrained) instruction: record pre-colored defs,
 * toggle the silent flag at Barriers, and for constrained insns build a
 * bipartite matching between {operands and live-through Perm Projs} and
 * {admissible colors}, then assign the matched registers. A previously
 * tried Hungarian-matching variant is kept as commented-out code.
 * @param alloc_env  The allocation environment.
 * @param irn        The instruction to handle.
 * @param silent     In/out silent flag (constraint handling is skipped
 *                   while silent; toggled by Barrier nodes).
 * @return The next instruction to process.
 */
495 static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env,
496 	ir_node *irn, int *silent)
498 	const arch_env_t *aenv;
501 	ir_node **alloc_nodes;
502 	//hungarian_problem_t *bp;
507 	const ir_edge_t *edge;
508 	ir_node *perm = NULL;
509 	//int match_res, cost;
510 	be_chordal_env_t *env = alloc_env->chordal_env;
	/* Remember the obstack top so everything allocated below (the scanned
	 * insns) can be released in one obstack_free() at the end. */
511 	void *base = obstack_base(env->obst);
512 	be_insn_t *insn = chordal_scan_insn(env, irn);
513 	ir_node *res = insn->next_insn;
514 	int be_silent = *silent;
515 	be_irg_t *birg = env->birg;
	/* Pre-colored defs only need to be recorded, not matched. */
518 	if(insn->pre_colored) {
520 		for(i = 0; i < insn->use_start; ++i)
521 			pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);
525 	If the current node is a barrier toggle the silent flag.
526 	If we are in the start block, we are ought to be silent at the beginning,
527 	so the toggling activates the constraint handling but skips the barrier.
528 	If we are in the end block we handle the in requirements of the barrier
529 	and set the rest to silent.
531 	if(be_is_Barrier(irn))
538 	Perms inserted before the constraint handling phase are considered to be
539 	correctly precolored. These Perms arise during the ABI handling phase.
541 	if(!insn->has_constraints)
544 	aenv = env->birg->main_env->arch_env;
545 	n_regs = env->cls->n_regs;
546 	bs = bitset_alloca(n_regs);
547 	alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
548 	//bp = hungarian_new(n_regs, n_regs, 2, HUNGARIAN_MATCH_PERFECT);
549 	bp = bipartite_new(n_regs, n_regs);
550 	assignment = alloca(n_regs * sizeof(assignment[0]));
551 	partners = pmap_create();
554 	prepare the constraint handling of this node.
555 	Perms are constructed and Copies are created for constrained values
556 	interfering with the instruction.
558 	perm = pre_process_constraints(alloc_env, &insn);
560 	/* find suitable in operands to the out operands of the node. */
561 	pair_up_operands(alloc_env, insn);
564 	look at the in/out operands and add each operand (and its possible partner)
565 	to a bipartite graph (left: nodes with partners, right: admissible colors).
567 	for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
568 		be_operand_t *op = &insn->ops[i];
571 		If the operand has no partner or the partner has not been marked
572 		for allocation, determine the admissible registers and mark it
573 		for allocation by associating the node and its partner with the
574 		set of admissible registers via a bipartite graph.
576 		if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
577 			ir_node *partner = op->partner ? op->partner->carrier : NULL;
580 			pmap_insert(partners, op->carrier, partner);
582 			pmap_insert(partners, partner, op->carrier);
584 			/* don't insert a node twice */
			/* NOTE(review): the inner scan reuses loop variable i of the
			 * outer operand loop — looks suspicious; the unelided source
			 * may use a different index. Verify. */
585 			for(i = 0; i < n_alloc; ++i) {
586 				if(alloc_nodes[i] == op->carrier) {
593 			alloc_nodes[n_alloc] = op->carrier;
595 			DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier,
598 			bitset_clear_all(bs);
599 			get_decisive_partner_regs(bs, op, op->partner);
601 			DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier,
604 			bitset_foreach(bs, col) {
605 				//hungarian_add(bp, n_alloc, col, 1);
606 				bipartite_add(bp, n_alloc, col);
614 	Put all nodes which live through the constrained instruction also to the
615 	allocation bipartite graph. They are considered unconstrained.
618 	foreach_out_edge(perm, edge) {
620 		ir_node *proj = get_edge_src_irn(edge);
622 		assert(is_Proj(proj));
624 		if(!values_interfere(birg, proj, irn) || pmap_contains(partners, proj))
627 		/* don't insert a node twice */
628 		for(i = 0; i < n_alloc; ++i) {
629 			if(alloc_nodes[i] == proj) {
637 		assert(n_alloc < n_regs);
639 		alloc_nodes[n_alloc] = proj;
640 		pmap_insert(partners, proj, NULL);
		/* Live-through values may take any allocatable color. */
642 		bitset_clear_all(bs);
643 		arch_put_non_ignore_regs(aenv, env->cls, bs);
644 		bitset_andnot(bs, env->ignore_colors);
645 		bitset_foreach(bs, col) {
646 			//hungarian_add(bp, n_alloc, col, 1);
647 			bipartite_add(bp, n_alloc, col);
654 	/* Compute a valid register allocation. */
656 	hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
657 	match_res = hungarian_solve(bp, assignment, &cost, 1);
658 	assert(match_res == 0 && "matching failed");
660 	bipartite_matching(bp, assignment);
663 	/* Assign colors obtained from the matching. */
664 	for(i = 0; i < n_alloc; ++i) {
665 		const arch_register_t *reg;
668 		assert(assignment[i] >= 0 && "there must have been a register assigned");
669 		reg = arch_register_for_index(env->cls, assignment[i]);
670 		assert(! (reg->type & arch_register_type_ignore));
672 		irn = alloc_nodes[i];
674 		arch_set_irn_register(aenv, irn, reg);
675 		(void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
676 		DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));
		/* The partner (if any) inherits the same register. */
679 		irn = pmap_get(partners, alloc_nodes[i]);
681 		arch_set_irn_register(aenv, irn, reg);
682 		(void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
683 		DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));
687 	/* Allocate the non-constrained Projs of the Perm. */
689 	bitset_clear_all(bs);
691 	/* Put the colors of all Projs in a bitset. */
692 	foreach_out_edge(perm, edge) {
693 		ir_node *proj = get_edge_src_irn(edge);
694 		const arch_register_t *reg = arch_get_irn_register(aenv, proj);
697 		bitset_set(bs, reg->index);
700 	/* Assign the not yet assigned Projs of the Perm a suitable color. */
701 	foreach_out_edge(perm, edge) {
702 		ir_node *proj = get_edge_src_irn(edge);
703 		const arch_register_t *reg = arch_get_irn_register(aenv, proj);
705 		DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));
708 		col = get_next_free_reg(alloc_env, bs);
709 		reg = arch_register_for_index(env->cls, col);
710 		bitset_set(bs, reg->index);
711 		arch_set_irn_register(aenv, proj, reg);
712 		pset_insert_ptr(alloc_env->pre_colored, proj);
713 		DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));
719 	//hungarian_free(bp);
720 	pmap_destroy(partners);
	/* Release all insn descriptors allocated since function entry. */
723 	obstack_free(env->obst, base);
728 * Handle constraint nodes in each basic block.
729 * handle_constraints() inserts Perm nodes which perm
730 * over all values live at the constrained node right in front
731 * of the constrained node. These Perms signal a constrained node.
732 * For further comments, refer to handle_constraints().
/**
 * Dominance-walker callback: run handle_constraints() over the schedule of
 * one block, starting silent only in the start block (see comment above).
 * @param bl    The block to process.
 * @param data  The be_chordal_alloc_env_t (passed as void*).
 */
734 static void constraints(ir_node *bl, void *data)
736 	be_chordal_alloc_env_t *env = data;
739 	Start silent in the start block.
740 	The silence remains until the first barrier is seen.
741 	Each other block is begun loud.
743 	int silent = bl == get_irg_start_block(get_irn_irg(bl));
747 	If the block is the start block search the barrier and
748 	start handling constraints from there.
751 	for(irn = sched_first(bl); !sched_is_end(irn);) {
752 		irn = handle_constraints(env, irn, &silent);
757 * Annotate the register pressure to the nodes and compute
758 * the liveness intervals.
759 * @param block The block to do it for.
760 * @param env_ptr The environment.
762 static void pressure(ir_node *block, void *env_ptr)
764 /* Convenience macro for a def */
765 #define border_def(irn, step, real) \
766 	border_add(env, head, irn, step, pressure--, 1, real)
768 /* Convenience macro for a use */
769 #define border_use(irn, step, real) \
770 	border_add(env, head, irn, step, ++pressure, 0, real)
772 	be_chordal_alloc_env_t *alloc_env = env_ptr;
773 	be_chordal_env_t *env = alloc_env->chordal_env;
774 	bitset_t *live = alloc_env->live;
776 	be_lv_t *lv = env->birg->lv;
781 	unsigned pressure = 0;
782 	struct list_head *head;
784 	DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
785 	bitset_clear_all(live);
787 	/* Set up the border list in the block info */
788 	head = obstack_alloc(env->obst, sizeof(*head));
789 	INIT_LIST_HEAD(head);
790 	assert(pmap_get(env->border_heads, block) == NULL);
791 	pmap_insert(env->border_heads, block, head);
794 	* Make final uses of all values live out of the block.
795 	* They are necessary to build up real intervals.
797 	be_lv_foreach(lv, block, be_lv_state_end, i) {
798 		ir_node *irn = be_lv_get_irn(lv, block, i);
799 		if(has_reg_class(env, irn)) {
800 			DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_idx(irn)));
801 			bitset_set(live, get_irn_idx(irn));
			/* Live-out uses are "unreal" (is_real == 0): they only close
			 * the interval at the block end. */
802 			border_use(irn, step, 0);
808 	* Determine the last uses of a value inside the block, since they are
809 	* relevant for the interval borders.
811 	sched_foreach_reverse(block, irn) {
812 		DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
813 		DBG((dbg, LEVEL_2, "\tlive: %B\n", live));
		/* Tuple results: each Proj of the right class gets its own def
		 * border. */
815 		if (get_irn_mode(irn) == mode_T) {
816 			const ir_edge_t *edge;
818 			foreach_out_edge(irn, edge) {
819 				ir_node *proj = get_edge_src_irn(edge);
822 				* If the node defines some value, which can put into a
823 				* register of the current class, make a border for it.
825 				if(has_reg_class(env, proj)) {
826 					int nr = get_irn_idx(proj);
828 					bitset_clear(live, nr);
829 					border_def(proj, step, 1);
835 		* If the node defines some value, which can put into a
836 		* register of the current class, make a border for it.
838 		if(has_reg_class(env, irn)) {
839 			int nr = get_irn_idx(irn);
841 			bitset_clear(live, nr);
842 			border_def(irn, step, 1);
846 		* If the node is no phi node we can examine the uses.
849 		for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
850 			ir_node *op = get_irn_n(irn, i);
852 			if(has_reg_class(env, op)) {
853 				int nr = get_irn_idx(op);
854 				const char *msg = "-";
				/* Only the LAST use (first seen in this reverse walk)
				 * opens the interval. */
856 				if(!bitset_is_set(live, nr)) {
857 					border_use(op, step, 1);
858 					bitset_set(live, nr);
862 				DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));
	/* Close the intervals of the values live into the block with unreal
	 * defs at the block entry. */
869 	bitset_foreach(live, elm) {
870 		ir_node *irn = get_idx_irn(env->irg, elm);
871 		if (be_is_live_in(lv, block, irn))
872 			border_def(irn, step, 0);
/**
 * Dominance-walker callback: color the values of one block by scanning its
 * border list from the last def backwards — a perfect elimination order in
 * a chordal graph, so greedy first-free coloring is optimal.
 * @param block    The block to color.
 * @param env_ptr  The be_chordal_alloc_env_t (passed as void*).
 */
876 static void assign(ir_node *block, void *env_ptr)
878 	be_chordal_alloc_env_t *alloc_env = env_ptr;
879 	be_chordal_env_t *env = alloc_env->chordal_env;
880 	bitset_t *live = alloc_env->live;
881 	bitset_t *colors = alloc_env->colors;
882 	bitset_t *in_colors = alloc_env->in_colors;
883 	const arch_env_t *arch_env = env->birg->main_env->arch_env;
884 	struct list_head *head = get_block_border_head(env, block);
885 	be_lv_t *lv = env->birg->lv;
891 	bitset_clear_all(colors);
892 	bitset_clear_all(live);
893 	bitset_clear_all(in_colors);
895 	DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
896 	DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
897 	list_for_each_entry(border_t, b, head, list) {
898 		DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
899 			b->irn, get_irn_idx(b->irn)));
903 	* Add initial defs for all values live in.
904 	* Since their colors have already been assigned (The dominators were
905 	* allocated before), we have to mark their colors as used also.
907 	be_lv_foreach(lv, block, be_lv_state_in, idx) {
908 		irn = be_lv_get_irn(lv, block, idx);
909 		if(has_reg_class(env, irn)) {
910 			const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
913 			assert(reg && "Node must have been assigned a register");
914 			col = arch_register_get_index(reg);
916 			DBG((dbg, LEVEL_4, "%+F has reg %s\n", irn, reg->name));
918 			/* Mark the color of the live in value as used. */
919 			bitset_set(colors, col);
920 			bitset_set(in_colors, col);
922 			/* Mark the value live in. */
923 			bitset_set(live, get_irn_idx(irn));
928 	* Mind that the sequence of defs from back to front defines a perfect
929 	* elimination order. So, coloring the definitions from first to last
932 	list_for_each_entry_reverse(border_t, b, head, list) {
933 		ir_node *irn = b->irn;
934 		int nr = get_irn_idx(irn);
935 		int ignore = arch_irn_is(arch_env, irn, ignore);
938 		* Assign a color, if it is a local def. Global defs already have a
941 		if(b->is_def && !be_is_live_in(lv, block, irn)) {
942 			const arch_register_t *reg;
			/* Ignore nodes and pre-colored values keep their register;
			 * everything else gets the first free color greedily. */
945 			if(ignore || pset_find_ptr(alloc_env->pre_colored, irn)) {
946 				reg = arch_get_irn_register(arch_env, irn);
948 				assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
950 				col = get_next_free_reg(alloc_env, colors);
951 				reg = arch_register_for_index(env->cls, col);
952 				assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
953 				assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");
956 			bitset_set(colors, col);
957 			arch_set_irn_register(arch_env, irn, reg);
959 			DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n", arch_register_get_name(reg), col, irn));
961 			assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
962 			bitset_set(live, nr);
965 		/* Clear the color upon a use. */
966 		else if(!b->is_def) {
967 			const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
970 			assert(reg && "Register must have been assigned");
972 			col = arch_register_get_index(reg);
974 			if(!arch_register_type_is(reg, ignore)) {
975 				assert(bitset_is_set(live, nr) && "Cannot have a non live use");
979 			bitset_clear(colors, col);
980 			bitset_clear(live, nr);
/**
 * Main driver of chordal register allocation for one register class:
 * assure dominance/liveness info, handle register constraints, compute
 * liveness intervals (pressure walk), then greedily color them (assign
 * walk), optionally dumping the constrained graph / interval tree.
 * @param chordal_env  The chordal environment of the class to allocate.
 */
985 void be_ra_chordal_color(be_chordal_env_t *chordal_env)
987 	be_chordal_alloc_env_t env;
990 	be_irg_t *birg = chordal_env->birg;
991 	const arch_register_class_t *cls = chordal_env->cls;
993 	int colors_n = arch_register_class_n_regs(cls);
994 	ir_graph *irg = chordal_env->irg;
995 	int allocatable_regs = colors_n - be_put_ignore_regs(birg, cls, NULL);
997 	/* some special classes contain only ignore regs, no work to be done */
998 	if(allocatable_regs == 0)
1001 	be_assure_dom_front(birg);
1002 	lv = be_assure_liveness(birg);
1003 	be_liveness_assure_sets(lv);
1004 	be_liveness_assure_chk(lv);
1008 	env.chordal_env = chordal_env;
1009 	env.colors_n = colors_n;
1010 	env.colors = bitset_alloca(colors_n);
1011 	env.tmp_colors = bitset_alloca(colors_n);
1012 	env.in_colors = bitset_alloca(colors_n);
1013 	env.pre_colored = pset_new_ptr_default();
1015 	/* Handle register targeting constraints */
1016 	dom_tree_walk_irg(irg, constraints, NULL, &env);
1018 	if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
1019 		snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
1020 		be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);
	/* The live bitset is indexed by node index and heap-allocated, unlike
	 * the stack-allocated color bitsets above; freed below. */
1023 	env.live = bitset_malloc(get_irg_last_idx(chordal_env->irg));
1025 	/* First, determine the pressure */
1026 	dom_tree_walk_irg(irg, pressure, NULL, &env);
1028 	/* Assign the colors */
1029 	dom_tree_walk_irg(irg, assign, NULL, &env);
1031 	if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
1033 		ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
1034 		plotter = new_plotter_ps(buf);
1035 		draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
1036 		plotter_free(plotter);
1039 	bitset_free(env.live);
1040 	del_pset(env.pre_colored);
/**
 * Module initializer: register this module's debug channel. Invoked via the
 * module-constructor registration below.
 */
1043 void be_init_chordal(void)
1045 	FIRM_DBG_REGISTER(dbg, "firm.be.chordal");
1048 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal);