2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Chordal register allocation.
23 * @author Sebastian Hack
37 #include "raw_bitset.h"
39 #include "bipartite.h"
40 #include "hungarian.h"
43 #include "irgraph_t.h"
44 #include "irprintf_t.h"
55 #include "besched_t.h"
62 #include "bestatevent.h"
64 #include "beintlive_t.h"
66 #include "bechordal_t.h"
67 #include "bechordal_draw.h"
/* Debug module handle for this file; registered as "firm.be.chordal" in
 * be_init_chordal() below. Only present in debug builds. */
70 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* NOTE(review): presumably guards interval-tree dumping; the visible dump
 * code in be_ra_chordal_color() is driven by opts->dump_flags, so confirm
 * whether this define is still consulted anywhere. */
74 #define DUMP_INTERVALS
/*
 * Per-run state of the chordal allocator for one register class.
 * Bundles the chordal environment with the scratch sets reused by the
 * constraint, pressure and assignment walks below.
 */
76 typedef struct _be_chordal_alloc_env_t {
77 be_chordal_env_t *chordal_env; /**< The chordal environment this run operates on. */
79 pset *pre_colored; /**< Set of precolored nodes. */
80 bitset_t *live; /**< A liveness bitset. */
81 bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
82 bitset_t *colors; /**< The color mask. */
83 bitset_t *in_colors; /**< Colors used by live in values. */
84 int colors_n; /**< The number of colors. */
85 } be_chordal_alloc_env_t;
89 /* Make a fourcc for border checking. */
/* Magic value stamped into every border_t in debug builds; consumed by the
 * check_border_list()/check_heads() sanity checks below. */
90 #define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
/**
 * Debug sanity check: assert that every border on a block's border list
 * still carries the BORDER_FOURCC magic (i.e. the list is not corrupted).
 * @param head Head of the block's intrusive border list.
 */
93 static void check_border_list(struct list_head *head)
96 list_for_each_entry(border_t, x, head, list) {
97 assert(x->magic == BORDER_FOURCC);
/**
 * Debug sanity check: run check_border_list() on the border list of every
 * block recorded in env->border_heads.
 * @param env The chordal environment holding the block -> border-list map.
 */
101 static void check_heads(be_chordal_env_t *env)
104 for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
105 /* ir_printf("checking border list of block %+F\n", ent->key); */
106 check_border_list(ent->value);
112 * Add an interval border to the list of a block's list
113 * of interval border.
114 * @note You always have to create the use before the def.
115 * @param env The environment.
116 * @param head The list head to enqueue the borders.
117 * @param irn The node (value) the border belongs to.
118 * @param pressure The pressure at this point in time.
119 * @param step A time step for the border.
120 * @param is_def Is the border a use or a def.
121 * @return The created border.
123 static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
124 ir_node *irn, unsigned step, unsigned pressure,
125 unsigned is_def, unsigned is_real)
/* Use case: allocate the use border itself ... */
132 b = obstack_alloc(env->obst, sizeof(*b));
134 /* also allocate the def and tie it to the use. */
135 def = obstack_alloc(env->obst, sizeof(*def));
136 memset(def, 0, sizeof(*def));
141 * Set the link field of the irn to the def.
142 * This strongly relies on the fact, that the use is always
143 * made before the def.
/* Stash the pre-allocated def border in the node's link field so the later
 * is_def call can retrieve it without searching. */
145 set_irn_link(irn, def);
147 DEBUG_ONLY(b->magic = BORDER_FOURCC);
148 DEBUG_ONLY(def->magic = BORDER_FOURCC);
152 * If the def is encountered, the use was made and so was the
153 * the def node (see the code above). It was placed into the
154 * link field of the irn, so we can get it there.
157 b = get_irn_link(irn);
159 DEBUG_ONLY(assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered"));
/* Common tail for both cases: fill in the border and enqueue it. */
162 b->pressure = pressure;
164 b->is_real = is_real;
167 list_add_tail(&b->list, head);
168 DBG((dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
175 * Check, if an irn is of the register class currently under processing.
176 * @param env The chordal environment.
177 * @param irn The node.
178 * @return 1, if the node is of that register class, 0 if not.
180 static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
/* Delegate entirely to the architecture backend's notion of "participates in
 * allocation for this class". */
182 return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
/* Nonzero iff the register requirement of irn's result (position -1) is of
 * the "limited" kind. NOTE(review): the macro silently relies on a variable
 * named arch_env being in scope at the expansion site, and it evaluates
 * `req` twice — keep arguments side-effect free. */
185 #define has_limited_constr(req, irn) \
186 (arch_get_register_req(arch_env, (req), irn, -1) && (req)->type == arch_register_req_type_limited)
/**
 * Find the index of the lowest register color that is neither currently
 * occupied nor an ignore color of this class.
 * @param alloc_env Provides the scratch bitset and the ignore-color mask.
 * @param colors Bitset of colors currently in use.
 * @return Index of the first clear bit in (colors | ignore_colors).
 */
188 static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
190 bitset_t *tmp = alloc_env->tmp_colors;
/* Work on a copy so the caller's color mask is left untouched. */
191 bitset_copy(tmp, colors);
192 bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
193 return bitset_next_clear(tmp, 0);
/**
 * Compute into bs the register set that decides the color for a pair of
 * partnered operands — effectively the more restrictive of the two
 * admissible sets (a subset wins over its superset).
 * NOTE(review): several branch lines are elided from this view; the visible
 * copies suggest fallbacks to the other operand's set when one operand is
 * absent — confirm against the full source.
 * @param bs Output bitset, overwritten with the decisive set.
 * @param o1 First operand (may carry a constrained register set).
 * @param o2 Second operand (its partner).
 * @return Presumably bs — TODO confirm, the return statement is not visible.
 */
196 static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
201 bitset_copy(bs, o2->regs);
206 bitset_copy(bs, o1->regs);
/* Partnered operands must agree on the class (or have none). */
210 assert(o1->req->cls == o2->req->cls || ! o1->req->cls || ! o2->req->cls);
212 if(bitset_contains(o1->regs, o2->regs))
213 bitset_copy(bs, o1->regs);
214 else if(bitset_contains(o2->regs, o1->regs))
215 bitset_copy(bs, o2->regs);
/**
 * Scan an instruction into a be_insn_t using this environment's settings
 * (ignore colors, arch env). Thin convenience wrapper around be_scan_insn().
 * @param env The chordal environment.
 * @param irn The instruction to scan.
 * @return The freshly scanned instruction descriptor.
 */
222 static be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
226 ie.ignore_colors = env->ignore_colors;
227 ie.aenv = env->birg->main_env->arch_env;
230 return be_scan_insn(&ie, irn);
/**
 * Insert Copy nodes in front of an instruction so its register constraints
 * can be fulfilled later:
 *  1) copies for ignore-register arguments that violate a "limited"
 *     requirement of the instruction,
 *  2) copies for values used more than once under differing constraints,
 *  3) copies for constrained arguments that live through the node and whose
 *     admissible set overlaps a register occurring in an out constraint.
 * @param env The chordal environment.
 * @param irn The instruction to prepare.
 * @return The next instruction to process (insn->next_insn).
 */
233 static ir_node *prepare_constr_insn(be_chordal_env_t *env, ir_node *irn)
235 const be_irg_t *birg = env->birg;
236 const arch_env_t *aenv = birg->main_env->arch_env;
237 bitset_t *tmp = bitset_alloca(env->cls->n_regs);
238 bitset_t *def_constr = bitset_alloca(env->cls->n_regs);
239 ir_node *bl = get_nodes_block(irn);
240 be_lv_t *lv = env->birg->lv;
/* Pass 1: copy ignore-register arguments that do not satisfy a limited
 * input requirement of this instruction. */
245 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
246 ir_node *op = get_irn_n(irn, i);
248 const arch_register_t *reg;
249 const arch_register_req_t *req;
/* Skip operands of a different register class. */
251 if (arch_get_irn_reg_class(aenv, irn, i) != env->cls)
254 reg = arch_get_irn_register(aenv, op);
/* Only ignore registers (without the joker property) are of interest. */
256 if (reg == NULL || !arch_register_type_is(reg, ignore))
258 if(arch_register_type_is(reg, joker))
261 req = arch_get_register_req(aenv, irn, i);
262 if (!arch_register_req_is(req, limited))
/* If the ignore register is admissible anyway, no copy is needed. */
265 if (rbitset_is_set(req->limited, reg->index))
268 copy = be_new_Copy(env->cls, env->irg, bl, op);
269 be_stat_ev("constr_copy", 1);
271 sched_add_before(irn, copy);
272 set_irn_n(irn, i, copy);
273 DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, irn, i));
276 insn = chordal_scan_insn(env, irn);
/* Unconstrained instructions need no further preparation. */
278 if(!insn->has_constraints)
281 /* insert copies for nodes that occur constrained more than once. */
282 for(i = insn->use_start; i < insn->n_ops; ++i) {
283 be_operand_t *op = &insn->ops[i];
285 if(!op->has_constraints)
288 for(j = i + 1; j < insn->n_ops; ++j) {
290 be_operand_t *a_op = &insn->ops[j];
/* Only later uses of the same carrier that are also constrained matter. */
292 if(a_op->carrier != op->carrier || !a_op->has_constraints)
295 /* if the constraint is the same, no copy is necessary
296 * TODO generalise unequal but overlapping constraints */
297 if (a_op->req == op->req)
/* An existing Copy at this position already decouples the operand. */
300 if (be_is_Copy(get_irn_n(insn->irn, a_op->pos)))
303 copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
304 be_stat_ev("constr_copy", 1);
306 sched_add_before(insn->irn, copy);
307 set_irn_n(insn->irn, a_op->pos, copy);
308 DBG((dbg, LEVEL_3, "inserting multiple constr copy %+F for %+F pos %d\n", copy, insn->irn, a_op->pos));
312 /* collect all registers occurring in out constraints. */
313 for(i = 0; i < insn->use_start; ++i) {
314 be_operand_t *op = &insn->ops[i];
315 if(op->has_constraints)
316 bitset_or(def_constr, op->regs);
320 insert copies for all constrained arguments living through the node
321 and being constrained to a register which also occurs in out constraints.
323 for(i = insn->use_start; i < insn->n_ops; ++i) {
325 be_operand_t *op = &insn->ops[i];
/* tmp := (admissible regs of this use) ∩ (regs claimed by out constraints) */
327 bitset_copy(tmp, op->regs);
328 bitset_and(tmp, def_constr);
332 1) the operand is constrained.
333 2) lives through the node.
334 3) is constrained to a register occurring in out constraints.
336 if(!op->has_constraints ||
337 !values_interfere(birg, insn->irn, op->carrier) ||
338 bitset_popcnt(tmp) == 0)
342 only create the copy if the operand is no copy.
343 this is necessary since the assure constraints phase inserts
344 Copies and Keeps for operands which must be different from the
345 results. Additional copies here would destroy this.
347 if (be_is_Copy(get_irn_n(insn->irn, op->pos)))
350 copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
352 sched_add_before(insn->irn, copy);
353 set_irn_n(insn->irn, op->pos, copy);
354 DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n", copy, insn->irn, op->pos));
/* A new Copy changes the live ranges of its operand; update liveness. */
355 be_liveness_update(lv, op->carrier);
359 obstack_free(env->obst, insn);
/* NOTE(review): use-after-free — obstack_free() above releases insn (and
 * everything allocated after it on the obstack), yet insn->next_insn is read
 * below. The freed memory usually still holds the old value, but this is
 * undefined behavior; capture next_insn in a local before freeing. */
360 return insn->next_insn;
/**
 * Block walker: run prepare_constr_insn() over every scheduled instruction
 * of the block. prepare_constr_insn() returns the next instruction, so the
 * loop advances via its return value rather than sched_next().
 * @param bl The block to process.
 * @param data The be_chordal_env_t, passed through the walker.
 */
363 static void pre_spill_prepare_constr_walker(ir_node *bl, void *data)
365 be_chordal_env_t *env = data;
367 for(irn = sched_first(bl); !sched_is_end(irn);) {
368 irn = prepare_constr_insn(env, irn);
/**
 * Public entry point: insert constraint-preparation copies in all blocks of
 * the graph by walking every block with pre_spill_prepare_constr_walker().
 * @param cenv The chordal environment (also passed as walker data).
 */
372 void be_pre_spill_prepare_constr(be_chordal_env_t *cenv) {
373 irg_block_walk_graph(cenv->irg, pre_spill_prepare_constr_walker, NULL, (void *) cenv);
/**
 * For every out operand of the instruction, find an in operand that may
 * share its register (the carrier does not interfere with the result and the
 * admissible register sets overlap), and link the two via their partner
 * fields. Among the candidates, the pair with the smallest combined number
 * of admissible registers (i.e. the most constrained one) is preferred.
 * @param alloc_env The allocation environment.
 * @param insn The scanned instruction whose operands are paired.
 */
376 static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t *insn)
378 const be_chordal_env_t *env = alloc_env->chordal_env;
380 int n_uses = be_insn_n_uses(insn);
381 int n_defs = be_insn_n_defs(insn);
382 bitset_t *bs = bitset_alloca(env->cls->n_regs);
383 int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));
388 For each out operand, try to find an in operand which can be assigned the
389 same register as the out operand.
391 for (j = 0; j < insn->use_start; ++j) {
/* Sentinel: larger than any reachable popcnt sum (2 * n_regs). */
393 int smallest_n_regs = 2 * env->cls->n_regs + 1;
394 be_operand_t *out_op = &insn->ops[j];
396 /* Try to find an in operand which has ... */
397 for(i = insn->use_start; i < insn->n_ops; ++i) {
399 const be_operand_t *op = &insn->ops[i];
/* Already paired with another out operand. */
401 if (op->partner != NULL)
/* An interfering carrier cannot share the result's register. */
403 if (values_interfere(env->birg, op->irn, op->carrier))
406 bitset_clear_all(bs);
407 bitset_copy(bs, op->regs);
408 bitset_and(bs, out_op->regs);
409 n_total = bitset_popcnt(op->regs) + bitset_popcnt(out_op->regs);
/* Non-empty overlap and tighter than the best pair so far? */
411 if (bitset_popcnt(bs) > 0 && n_total < smallest_n_regs) {
413 smallest_n_regs = n_total;
418 be_operand_t *partner = &insn->ops[smallest];
/* All uses of the same carrier get the out operand as partner. */
419 for(i = insn->use_start; i < insn->n_ops; ++i) {
420 if(insn->ops[i].carrier == partner->carrier)
421 insn->ops[i].partner = out_op;
424 out_op->partner = partner;
425 partner->partner = out_op;
/**
 * Prepare a constrained instruction for register assignment: insert a Perm
 * over the live values right before it, clear the registers that
 * insert_Perm_after() propagated onto the Projs, re-scan the instruction
 * (its inputs are now those Projs), and mirror the input constraints onto
 * the Perm's outputs for later coalescing.
 * @param alloc_env The allocation environment.
 * @param the_insn In/out: the scanned instruction; replaced by the re-scan.
 * @return The inserted Perm (NOTE(review): the return statement is elided
 *         from this view — presumably `return perm;`, confirm upstream).
 */
431 static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env,
432 be_insn_t **the_insn)
434 be_chordal_env_t *env = alloc_env->chordal_env;
435 const arch_env_t *aenv = env->birg->main_env->arch_env;
436 be_insn_t *insn = *the_insn;
437 ir_node *perm = NULL;
438 bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
439 const ir_edge_t *edge;
442 assert(insn->has_constraints && "only do this for constrained nodes");
445 Collect all registers that occur in output constraints.
446 This is necessary, since if the insn has one of these as an input constraint
447 and the corresponding operand interferes with the insn, the operand must
450 for(i = 0; i < insn->use_start; ++i) {
451 be_operand_t *op = &insn->ops[i];
452 if(op->has_constraints)
453 bitset_or(out_constr, op->regs);
457 Make the Perm, recompute liveness and re-scan the insn since the
458 in operands are now the Projs of the Perm.
460 perm = insert_Perm_after(env->birg, env->cls, sched_prev(insn->irn));
462 /* Registers are propagated by insert_Perm_after(). Clean them here! */
466 be_stat_ev("constr_perm", get_irn_arity(perm));
467 foreach_out_edge(perm, edge) {
468 ir_node *proj = get_edge_src_irn(edge);
469 arch_set_irn_register(aenv, proj, NULL);
473 We also have to re-build the insn since the input operands are now the Projs of
474 the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
475 the live sets may change.
/* The old insn descriptor lives on the obstack; release it before re-scan. */
477 obstack_free(env->obst, insn);
478 *the_insn = insn = chordal_scan_insn(env, insn->irn);
481 Copy the input constraints of the insn to the Perm as output
482 constraints. Succeeding phases (coalescing) will need that.
484 for(i = insn->use_start; i < insn->n_ops; ++i) {
485 be_operand_t *op = &insn->ops[i];
486 ir_node *proj = op->carrier;
488 Note that the predecessor must not be a Proj of the Perm,
489 since ignore-nodes are not Perm'ed.
491 if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
492 be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), op->req);
/**
 * Handle one (possibly constrained) instruction during the constraints walk:
 * record pre-colored operands, toggle the silent flag on Barriers, and for a
 * genuinely constrained instruction build a bipartite matching between the
 * values involved (operands, their partners, and values living through the
 * node via the inserted Perm) and the admissible register colors, then
 * assign the matched registers and color the remaining Perm Projs.
 * @param alloc_env The allocation environment.
 * @param irn The instruction to handle.
 * @param silent In/out: while set, constraint handling is skipped
 *        (silent from block start until the first Barrier).
 * @return The next instruction to process (captured from the scanned insn
 *         before any freeing — contrast with prepare_constr_insn()).
 */
499 static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env,
500 ir_node *irn, int *silent)
502 const arch_env_t *aenv;
505 ir_node **alloc_nodes;
506 //hungarian_problem_t *bp;
511 const ir_edge_t *edge;
512 ir_node *perm = NULL;
513 //int match_res, cost;
514 be_chordal_env_t *env = alloc_env->chordal_env;
/* Remember the obstack level so everything allocated below can be freed
 * in one go at the end. */
515 void *base = obstack_base(env->obst);
516 be_insn_t *insn = chordal_scan_insn(env, irn);
517 ir_node *res = insn->next_insn;
518 int be_silent = *silent;
519 be_irg_t *birg = env->birg;
/* Pre-colored results are merely recorded; no matching is needed. */
522 if(insn->pre_colored) {
524 for(i = 0; i < insn->use_start; ++i)
525 pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);
529 If the current node is a barrier toggle the silent flag.
530 If we are in the start block, we are ought to be silent at the beginning,
531 so the toggling activates the constraint handling but skips the barrier.
532 If we are in the end block we handle the in requirements of the barrier
533 and set the rest to silent.
535 if(be_is_Barrier(irn))
542 Perms inserted before the constraint handling phase are considered to be
543 correctly precolored. These Perms arise during the ABI handling phase.
545 if(!insn->has_constraints)
548 aenv = env->birg->main_env->arch_env;
549 n_regs = env->cls->n_regs;
550 bs = bitset_alloca(n_regs);
551 alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
552 //bp = hungarian_new(n_regs, n_regs, 2, HUNGARIAN_MATCH_PERFECT);
553 bp = bipartite_new(n_regs, n_regs);
554 assignment = alloca(n_regs * sizeof(assignment[0]));
/* Maps each matched node to its partner (or NULL). */
555 partners = pmap_create();
558 prepare the constraint handling of this node.
559 Perms are constructed and Copies are created for constrained values
560 interfering with the instruction.
562 perm = pre_process_constraints(alloc_env, &insn);
564 /* find suitable in operands to the out operands of the node. */
565 pair_up_operands(alloc_env, insn);
568 look at the in/out operands and add each operand (and its possible partner)
569 to a bipartite graph (left: nodes with partners, right: admissible colors).
571 for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
572 be_operand_t *op = &insn->ops[i];
575 If the operand has no partner or the partner has not been marked
576 for allocation, determine the admissible registers and mark it
577 for allocation by associating the node and its partner with the
578 set of admissible registers via a bipartite graph.
580 if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
581 ir_node *partner = op->partner ? op->partner->carrier : NULL;
584 pmap_insert(partners, op->carrier, partner);
586 pmap_insert(partners, partner, op->carrier);
588 /* don't insert a node twice */
/* NOTE(review): this inner loop reuses `i`, the index of the enclosing
 * operand loop at line 571, and would clobber it — unless a shadowing
 * `int i;` declaration was elided from this view. Verify against the
 * full source; if not shadowed, this is a real bug. */
589 for(i = 0; i < n_alloc; ++i) {
590 if(alloc_nodes[i] == op->carrier) {
597 alloc_nodes[n_alloc] = op->carrier;
599 DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier,
602 bitset_clear_all(bs);
/* The decisive set of the pair constrains both nodes. */
603 get_decisive_partner_regs(bs, op, op->partner);
605 DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier,
608 bitset_foreach(bs, col) {
609 //hungarian_add(bp, n_alloc, col, 1);
610 bipartite_add(bp, n_alloc, col);
618 Put all nodes which live through the constrained instruction also to the
619 allocation bipartite graph. They are considered unconstrained.
622 foreach_out_edge(perm, edge) {
624 ir_node *proj = get_edge_src_irn(edge);
626 assert(is_Proj(proj));
/* Only live-through values not already handled above take part. */
628 if(!values_interfere(birg, proj, irn) || pmap_contains(partners, proj))
631 /* don't insert a node twice */
632 for(i = 0; i < n_alloc; ++i) {
633 if(alloc_nodes[i] == proj) {
641 assert(n_alloc < n_regs);
643 alloc_nodes[n_alloc] = proj;
644 pmap_insert(partners, proj, NULL);
646 bitset_clear_all(bs);
/* Unconstrained: any non-ignore, non-reserved color is admissible. */
647 arch_put_non_ignore_regs(env->cls, bs);
648 bitset_andnot(bs, env->ignore_colors);
649 bitset_foreach(bs, col) {
650 //hungarian_add(bp, n_alloc, col, 1);
651 bipartite_add(bp, n_alloc, col);
658 /* Compute a valid register allocation. */
660 hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
661 match_res = hungarian_solve(bp, assignment, &cost, 1);
662 assert(match_res == 0 && "matching failed");
664 bipartite_matching(bp, assignment);
667 /* Assign colors obtained from the matching. */
668 for(i = 0; i < n_alloc; ++i) {
669 const arch_register_t *reg;
672 assert(assignment[i] >= 0 && "there must have been a register assigned");
673 reg = arch_register_for_index(env->cls, assignment[i]);
674 assert(! (reg->type & arch_register_type_ignore));
676 irn = alloc_nodes[i];
678 arch_set_irn_register(aenv, irn, reg);
679 (void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
680 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));
/* The partner (if any) receives the same register. */
683 irn = pmap_get(partners, alloc_nodes[i]);
685 arch_set_irn_register(aenv, irn, reg);
686 (void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
687 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));
691 /* Allocate the non-constrained Projs of the Perm. */
693 bitset_clear_all(bs);
695 /* Put the colors of all Projs in a bitset. */
696 foreach_out_edge(perm, edge) {
697 ir_node *proj = get_edge_src_irn(edge);
698 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
701 bitset_set(bs, reg->index);
704 /* Assign the not yet assigned Projs of the Perm a suitable color. */
705 foreach_out_edge(perm, edge) {
706 ir_node *proj = get_edge_src_irn(edge);
707 const arch_register_t *reg = arch_get_irn_register(aenv, proj);
709 DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));
712 col = get_next_free_reg(alloc_env, bs);
713 reg = arch_register_for_index(env->cls, col);
714 bitset_set(bs, reg->index);
715 arch_set_irn_register(aenv, proj, reg);
716 pset_insert_ptr(alloc_env->pre_colored, proj);
717 DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));
723 //hungarian_free(bp);
724 pmap_destroy(partners);
/* Release everything allocated on the obstack since function entry
 * (insn descriptors etc.) in one step. */
727 obstack_free(env->obst, base);
732 * Handle constraint nodes in each basic block.
733 * handle_constraints() inserts Perm nodes which perm
734 * over all values live at the constrained node right in front
735 * of the constrained node. These Perms signal a constrained node.
736 * For further comments, refer to handle_constraints().
738 static void constraints(ir_node *bl, void *data)
740 be_chordal_alloc_env_t *env = data;
743 Start silent in the start block.
744 The silence remains until the first barrier is seen.
745 Each other block is begun loud.
747 int silent = bl == get_irg_start_block(get_irn_irg(bl));
751 If the block is the start block search the barrier and
752 start handling constraints from there.
/* handle_constraints() returns the next instruction, so the loop advances
 * via its return value rather than sched_next(). */
755 for(irn = sched_first(bl); !sched_is_end(irn);) {
756 irn = handle_constraints(env, irn, &silent);
761 * Annotate the register pressure to the nodes and compute
762 * the liveness intervals.
763 * @param block The block to do it for.
764 * @param env_ptr The environment.
766 static void pressure(ir_node *block, void *env_ptr)
768 /* Convenience macro for a def */
769 #define border_def(irn, step, real) \
770 border_add(env, head, irn, step, pressure--, 1, real)
772 /* Convenience macro for a use */
773 #define border_use(irn, step, real) \
774 border_add(env, head, irn, step, ++pressure, 0, real)
776 be_chordal_alloc_env_t *alloc_env = env_ptr;
777 be_chordal_env_t *env = alloc_env->chordal_env;
778 bitset_t *live = alloc_env->live;
780 be_lv_t *lv = env->birg->lv;
785 unsigned pressure = 0;
786 struct list_head *head;
788 DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
789 bitset_clear_all(live);
791 /* Set up the border list in the block info */
792 head = obstack_alloc(env->obst, sizeof(*head));
793 INIT_LIST_HEAD(head);
/* Each block is walked exactly once; a pre-existing head would mean a
 * double visit. */
794 assert(pmap_get(env->border_heads, block) == NULL);
795 pmap_insert(env->border_heads, block, head);
798 * Make final uses of all values live out of the block.
799 * They are necessary to build up real intervals.
801 be_lv_foreach(lv, block, be_lv_state_end, i) {
802 ir_node *irn = be_lv_get_irn(lv, block, i);
803 if(has_reg_class(env, irn)) {
804 DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_idx(irn)));
805 bitset_set(live, get_irn_idx(irn));
/* is_real = 0: this is an artificial end-of-block use. */
806 border_use(irn, step, 0);
812 * Determine the last uses of a value inside the block, since they are
813 * relevant for the interval borders.
/* Walk the schedule backwards: defs close intervals, first-seen uses
 * (going backwards == last uses) open them. */
815 sched_foreach_reverse(block, irn) {
816 DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
817 DBG((dbg, LEVEL_2, "\tlive: %B\n", live));
819 if (get_irn_mode(irn) == mode_T) {
820 const ir_edge_t *edge;
/* mode_T nodes define their values through Projs. */
822 foreach_out_edge(irn, edge) {
823 ir_node *proj = get_edge_src_irn(edge);
826 * If the node defines some value, which can put into a
827 * register of the current class, make a border for it.
829 if(has_reg_class(env, proj)) {
830 int nr = get_irn_idx(proj);
832 bitset_clear(live, nr);
833 border_def(proj, step, 1);
839 * If the node defines some value, which can put into a
840 * register of the current class, make a border for it.
842 if(has_reg_class(env, irn)) {
843 int nr = get_irn_idx(irn);
845 bitset_clear(live, nr);
846 border_def(irn, step, 1);
850 * If the node is no phi node we can examine the uses.
853 for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
854 ir_node *op = get_irn_n(irn, i);
856 if(has_reg_class(env, op)) {
857 int nr = get_irn_idx(op);
858 const char *msg = "-";
/* Not yet live: this is the last use of op in the block. */
860 if(!bitset_is_set(live, nr)) {
861 border_use(op, step, 1);
862 bitset_set(live, nr);
866 DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));
/* Close intervals of values live into the block with artificial defs
 * (is_real = 0) at the block start. */
873 bitset_foreach(live, elm) {
874 ir_node *irn = get_idx_irn(env->irg, elm);
875 if (be_is_live_in(lv, block, irn))
876 border_def(irn, step, 0);
/**
 * Dominance-order block walker: assign a register color to every border def
 * of the block. Live-in values keep the color assigned in their dominator;
 * local defs get either their pre-assigned color (ignore/pre-colored nodes)
 * or the next free color. Uses release the color again, exploiting that the
 * reverse def sequence is a perfect elimination order of the chordal
 * interference graph.
 * @param block The block to color.
 * @param env_ptr The be_chordal_alloc_env_t.
 */
880 static void assign(ir_node *block, void *env_ptr)
882 be_chordal_alloc_env_t *alloc_env = env_ptr;
883 be_chordal_env_t *env = alloc_env->chordal_env;
884 bitset_t *live = alloc_env->live;
885 bitset_t *colors = alloc_env->colors;
886 bitset_t *in_colors = alloc_env->in_colors;
887 const arch_env_t *arch_env = env->birg->main_env->arch_env;
888 struct list_head *head = get_block_border_head(env, block);
889 be_lv_t *lv = env->birg->lv;
895 bitset_clear_all(colors);
896 bitset_clear_all(live);
897 bitset_clear_all(in_colors);
899 DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
900 DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
901 list_for_each_entry(border_t, b, head, list) {
902 DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
903 b->irn, get_irn_idx(b->irn)));
907 * Add initial defs for all values live in.
908 * Since their colors have already been assigned (The dominators were
909 * allocated before), we have to mark their colors as used also.
911 be_lv_foreach(lv, block, be_lv_state_in, idx) {
912 irn = be_lv_get_irn(lv, block, idx);
913 if(has_reg_class(env, irn)) {
914 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
917 assert(reg && "Node must have been assigned a register");
918 col = arch_register_get_index(reg);
920 DBG((dbg, LEVEL_4, "%+F has reg %s\n", irn, reg->name));
922 /* Mark the color of the live in value as used. */
923 bitset_set(colors, col);
924 bitset_set(in_colors, col);
926 /* Mark the value live in. */
927 bitset_set(live, get_irn_idx(irn));
932 * Mind that the sequence of defs from back to front defines a perfect
933 * elimination order. So, coloring the definitions from first to last
936 list_for_each_entry_reverse(border_t, b, head, list) {
937 ir_node *irn = b->irn;
938 int nr = get_irn_idx(irn);
939 int ignore = arch_irn_is(arch_env, irn, ignore);
942 * Assign a color, if it is a local def. Global defs already have a
945 if(b->is_def && !be_is_live_in(lv, block, irn)) {
946 const arch_register_t *reg;
/* Ignore nodes and pre-colored nodes keep their existing register. */
949 if(ignore || pset_find_ptr(alloc_env->pre_colored, irn)) {
950 reg = arch_get_irn_register(arch_env, irn);
952 assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
/* Otherwise pick the lowest free, non-ignore color. */
954 col = get_next_free_reg(alloc_env, colors);
955 reg = arch_register_for_index(env->cls, col);
956 assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
957 assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");
960 bitset_set(colors, col);
961 arch_set_irn_register(arch_env, irn, reg);
963 DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n", arch_register_get_name(reg), col, irn));
965 assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
966 bitset_set(live, nr);
969 /* Clear the color upon a use. */
970 else if(!b->is_def) {
971 const arch_register_t *reg = arch_get_irn_register(arch_env, irn);
974 assert(reg && "Register must have been assigned");
976 col = arch_register_get_index(reg);
978 if(!arch_register_type_is(reg, ignore)) {
979 assert(bitset_is_set(live, nr) && "Cannot have a non live use");
/* The use ends the interval: free the color and kill liveness. */
983 bitset_clear(colors, col);
984 bitset_clear(live, nr);
/**
 * Main driver of chordal register allocation for one register class:
 * ensure liveness information, handle register-targeting constraints,
 * compute pressure/interval borders, and assign colors — each step as a
 * dominance-order walk over the graph. Optionally dumps the graph after
 * constraint handling and the interval tree as EPS.
 * @param chordal_env The chordal environment (class, graph, options).
 */
989 void be_ra_chordal_color(be_chordal_env_t *chordal_env)
991 be_chordal_alloc_env_t env;
994 be_irg_t *birg = chordal_env->birg;
995 const arch_register_class_t *cls = chordal_env->cls;
997 int colors_n = arch_register_class_n_regs(cls);
998 ir_graph *irg = chordal_env->irg;
1000 lv = be_assure_liveness(birg);
1001 be_liveness_assure_sets(lv);
1002 be_liveness_assure_chk(lv);
/* Scratch bitsets live on this function's stack (alloca) and are reused by
 * every walker invocation through env. */
1006 env.chordal_env = chordal_env;
1007 env.colors_n = colors_n;
1008 env.colors = bitset_alloca(colors_n);
1009 env.tmp_colors = bitset_alloca(colors_n);
1010 env.in_colors = bitset_alloca(colors_n);
1011 env.pre_colored = pset_new_ptr_default();
1013 BE_TIMER_PUSH(t_constr);
1015 /* Handle register targeting constraints */
1016 dom_tree_walk_irg(irg, constraints, NULL, &env);
1018 if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
1019 snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
1020 be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);
1023 BE_TIMER_POP(t_constr);
/* The live bitset must span all node indices; heap-allocated since the
 * index space can be large. Freed below. */
1025 env.live = bitset_malloc(get_irg_last_idx(chordal_env->irg));
1027 /* First, determine the pressure */
1028 dom_tree_walk_irg(irg, pressure, NULL, &env);
1030 /* Assign the colors */
1031 dom_tree_walk_irg(irg, assign, NULL, &env);
1033 if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
1035 ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
1036 plotter = new_plotter_ps(buf);
1037 draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
1038 plotter_free(plotter);
1041 bitset_free(env.live);
1042 del_pset(env.pre_colored);
/**
 * Module constructor: register this file's debug module so the DBG() macros
 * above become active under "firm.be.chordal".
 */
1045 void be_init_chordal(void)
1047 FIRM_DBG_REGISTER(dbg, "firm.be.chordal");
1050 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal);