2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Chordal register allocation.
23 * @author Sebastian Hack
37 #include "raw_bitset.h"
39 #include "bipartite.h"
40 #include "hungarian.h"
43 #include "irgraph_t.h"
44 #include "irprintf_t.h"
56 #include "besched_t.h"
63 #include "bestatevent.h"
65 #include "beintlive_t.h"
67 #include "bechordal_t.h"
68 #include "bechordal_draw.h"
71 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
75 #define DUMP_INTERVALS
/**
 * Per-run state of the chordal coloring pass: bundles the chordal
 * environment with scratch bitsets that are reused for every block.
 */
typedef struct _be_chordal_alloc_env_t {
	be_chordal_env_t *chordal_env; /**< The underlying chordal environment. */

	pset *pre_colored;    /**< Set of precolored nodes. */
	bitset_t *live;       /**< A liveness bitset. */
	bitset_t *tmp_colors; /**< An auxiliary bitset which is as long as the number of colors in the class. */
	bitset_t *colors;     /**< The color mask. */
	bitset_t *in_colors;  /**< Colors used by live in values. */
	int colors_n;         /**< The number of colors. */
} be_chordal_alloc_env_t;
90 /* Make a fourcc for border checking. */
91 #define BORDER_FOURCC FOURCC('B', 'O', 'R', 'D')
/**
 * Debug helper: walk one block's border list and assert that every
 * border still carries the BORDER_FOURCC magic (detects corruption
 * of the obstack-allocated border records).
 */
static void check_border_list(struct list_head *head)
	list_for_each_entry(border_t, x, head, list) {
		assert(x->magic == BORDER_FOURCC);
/**
 * Debug helper: run check_border_list() on the border list of every
 * block recorded in env->border_heads.
 */
static void check_heads(be_chordal_env_t *env)
	for(ent = pmap_first(env->border_heads); ent; ent = pmap_next(env->border_heads)) {
		/* ir_printf("checking border list of block %+F\n", ent->key); */
		check_border_list(ent->value);
 * Add an interval border to a block's list
 * of interval borders.
115 * @note You always have to create the use before the def.
116 * @param env The environment.
117 * @param head The list head to enqueue the borders.
118 * @param irn The node (value) the border belongs to.
119 * @param pressure The pressure at this point in time.
120 * @param step A time step for the border.
121 * @param is_def Is the border a use or a def.
122 * @return The created border.
static INLINE border_t *border_add(be_chordal_env_t *env, struct list_head *head,
			ir_node *irn, unsigned step, unsigned pressure,
			unsigned is_def, unsigned is_real)
	/* Use case: allocate a fresh border for the use ... */
	b = obstack_alloc(env->obst, sizeof(*b));

	/* also allocate the def and tie it to the use. */
	def = obstack_alloc(env->obst, sizeof(*def));
	memset(def, 0, sizeof(*def));
	/*
	 * Set the link field of the irn to the def.
	 * This strongly relies on the fact, that the use is always
	 * made before the def.
	 */
	set_irn_link(irn, def);

	DEBUG_ONLY(b->magic = BORDER_FOURCC);
	DEBUG_ONLY(def->magic = BORDER_FOURCC);
	/*
	 * If the def is encountered, the use was made and so was the
	 * the def node (see the code above). It was placed into the
	 * link field of the irn, so we can get it there.
	 */
	b = get_irn_link(irn);

	DEBUG_ONLY(assert(b && b->magic == BORDER_FOURCC && "Illegal border encountered"));
	/* Record pressure/step/flags on the border we are emitting. */
	b->pressure = pressure;
	b->is_real = is_real;
	/* Borders are enqueued at the tail: list order is scheduling order. */
	list_add_tail(&b->list, head);
	DBG((dbg, LEVEL_5, "\t\t%s adding %+F, step: %d\n", is_def ? "def" : "use", irn, step));
176 * Check, if an irn is of the register class currently under processing.
177 * @param env The chordal environment.
178 * @param irn The node.
179 * @return 1, if the node is of that register class, 0 if not.
static INLINE int has_reg_class(const be_chordal_env_t *env, const ir_node *irn)
	/* Delegate to the arch backend: the node counts iff it must be
	 * allocated in the class currently processed (env->cls). */
	return arch_irn_consider_in_reg_alloc(env->birg->main_env->arch_env, env->cls, irn);
186 #define has_limited_constr(req, irn) \
187 (arch_get_register_req(arch_env, (req), irn, -1) && (req)->type == arch_register_req_type_limited)
/**
 * Return the index of the next free color: the first bit that is clear
 * in @p colors after additionally masking out the class' ignore colors
 * (those must never be handed out by the allocator).
 */
static int get_next_free_reg(const be_chordal_alloc_env_t *alloc_env, bitset_t *colors)
	bitset_t *tmp = alloc_env->tmp_colors;
	bitset_copy(tmp, colors);
	bitset_or(tmp, alloc_env->chordal_env->ignore_colors);
	return bitset_next_clear(tmp, 0);
/**
 * Compute into @p bs the register set that decides the coloring of two
 * partnered operands: if one operand's admissible set contains the
 * other's, the contained (smaller) set wins.
 * NOTE(review): the guards around the first two copies are elided in
 * this view — presumably they handle a missing/NULL operand; confirm
 * against the full source.
 */
static bitset_t *get_decisive_partner_regs(bitset_t *bs, const be_operand_t *o1, const be_operand_t *o2)
		bitset_copy(bs, o2->regs);
		bitset_copy(bs, o1->regs);
	/* Both register classes must agree (or be absent). */
	assert(o1->req->cls == o2->req->cls || ! o1->req->cls || ! o2->req->cls);

	if(bitset_contains(o1->regs, o2->regs))
		bitset_copy(bs, o1->regs);
	else if(bitset_contains(o2->regs, o1->regs))
		bitset_copy(bs, o2->regs);
/**
 * Wrapper around be_scan_insn(): fill an insn environment from the
 * chordal env and scan @p irn into a be_insn_t (allocated on the
 * environment's obstack).
 */
static be_insn_t *chordal_scan_insn(be_chordal_env_t *env, ir_node *irn)
	ie.ignore_colors = env->ignore_colors;
	ie.aenv = env->birg->main_env->arch_env;
	return be_scan_insn(&ie, irn);
234 static ir_node *prepare_constr_insn(be_chordal_env_t *env, ir_node *irn)
236 const be_irg_t *birg = env->birg;
237 const arch_env_t *aenv = birg->main_env->arch_env;
238 bitset_t *tmp = bitset_alloca(env->cls->n_regs);
239 bitset_t *def_constr = bitset_alloca(env->cls->n_regs);
240 ir_node *bl = get_nodes_block(irn);
241 be_lv_t *lv = env->birg->lv;
246 for (i = get_irn_arity(irn) - 1; i >= 0; --i) {
247 ir_node *op = get_irn_n(irn, i);
249 const arch_register_t *reg;
250 const arch_register_req_t *req;
252 if (arch_get_irn_reg_class(aenv, irn, i) != env->cls)
255 reg = arch_get_irn_register(aenv, op);
257 if (reg == NULL || !arch_register_type_is(reg, ignore))
259 if(arch_register_type_is(reg, joker))
262 req = arch_get_register_req(aenv, irn, i);
263 if (!arch_register_req_is(req, limited))
266 if (rbitset_is_set(req->limited, reg->index))
269 copy = be_new_Copy(env->cls, env->irg, bl, op);
270 be_stat_ev("constr_copy", 1);
272 sched_add_before(irn, copy);
273 set_irn_n(irn, i, copy);
274 DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n", copy, irn, i));
277 insn = chordal_scan_insn(env, irn);
279 if(!insn->has_constraints)
282 /* insert copies for nodes that occur constrained more than once. */
283 for(i = insn->use_start; i < insn->n_ops; ++i) {
284 be_operand_t *op = &insn->ops[i];
286 if(!op->has_constraints)
289 for(j = i + 1; j < insn->n_ops; ++j) {
291 be_operand_t *a_op = &insn->ops[j];
293 if(a_op->carrier != op->carrier || !a_op->has_constraints)
296 /* if the constraint is the same, no copy is necessary
297 * TODO generalise unequal but overlapping constraints */
298 if (a_op->req == op->req)
301 if (be_is_Copy(get_irn_n(insn->irn, a_op->pos)))
304 copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
305 be_stat_ev("constr_copy", 1);
307 sched_add_before(insn->irn, copy);
308 set_irn_n(insn->irn, a_op->pos, copy);
309 DBG((dbg, LEVEL_3, "inserting multiple constr copy %+F for %+F pos %d\n", copy, insn->irn, a_op->pos));
313 /* collect all registers occuring in out constraints. */
314 for(i = 0; i < insn->use_start; ++i) {
315 be_operand_t *op = &insn->ops[i];
316 if(op->has_constraints)
317 bitset_or(def_constr, op->regs);
321 insert copies for all constrained arguments living through the node
322 and being constrained to a register which also occurs in out constraints.
324 for(i = insn->use_start; i < insn->n_ops; ++i) {
326 be_operand_t *op = &insn->ops[i];
328 bitset_copy(tmp, op->regs);
329 bitset_and(tmp, def_constr);
333 1) the operand is constrained.
334 2) lives through the node.
335 3) is constrained to a register occuring in out constraints.
337 if(!op->has_constraints ||
338 !values_interfere(birg, insn->irn, op->carrier) ||
339 bitset_popcnt(tmp) == 0)
343 only create the copy if the operand is no copy.
344 this is necessary since the assure constraints phase inserts
345 Copies and Keeps for operands which must be different from the
346 results. Additional copies here would destroy this.
348 if (be_is_Copy(get_irn_n(insn->irn, op->pos)))
351 copy = be_new_Copy(env->cls, env->irg, bl, op->carrier);
353 sched_add_before(insn->irn, copy);
354 set_irn_n(insn->irn, op->pos, copy);
355 DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n", copy, insn->irn, op->pos));
356 be_liveness_update(lv, op->carrier);
360 obstack_free(env->obst, insn);
361 return insn->next_insn;
/**
 * Block walker: run prepare_constr_insn() over every scheduled node of
 * the block; the callee returns the next node to process.
 */
static void pre_spill_prepare_constr_walker(ir_node *bl, void *data)
	be_chordal_env_t *env = data;
	for(irn = sched_first(bl); !sched_is_end(irn);) {
		irn = prepare_constr_insn(env, irn);
/** Insert the copies required by register constraints in every block
 *  of the graph (to be run before spilling). */
void be_pre_spill_prepare_constr(be_chordal_env_t *cenv) {
	irg_block_walk_graph(cenv->irg, pre_spill_prepare_constr_walker, NULL, (void *) cenv);
/**
 * Pair each out operand of @p insn with an in operand that may receive
 * the same register. Among the non-interfering candidates whose
 * admissible sets overlap the out operand's, the pair with the fewest
 * total admissible registers (the most constrained one) wins.
 */
static void pair_up_operands(const be_chordal_alloc_env_t *alloc_env, be_insn_t *insn)
	const be_chordal_env_t *env = alloc_env->chordal_env;

	int n_uses   = be_insn_n_uses(insn);
	int n_defs   = be_insn_n_defs(insn);
	bitset_t *bs = bitset_alloca(env->cls->n_regs);
	int *pairing = alloca(MAX(n_defs, n_uses) * sizeof(pairing[0]));

	/*
	 * For each out operand, try to find an in operand which can be assigned the
	 * same register as the out operand.
	 */
	for (j = 0; j < insn->use_start; ++j) {
		/* sentinel: larger than any real pair's total register count */
		int smallest_n_regs = 2 * env->cls->n_regs + 1;
		be_operand_t *out_op = &insn->ops[j];

		/* Try to find an in operand which has ... */
		for(i = insn->use_start; i < insn->n_ops; ++i) {
			const be_operand_t *op = &insn->ops[i];

			/* already paired, or interferes with the out value: skip */
			if (op->partner != NULL)
			if (values_interfere(env->birg, op->irn, op->carrier))
			bitset_clear_all(bs);
			bitset_copy(bs, op->regs);
			bitset_and(bs, out_op->regs);
			n_total = bitset_popcnt(op->regs) + bitset_popcnt(out_op->regs);

			/* sets overlap and this pair is more constrained: remember it */
			if (bitset_popcnt(bs) > 0 && n_total < smallest_n_regs) {
				smallest_n_regs = n_total;
			be_operand_t *partner = &insn->ops[smallest];
			/* mark every operand carrying the same value as paired */
			for(i = insn->use_start; i < insn->n_ops; ++i) {
				if(insn->ops[i].carrier == partner->carrier)
					insn->ops[i].partner = out_op;
			out_op->partner = partner;
			partner->partner = out_op;
432 static ir_node *pre_process_constraints(be_chordal_alloc_env_t *alloc_env,
433 be_insn_t **the_insn)
435 be_chordal_env_t *env = alloc_env->chordal_env;
436 const arch_env_t *aenv = env->birg->main_env->arch_env;
437 be_insn_t *insn = *the_insn;
438 ir_node *perm = NULL;
439 bitset_t *out_constr = bitset_alloca(env->cls->n_regs);
440 const ir_edge_t *edge;
443 assert(insn->has_constraints && "only do this for constrained nodes");
446 Collect all registers that occur in output constraints.
447 This is necessary, since if the insn has one of these as an input constraint
448 and the corresponding operand interferes with the insn, the operand must
451 for(i = 0; i < insn->use_start; ++i) {
452 be_operand_t *op = &insn->ops[i];
453 if(op->has_constraints)
454 bitset_or(out_constr, op->regs);
458 Make the Perm, recompute liveness and re-scan the insn since the
459 in operands are now the Projs of the Perm.
461 perm = insert_Perm_after(env->birg, env->cls, sched_prev(insn->irn));
463 /* Registers are propagated by insert_Perm_after(). Clean them here! */
467 be_stat_ev("constr_perm", get_irn_arity(perm));
468 foreach_out_edge(perm, edge) {
469 ir_node *proj = get_edge_src_irn(edge);
470 arch_set_irn_register(aenv, proj, NULL);
474 We also have to re-build the insn since the input operands are now the Projs of
475 the Perm. Recomputing liveness is also a good idea if a Perm is inserted, since
476 the live sets may change.
478 obstack_free(env->obst, insn);
479 *the_insn = insn = chordal_scan_insn(env, insn->irn);
482 Copy the input constraints of the insn to the Perm as output
483 constraints. Succeeding phases (coalescing) will need that.
485 for(i = insn->use_start; i < insn->n_ops; ++i) {
486 be_operand_t *op = &insn->ops[i];
487 ir_node *proj = op->carrier;
489 Note that the predecessor must not be a Proj of the Perm,
490 since ignore-nodes are not Perm'ed.
492 if(op->has_constraints && is_Proj(proj) && get_Proj_pred(proj) == perm) {
493 be_set_constr_limited(perm, BE_OUT_POS(get_Proj_proj(proj)), op->req);
/**
 * Handle one (possibly constrained) node: pre-process it (Perm +
 * copies), pair outs with ins, build a bipartite graph from nodes to
 * admissible colors, compute a matching and pre-color the involved
 * values accordingly; remaining Projs of the Perm get any free color.
 * @param alloc_env The allocation environment.
 * @param irn       The node to handle.
 * @param silent    In/out: suppress handling until the first Barrier.
 * @return The next node to process in the schedule.
 */
static ir_node *handle_constraints(be_chordal_alloc_env_t *alloc_env,
		ir_node *irn, int *silent)
	const arch_env_t *aenv;
	ir_node **alloc_nodes;
	//hungarian_problem_t *bp;
	const ir_edge_t *edge;
	ir_node *perm = NULL;
	//int match_res, cost;
	be_chordal_env_t *env = alloc_env->chordal_env;
	void *base = obstack_base(env->obst);
	be_insn_t *insn = chordal_scan_insn(env, irn);
	/* read next_insn now: insn lives on the obstack freed at the end */
	ir_node *res = insn->next_insn;
	int be_silent = *silent;
	be_irg_t *birg = env->birg;

	if(insn->pre_colored) {
		for(i = 0; i < insn->use_start; ++i)
			pset_insert_ptr(alloc_env->pre_colored, insn->ops[i].carrier);
	/*
	 * If the current node is a barrier toggle the silent flag.
	 * If we are in the start block, we are ought to be silent at the beginning,
	 * so the toggling activates the constraint handling but skips the barrier.
	 * If we are in the end block we handle the in requirements of the barrier
	 * and set the rest to silent.
	 */
	if(be_is_Barrier(irn))
	/*
	 * Perms inserted before the constraint handling phase are considered to be
	 * correctly precolored. These Perms arise during the ABI handling phase.
	 */
	if(!insn->has_constraints)
	aenv        = env->birg->main_env->arch_env;
	n_regs      = env->cls->n_regs;
	bs          = bitset_alloca(n_regs);
	alloc_nodes = alloca(n_regs * sizeof(alloc_nodes[0]));
	//bp = hungarian_new(n_regs, n_regs, 2, HUNGARIAN_MATCH_PERFECT);
	bp          = bipartite_new(n_regs, n_regs);
	assignment  = alloca(n_regs * sizeof(assignment[0]));
	partners    = pmap_create();
	/*
	 * prepare the constraint handling of this node.
	 * Perms are constructed and Copies are created for constrained values
	 * interfering with the instruction.
	 */
	perm = pre_process_constraints(alloc_env, &insn);

	/* find suitable in operands to the out operands of the node. */
	pair_up_operands(alloc_env, insn);
	/*
	 * look at the in/out operands and add each operand (and its possible partner)
	 * to a bipartite graph (left: nodes with partners, right: admissible colors).
	 */
	for(i = 0, n_alloc = 0; i < insn->n_ops; ++i) {
		be_operand_t *op = &insn->ops[i];
		/*
		 * If the operand has no partner or the partner has not been marked
		 * for allocation, determine the admissible registers and mark it
		 * for allocation by associating the node and its partner with the
		 * set of admissible registers via a bipartite graph.
		 */
		if(!op->partner || !pmap_contains(partners, op->partner->carrier)) {
			ir_node *partner = op->partner ? op->partner->carrier : NULL;

			pmap_insert(partners, op->carrier, partner);
				pmap_insert(partners, partner, op->carrier);
			/* don't insert a node twice */
			for(i = 0; i < n_alloc; ++i) {
				if(alloc_nodes[i] == op->carrier) {
			alloc_nodes[n_alloc] = op->carrier;

			DBG((dbg, LEVEL_2, "\tassociating %+F and %+F\n", op->carrier,
			bitset_clear_all(bs);
			get_decisive_partner_regs(bs, op, op->partner);

			DBG((dbg, LEVEL_2, "\tallowed registers for %+F: %B\n", op->carrier,
			/* one edge per admissible color of the (paired) operand */
			bitset_foreach(bs, col) {
				//hungarian_add(bp, n_alloc, col, 1);
				bipartite_add(bp, n_alloc, col);
	/*
	 * Put all nodes which live through the constrained instruction also to the
	 * allocation bipartite graph. They are considered unconstrained.
	 */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);

		assert(is_Proj(proj));

		if(!values_interfere(birg, proj, irn) || pmap_contains(partners, proj))
		/* don't insert a node twice */
		for(i = 0; i < n_alloc; ++i) {
			if(alloc_nodes[i] == proj) {
		assert(n_alloc < n_regs);

		alloc_nodes[n_alloc] = proj;
		pmap_insert(partners, proj, NULL);
		/* unconstrained: any non-ignore color of the class is allowed */
		bitset_clear_all(bs);
		arch_put_non_ignore_regs(aenv, env->cls, bs);
		bitset_andnot(bs, env->ignore_colors);
		bitset_foreach(bs, col) {
			//hungarian_add(bp, n_alloc, col, 1);
			bipartite_add(bp, n_alloc, col);
	/* Compute a valid register allocation. */
	/* NOTE(review): hungarian_* lines look like the (elided) alternative
	 * matching backend; the bipartite matcher is the active one here. */
	hungarian_prepare_cost_matrix(bp, HUNGARIAN_MODE_MAXIMIZE_UTIL);
	match_res = hungarian_solve(bp, assignment, &cost, 1);
	assert(match_res == 0 && "matching failed");
	bipartite_matching(bp, assignment);
	/* Assign colors obtained from the matching. */
	for(i = 0; i < n_alloc; ++i) {
		const arch_register_t *reg;

		assert(assignment[i] >= 0 && "there must have been a register assigned");
		reg = arch_register_for_index(env->cls, assignment[i]);
		assert(! (reg->type & arch_register_type_ignore));

		irn = alloc_nodes[i];

		arch_set_irn_register(aenv, irn, reg);
		(void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
		DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));
		/* the partner (if any) receives the same register */
		irn = pmap_get(partners, alloc_nodes[i]);
			arch_set_irn_register(aenv, irn, reg);
			(void) pset_hinsert_ptr(alloc_env->pre_colored, irn);
			DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", irn, reg->name));
	/* Allocate the non-constrained Projs of the Perm. */
	bitset_clear_all(bs);

	/* Put the colors of all Projs in a bitset. */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *reg = arch_get_irn_register(aenv, proj);
			bitset_set(bs, reg->index);
	/* Assign the not yet assigned Projs of the Perm a suitable color. */
	foreach_out_edge(perm, edge) {
		ir_node *proj = get_edge_src_irn(edge);
		const arch_register_t *reg = arch_get_irn_register(aenv, proj);

		DBG((dbg, LEVEL_2, "\tchecking reg of %+F: %s\n", proj, reg ? reg->name : "<none>"));
			col = get_next_free_reg(alloc_env, bs);
			reg = arch_register_for_index(env->cls, col);
			bitset_set(bs, reg->index);
			arch_set_irn_register(aenv, proj, reg);
			pset_insert_ptr(alloc_env->pre_colored, proj);
			DBG((dbg, LEVEL_2, "\tsetting %+F to register %s\n", proj, reg->name));
	//hungarian_free(bp);
	pmap_destroy(partners);
	/* roll the obstack back to the state before scanning the insn */
	obstack_free(env->obst, base);
733 * Handle constraint nodes in each basic block.
734 * handle_constraints() inserts Perm nodes which perm
735 * over all values live at the constrained node right in front
736 * of the constrained node. These Perms signal a constrained node.
737 * For further comments, refer to handle_constraints().
static void constraints(ir_node *bl, void *data)
	be_chordal_alloc_env_t *env = data;
	/*
	 * Start silent in the start block.
	 * The silence remains until the first barrier is seen.
	 * Each other block is begun loud.
	 */
	int silent = bl == get_irg_start_block(get_irn_irg(bl));
	/*
	 * If the block is the start block search the barrier and
	 * start handling constraints from there.
	 */
	for(irn = sched_first(bl); !sched_is_end(irn);) {
		/* handle_constraints returns the next node to process */
		irn = handle_constraints(env, irn, &silent);
762 * Annotate the register pressure to the nodes and compute
763 * the liveness intervals.
764 * @param block The block to do it for.
765 * @param env_ptr The environment.
static void pressure(ir_node *block, void *env_ptr)
/* Convenience macro for a def */
#define border_def(irn, step, real) \
	border_add(env, head, irn, step, pressure--, 1, real)

/* Convenience macro for a use */
#define border_use(irn, step, real) \
	border_add(env, head, irn, step, ++pressure, 0, real)

	be_chordal_alloc_env_t *alloc_env = env_ptr;
	be_chordal_env_t *env = alloc_env->chordal_env;
	bitset_t *live = alloc_env->live;
	be_lv_t *lv = env->birg->lv;
	unsigned pressure = 0;
	struct list_head *head;

	DBG((dbg, LEVEL_1, "Computing pressure in block %+F\n", block));
	bitset_clear_all(live);

	/* Set up the border list in the block info */
	head = obstack_alloc(env->obst, sizeof(*head));
	INIT_LIST_HEAD(head);
	assert(pmap_get(env->border_heads, block) == NULL);
	pmap_insert(env->border_heads, block, head);
	/*
	 * Make final uses of all values live out of the block.
	 * They are necessary to build up real intervals.
	 */
	be_lv_foreach(lv, block, be_lv_state_end, i) {
		ir_node *irn = be_lv_get_irn(lv, block, i);
		if(has_reg_class(env, irn)) {
			DBG((dbg, LEVEL_3, "\tMaking live: %+F/%d\n", irn, get_irn_idx(irn)));
			bitset_set(live, get_irn_idx(irn));
			border_use(irn, step, 0);
	/*
	 * Determine the last uses of a value inside the block, since they are
	 * relevant for the interval borders.
	 */
	sched_foreach_reverse(block, irn) {
		DBG((dbg, LEVEL_1, "\tinsn: %+F, pressure: %d\n", irn, pressure));
		DBG((dbg, LEVEL_2, "\tlive: %B\n", live));

		if (get_irn_mode(irn) == mode_T) {
			const ir_edge_t *edge;

			foreach_out_edge(irn, edge) {
				ir_node *proj = get_edge_src_irn(edge);
				/*
				 * If the node defines some value, which can be put into a
				 * register of the current class, make a border for it.
				 */
				if(has_reg_class(env, proj)) {
					int nr = get_irn_idx(proj);

					bitset_clear(live, nr);
					border_def(proj, step, 1);
		/*
		 * If the node defines some value, which can be put into a
		 * register of the current class, make a border for it.
		 */
		if(has_reg_class(env, irn)) {
			int nr = get_irn_idx(irn);

			bitset_clear(live, nr);
			border_def(irn, step, 1);
		/*
		 * If the node is no phi node we can examine the uses.
		 */
			for(i = 0, n = get_irn_arity(irn); i < n; ++i) {
				ir_node *op = get_irn_n(irn, i);

				if(has_reg_class(env, op)) {
					int nr = get_irn_idx(op);
					const char *msg = "-";

					/* first (i.e. last in schedule order) use: real border */
					if(!bitset_is_set(live, nr)) {
						border_use(op, step, 1);
						bitset_set(live, nr);
					DBG((dbg, LEVEL_4, "\t\t%s pos: %d, use: %+F\n", msg, i, op));
	/* values still live here are live-in: give them a non-real def border */
	bitset_foreach(live, elm) {
		ir_node *irn = get_idx_irn(env->irg, elm);
		if (be_is_live_in(lv, block, irn))
			border_def(irn, step, 0);
static void assign(ir_node *block, void *env_ptr)
	be_chordal_alloc_env_t *alloc_env = env_ptr;
	be_chordal_env_t *env = alloc_env->chordal_env;
	bitset_t *live = alloc_env->live;
	bitset_t *colors = alloc_env->colors;
	bitset_t *in_colors = alloc_env->in_colors;
	const arch_env_t *arch_env = env->birg->main_env->arch_env;
	struct list_head *head = get_block_border_head(env, block);
	be_lv_t *lv = env->birg->lv;

	/* fresh per-block state for the scratch bitsets */
	bitset_clear_all(colors);
	bitset_clear_all(live);
	bitset_clear_all(in_colors);

	DBG((dbg, LEVEL_4, "Assigning colors for block %+F\n", block));
	DBG((dbg, LEVEL_4, "\tusedef chain for block\n"));
	list_for_each_entry(border_t, b, head, list) {
		DBG((dbg, LEVEL_4, "\t%s %+F/%d\n", b->is_def ? "def" : "use",
			b->irn, get_irn_idx(b->irn)));
	/*
	 * Add initial defs for all values live in.
	 * Since their colors have already been assigned (The dominators were
	 * allocated before), we have to mark their colors as used also.
	 */
	be_lv_foreach(lv, block, be_lv_state_in, idx) {
		irn = be_lv_get_irn(lv, block, idx);
		if(has_reg_class(env, irn)) {
			const arch_register_t *reg = arch_get_irn_register(arch_env, irn);

			assert(reg && "Node must have been assigned a register");
			col = arch_register_get_index(reg);

			DBG((dbg, LEVEL_4, "%+F has reg %s\n", irn, reg->name));

			/* Mark the color of the live in value as used. */
			bitset_set(colors, col);
			bitset_set(in_colors, col);

			/* Mark the value live in. */
			bitset_set(live, get_irn_idx(irn));
	/*
	 * Mind that the sequence of defs from back to front defines a perfect
	 * elimination order. So, coloring the definitions from first to last
	 */
	list_for_each_entry_reverse(border_t, b, head, list) {
		ir_node *irn = b->irn;
		int nr = get_irn_idx(irn);
		int ignore = arch_irn_is(arch_env, irn, ignore);
		/*
		 * Assign a color, if it is a local def. Global defs already have a
		 */
		if(b->is_def && !be_is_live_in(lv, block, irn)) {
			const arch_register_t *reg;

			/* ignore nodes / pre-colored values keep their register */
			if(ignore || pset_find_ptr(alloc_env->pre_colored, irn)) {
				reg = arch_get_irn_register(arch_env, irn);

				assert(!bitset_is_set(colors, col) && "pre-colored register must be free");
			col = get_next_free_reg(alloc_env, colors);
			reg = arch_register_for_index(env->cls, col);
			assert(arch_get_irn_register(arch_env, irn) == NULL && "This node must not have been assigned a register yet");
			assert(!arch_register_type_is(reg, ignore) && "Must not assign ignore register");

			bitset_set(colors, col);
			arch_set_irn_register(arch_env, irn, reg);

			DBG((dbg, LEVEL_1, "\tassigning register %s(%d) to %+F\n", arch_register_get_name(reg), col, irn));

			assert(!bitset_is_set(live, nr) && "Value's definition must not have been encountered");
			bitset_set(live, nr);
		/* Clear the color upon a use. */
		else if(!b->is_def) {
			const arch_register_t *reg = arch_get_irn_register(arch_env, irn);

			assert(reg && "Register must have been assigned");

			col = arch_register_get_index(reg);

			if(!arch_register_type_is(reg, ignore)) {
				assert(bitset_is_set(live, nr) && "Cannot have a non live use");
			/* interval ends: the color becomes free again */
			bitset_clear(colors, col);
			bitset_clear(live, nr);
/**
 * Driver for the chordal register allocator: handle constraints, build
 * interval borders (pressure) and assign colors along the dominance
 * tree; optionally dumps the graph and the interval tree.
 */
void be_ra_chordal_color(be_chordal_env_t *chordal_env)
	be_chordal_alloc_env_t env;
	be_irg_t *birg = chordal_env->birg;
	const arch_register_class_t *cls = chordal_env->cls;
	int colors_n = arch_register_class_n_regs(cls);
	ir_graph *irg = chordal_env->irg;

	/* the walks below rely on dominance and valid liveness info */
	be_assure_dom_front(birg);
	lv = be_assure_liveness(birg);
	be_liveness_assure_sets(lv);
	be_liveness_assure_chk(lv);

	env.chordal_env = chordal_env;
	env.colors_n = colors_n;
	env.colors = bitset_alloca(colors_n);
	env.tmp_colors = bitset_alloca(colors_n);
	env.in_colors = bitset_alloca(colors_n);
	env.pre_colored = pset_new_ptr_default();

	/* Handle register targeting constraints */
	dom_tree_walk_irg(irg, constraints, NULL, &env);

	if(chordal_env->opts->dump_flags & BE_CH_DUMP_CONSTR) {
		snprintf(buf, sizeof(buf), "-%s-constr", chordal_env->cls->name);
		be_dump(chordal_env->irg, buf, dump_ir_block_graph_sched);
	/* one bit per node index for the liveness scratch set */
	env.live = bitset_malloc(get_irg_last_idx(chordal_env->irg));

	/* First, determine the pressure */
	dom_tree_walk_irg(irg, pressure, NULL, &env);

	/* Assign the colors */
	dom_tree_walk_irg(irg, assign, NULL, &env);

	if(chordal_env->opts->dump_flags & BE_CH_DUMP_TREE_INTV) {
		ir_snprintf(buf, sizeof(buf), "ifg_%s_%F.eps", chordal_env->cls->name, irg);
		plotter = new_plotter_ps(buf);
		draw_interval_tree(&draw_chordal_def_opts, chordal_env, plotter);
		plotter_free(plotter);
	bitset_free(env.live);
	del_pset(env.pre_colored);
/** Module constructor: register this pass' debug channel. */
void be_init_chordal(void)
	FIRM_DBG_REGISTER(dbg, "firm.be.chordal");

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal);