2 * Copyright (C) 1995-2007 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
27 * This is the C implementation of the mst algorithm
28 * originally written in Java by Sebastian Hack.
29 * (also known as "heur3" :)
30 * Performs simple copy minimization.
34 #endif /* HAVE_CONFIG_H */
41 #include "raw_bitset.h"
42 #include "irphase_t.h"
55 #include "becopyopt_t.h"
59 #define COL_COST_INFEASIBLE DBL_MAX
60 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
61 #define NEIGHBOUR_CONSTR_COSTS 64.0
63 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
67 #define DBG_AFF_CHUNK(env, level, chunk)
68 #define DBG_COL_COST(env, level, cost)
72 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while(0)
73 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while(0)
77 static int last_chunk_id = 0;
79 typedef struct _col_cost_t {
87 typedef struct _aff_chunk_t {
88 ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
89 bitset_t *nodes; /**< A bitset containing all nodes inside this chunk. */
90 bitset_t *interfere; /**< A bitset containing all interfering neighbours of the nodes in this chunk. */
91 int weight; /**< Weight of this chunk */
92 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
93 unsigned deleted : 1; /**< Set if the chunk was deleted. */
94 int id; /**< For debugging: An id of this chunk. */
100 typedef struct _aff_edge_t {
101 ir_node *src; /**< Source node. */
102 ir_node *tgt; /**< Target node. */
103 double weight; /**< The weight of this edge. */
106 /* main coalescing environment */
107 typedef struct _co_mst_env_t {
108 int n_regs; /**< number of regs in class */
109 int k; /**< number of non-ignore registers in class */
110 bitset_t *ignore_regs; /**< set containing all global ignore registers */
111 ir_phase ph; /**< phase object holding data for nodes */
112 pqueue *chunks; /**< priority queue for chunks */
113 pset *chunkset; /**< set holding all chunks */
114 be_ifg_t *ifg; /**< the interference graph */
115 const arch_env_t *aenv; /**< the arch environment */
116 copy_opt_t *co; /**< the copy opt object */
119 /* stores coalescing related information for a node */
120 typedef struct _co_mst_irn_t {
121 ir_node *irn; /**< the irn this information belongs to */
122 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
123 bitset_t *adm_colors; /**< set of admissible colors for this irn */
124 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
125 int n_neighs; /**< length of the interfering neighbours array. */
126 int int_aff_neigh; /**< number of interfering affinity neighbours */
127 int col; /**< color currently assigned */
128 int init_col; /**< the initial color */
129 int tmp_col; /**< a temporary assigned color */
130 unsigned fixed : 1; /**< the color is fixed */
131 struct list_head list; /**< Queue for coloring undo. */
134 #define get_co_mst_irn(mst_env, irn) (phase_get_or_set_irn_data(&(mst_env)->ph, (irn)))
136 typedef int decide_func_t(const co_mst_irn_t *node, int col);
141 * Write a chunk to stderr for debugging.
143 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c) {
	/* the cached weight is only printed when it is known to be up to date */
145 if (c->weight_consistent)
146 ir_fprintf(stderr, " $%d ", c->weight);
147 ir_fprintf(stderr, "{");
	/* print every member node, looked up by its index in the nodes bitset */
148 bitset_foreach(c->nodes, idx) {
149 ir_node *n = get_idx_irn(env->co->irg, idx);
150 ir_fprintf(stderr, " %+F,", n);
152 ir_fprintf(stderr, "}");
156 * Dump all admissible colors to stderr.
158 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node) {
	/* an empty admissible set would make the node uncolorable -> warn */
162 if (bitset_popcnt(node->adm_colors) < 1)
163 fprintf(stderr, "no admissible colors?!?");
	/* otherwise list each admissible color index */
165 bitset_foreach(node->adm_colors, idx)
166 fprintf(stderr, " %d", idx);
171 * Dump color-cost pairs to stderr.
173 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost) {
	/* print one (color, cost) pair per register of the class */
175 for (i = 0; i < env->n_regs; ++i) {
176 if (cost[i].cost == COL_COST_INFEASIBLE)
177 fprintf(stderr, " (%d, INF)", cost[i].col);
179 fprintf(stderr, " (%d, %.1f)", cost[i].col, cost[i].cost);
183 #endif /* DEBUG_libfirm */
/**
 * Returns the effective color of @p node: a temporarily assigned color
 * (tmp_col >= 0) takes precedence over the permanently assigned one.
 */
185 static INLINE int get_mst_irn_col(const co_mst_irn_t *node) {
186 return node->tmp_col >= 0 ? node->tmp_col : node->col;
190  * @return 1 if node @p node has color @p col, 0 otherwise.
192 static int decider_has_color(const co_mst_irn_t *node, int col) {
193 return get_mst_irn_col(node) == col;
197  * @return 1 if node @p node has not color @p col, 0 otherwise.
199 static int decider_hasnot_color(const co_mst_irn_t *node, int col) {
200 return get_mst_irn_col(node) != col;
204  * Always returns true.
206 static int decider_always_yes(const co_mst_irn_t *node, int col) {
212 /** compares two affinity edges by its weight */
213 static int cmp_aff_edge(const void *a, const void *b) {
214 const aff_edge_t *e1 = a;
215 const aff_edge_t *e2 = b;
	/* equal weights: break ties on node indices to get a deterministic order */
217 if (e2->weight == e1->weight) {
218 if (e2->src->node_idx == e1->src->node_idx)
219 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
221 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
223 /* sort in descending order (arguments swapped on purpose) */
224 return QSORT_CMP(e2->weight, e1->weight);
227 /** compares two color-cost pairs */
228 static int cmp_col_cost(const void *a, const void *b) {
229 const col_cost_t *c1 = a;
230 const col_cost_t *c2 = b;
	/* NOTE(review): this comparator never returns 0 — for equal costs
	 * cmp(a,b) and cmp(b,a) both yield 1, so the ordering of equal-cost
	 * entries under qsort() is unspecified/non-deterministic. Verify that
	 * callers do not rely on a stable order of equal costs. */
232 return c1->cost < c2->cost ? -1 : 1;
236 * Creates a new affinity chunk
238 static INLINE aff_chunk_t *new_aff_chunk(co_mst_env_t *env) {
239 aff_chunk_t *c = xmalloc(sizeof(*c));
	/* weight starts out unknown; nodes/interfere are sized by the irg */
241 c->weight_consistent = 0;
242 c->n = NEW_ARR_F(ir_node *, 0);
243 c->nodes = bitset_irg_malloc(env->co->irg);
244 c->interfere = bitset_irg_malloc(env->co->irg);
	/* every chunk gets a unique id from a global counter, used as hash below */
245 c->id = last_chunk_id++;
246 pset_insert(env->chunkset, c, c->id);
251 * Frees all memory allocated by an affinity chunk.
253 static INLINE void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
	/* unregister from the global chunk set before freeing the bitsets */
254 pset_remove(env->chunkset, c, c->id);
255 bitset_free(c->nodes);
256 bitset_free(c->interfere);
263 * Adds a node to an affinity chunk
265 static INLINE void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node) {
	/* already a member -> nothing to do */
268 if (bitset_is_set(c->nodes, get_irn_idx(node->irn)))
	/* adding a node invalidates the cached chunk weight */
271 c->weight_consistent = 0;
273 bitset_set(c->nodes, get_irn_idx(node->irn));
275 ARR_APP1(ir_node *, c->n, node->irn);
	/* record all interference neighbours of the node in the chunk's interfere set */
277 for (i = node->n_neighs - 1; i >= 0; --i) {
278 ir_node *neigh = node->int_neighs[i];
279 bitset_set(c->interfere, get_irn_idx(neigh));
284 * In case there is no phase information for irn, initialize it.
286 static void *co_mst_irn_init(ir_phase *ph, ir_node *irn, void *old) {
	/* reuse old data if present, otherwise allocate on the phase obstack */
287 co_mst_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
288 co_mst_env_t *env = ph->priv;
291 const arch_register_req_t *req;
292 void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
300 res->int_neighs = NULL;
301 res->int_aff_neigh = 0;
	/* initial color = index of the register currently assigned to irn */
302 res->col = arch_register_get_index(arch_get_irn_register(env->aenv, irn));
303 res->init_col = res->col;
304 INIT_LIST_HEAD(&res->list);
306 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
308 /* set admissible registers */
309 res->adm_colors = bitset_obstack_alloc(phase_obst(ph), env->n_regs);
311 /* Exclude colors not assignable to the irn */
312 req = arch_get_register_req(env->aenv, irn, -1);
313 if (arch_register_req_is(req, limited))
314 rbitset_copy_to_bitset(req->limited, res->adm_colors);
	/* unconstrained node: every color of the class is admissible initially */
316 bitset_set_all(res->adm_colors);
318 /* exclude global ignore registers as well */
319 bitset_andnot(res->adm_colors, env->ignore_regs);
321 /* set the number of interfering affinity neighbours to -1, they are calculated later */
322 res->int_aff_neigh = -1;
324 /* build list of interfering neighbours (cached; ignore nodes are filtered out) */
326 be_ifg_foreach_neighbour(env->ifg, nodes_it, irn, neigh) {
327 if (! arch_irn_is(env->aenv, neigh, ignore)) {
328 obstack_ptr_grow(phase_obst(ph), neigh);
	/* finish the obstack-grown pointer array and keep it */
332 res->int_neighs = obstack_finish(phase_obst(ph));
339 * Check if affinity chunk @p chunk interferes with node @p irn.
341 static INLINE int aff_chunk_interferes(co_mst_env_t *env, const aff_chunk_t *chunk, ir_node *irn) {
	/* O(1) lookup: the chunk's interfere bitset caches all interfering neighbours */
343 return bitset_is_set(chunk->interfere, get_irn_idx(irn));
347 * Check if there are interference edges from c1 to c2.
348 * @param env The global co_mst environment
350 * @param c2 Another chunk
351 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
353 static INLINE int aff_chunks_interfere(co_mst_env_t *env, const aff_chunk_t *c1, const aff_chunk_t *c2) {
359 /* check if there is a node in c2 having an interfering neighbor in c1 */
	/* intersect c1's interference set with c2's member set on a stack bitset */
360 tmp = bitset_alloca(get_irg_last_idx(env->co->irg));
361 tmp = bitset_copy(tmp, c1->interfere);
362 tmp = bitset_and(tmp, c2->nodes);
	/* non-empty intersection -> the chunks interfere */
364 return bitset_popcnt(tmp) > 0;
368 * Returns the affinity chunk of @p irn or creates a new
369 * one with @p irn as element if there is none assigned.
371 static INLINE aff_chunk_t *get_aff_chunk(co_mst_env_t *env, ir_node *irn) {
	/* the chunk pointer lives in the per-node phase data */
372 co_mst_irn_t *node = get_co_mst_irn(env, irn);
377 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
378 * are no interference edges from chunk(src) to chunk(tgt)).
379 * @return 1 if successful, 0 if not possible
381 static int aff_chunk_absorb(co_mst_env_t *env, ir_node *src, ir_node *tgt) {
382 aff_chunk_t *c1 = get_aff_chunk(env, src);
383 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
386 DB((dbg, LEVEL_4, "Attempt to let c1 (id %d): ", c1 ? c1->id : -1));
388 DBG_AFF_CHUNK(env, LEVEL_4, c1);
390 DB((dbg, LEVEL_4, "{%+F}", src));
392 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %d): ", c2 ? c2->id : -1));
394 DBG_AFF_CHUNK(env, LEVEL_4, c2);
396 DB((dbg, LEVEL_4, "{%+F}", tgt));
398 DB((dbg, LEVEL_4, "\n"));
403 /* no chunk exists */
404 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
	/* check whether src and tgt interfere directly (tgt among src's neighbours) */
407 for (i = mirn->n_neighs - 1; i >= 0; --i) {
408 if (mirn->int_neighs[i] == tgt)
412 /* create one containing both nodes */
413 c1 = new_aff_chunk(env);
414 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
415 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
419 /* c2 already exists: pull src into c2 if that causes no interference */
420 if (! aff_chunk_interferes(env, c2, src)) {
421 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
425 } else if (c2 == NULL) {
426 /* c1 already exists: pull tgt into c1 if that causes no interference */
427 if (! aff_chunk_interferes(env, c1, tgt)) {
428 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
431 } else if (c1 != c2 && ! aff_chunks_interfere(env, c1, c2)) {
	/* both chunks exist and do not interfere: merge c2 into c1 */
434 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx) {
435 ir_node *n = c2->n[idx];
436 co_mst_irn_t *mn = get_co_mst_irn(env, n);
	/* avoid duplicate entries in c1's node array */
440 if (! bitset_is_set(c1->nodes, get_irn_idx(n)))
441 ARR_APP1(ir_node *, c1->n, n);
	/* union the member and interference sets; merged weight must be recomputed */
444 bitset_or(c1->nodes, c2->nodes);
445 bitset_or(c1->interfere, c2->interfere);
446 c1->weight_consistent = 0;
448 delete_aff_chunk(env, c2);
451 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
455 DB((dbg, LEVEL_4, " ... absorbed\n"));
460 * Assures that the weight of the given chunk is consistent.
462 static void aff_chunk_assure_weight(const co_mst_env_t *env, aff_chunk_t *c) {
	/* recompute only when the cached weight was invalidated */
463 if (! c->weight_consistent) {
467 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
468 ir_node *n = c->n[idx];
469 const affinity_node_t *an = get_affinity_info(env->co, n);
473 co_gs_foreach_neighb(an, neigh) {
474 const ir_node *m = neigh->irn;
475 const int m_idx = get_irn_idx(m);
477 /* skip ignore nodes */
478 if (arch_irn_is(env->aenv, m, ignore))
	/* only affinity edges internal to the chunk contribute to its weight */
481 w += bitset_is_set(c->nodes, m_idx) ? neigh->costs : 0;
487 c->weight_consistent = 1;
492 * Count the number of interfering affinity neighbours
494 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an) {
495 const neighb_t *neigh;
496 ir_node *irn = an->irn;
497 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
500 co_gs_foreach_neighb(an, neigh) {
501 const ir_node *n = neigh->irn;
504 /* skip ignore nodes */
505 if (arch_irn_is(env->aenv, n, ignore))
508 /* check if the affinity neighbour also interferes (linear scan of cached neighbours) */
509 for (i = 0; i < node->n_neighs; ++i) {
510 if (node->int_neighs[i] == n) {
521 * Build chunks of nodes connected by affinity edges.
522 * We start at the heaviest affinity edge.
523 * The chunks of the two edge-defining nodes will be
524 * merged if there are no interference edges from one
525 * chunk to the other.
527 static void build_affinity_chunks(co_mst_env_t *env) {
528 void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
529 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
532 aff_chunk_t *curr_chunk;
534 /* at first we create the affinity edge objects */
535 be_ifg_foreach_node(env->ifg, nodes_it, n) {
536 int n_idx = get_irn_idx(n);
540 /* skip ignore nodes */
541 if (arch_irn_is(env->aenv, n, ignore))
544 n1 = get_co_mst_irn(env, n);
545 an = get_affinity_info(env->co, n);
	/* lazily compute the interfering-affinity-neighbour count (init'd to -1) */
550 if (n1->int_aff_neigh < 0)
551 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
553 /* build the affinity edges */
554 co_gs_foreach_neighb(an, neigh) {
555 ir_node *m = neigh->irn;
556 int m_idx = get_irn_idx(m);
558 /* record the edge in only one direction */
563 /* skip ignore nodes */
564 if (arch_irn_is(env->aenv, m, ignore))
570 n2 = get_co_mst_irn(env, m);
571 if (n2->int_aff_neigh < 0) {
572 affinity_node_t *am = get_affinity_info(env->co, m);
573 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
576 * these weights are pure hackery ;-).
577 * It's not chriswue's fault but mine.
	/* edge weight: affinity costs damped by the interfering affinity
	 * neighbour counts of both endpoints */
579 edge.weight = (double)neigh->costs / (double)(1 + n1->int_aff_neigh + n2->int_aff_neigh);
580 ARR_APP1(aff_edge_t, edges, edge);
586 /* now: sort edges (descending, see cmp_aff_edge) and build the affinity chunks */
587 len = ARR_LEN(edges);
588 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
589 for (i = 0; i < len; ++i) {
590 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
592 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
595 /* now insert all chunks into a priority queue, keyed by their weight */
596 foreach_pset(env->chunkset, curr_chunk) {
597 aff_chunk_assure_weight(env, curr_chunk);
599 DBG((dbg, LEVEL_1, "entry #%d", curr_chunk->id));
600 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
601 DBG((dbg, LEVEL_1, "\n"));
603 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
	/* nodes not reached by any affinity edge get a singleton chunk */
605 foreach_phase_irn(&env->ph, n) {
606 co_mst_irn_t *mirn = get_co_mst_irn(env, n);
608 if (mirn->chunk == NULL) {
609 /* no chunk is allocated so far, do it now */
610 aff_chunk_t *curr_chunk = new_aff_chunk(env);
611 aff_chunk_add_node(curr_chunk, mirn);
613 aff_chunk_assure_weight(env, curr_chunk);
615 DBG((dbg, LEVEL_1, "entry #%d", curr_chunk->id));
616 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
617 DBG((dbg, LEVEL_1, "\n"));
619 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
627 * Greedily collect affinity neighbours into the new chunk @p chunk starting at node @p node.
629 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
630 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
632 waitq *nodes = new_waitq();
634 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%d) from %+F, color %d:", chunk->id, node->irn, col));
636 /* init queue and chunk (BFS over affinity neighbours starting at node) */
637 waitq_put(nodes, node);
638 bitset_set(visited, get_irn_idx(node->irn));
639 aff_chunk_add_node(chunk, node);
640 DB((dbg, LEVEL_1, " %+F", node->irn));
642 /* as long as there are nodes in the queue */
643 while (! waitq_empty(nodes)) {
644 co_mst_irn_t *n = waitq_get(nodes);
645 affinity_node_t *an = get_affinity_info(env->co, n->irn);
647 /* check all affinity neighbors */
650 co_gs_foreach_neighb(an, neigh) {
651 ir_node *m = neigh->irn;
652 int m_idx = get_irn_idx(m);
655 /* skip ignore nodes */
656 if (arch_irn_is(env->aenv, m, ignore))
659 n2 = get_co_mst_irn(env, m);
661 if (! bitset_is_set(visited, m_idx) &&
664 ! aff_chunk_interferes(env, chunk, m) &&
665 bitset_is_set(orig_chunk->nodes, m_idx))
668 following conditions are met:
669 - neighbour is not visited
670 - neighbour likes the color
671 - neighbour has not yet a fixed color
672 - the new chunk doesn't interfere with the neighbour
673 - neighbour belongs or belonged once to the original chunk
675 bitset_set(visited, m_idx);
676 aff_chunk_add_node(chunk, n2);
677 DB((dbg, LEVEL_1, " %+F", n2->irn));
678 /* enqueue for further search */
679 waitq_put(nodes, n2);
685 DB((dbg, LEVEL_1, "\n"));
691 * Fragment the given chunk into chunks having given color and not having given color.
693 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp) {
694 bitset_t *visited = bitset_irg_malloc(env->co->irg);
696 aff_chunk_t *best = NULL;
	/* grow one fragment per not-yet-visited node of the original chunk */
698 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
701 aff_chunk_t *tmp_chunk;
702 decide_func_t *decider;
706 if (bitset_is_set(visited, get_irn_idx(irn)))
709 node = get_co_mst_irn(env, irn);
	/* pick the decider matching the node's current relation to col */
711 if (get_mst_irn_col(node) == col) {
712 decider = decider_has_color;
714 DBG((dbg, LEVEL_4, "\tcolor %d wanted", col));
717 decider = decider_hasnot_color;
719 DBG((dbg, LEVEL_4, "\tcolor %d forbidden", col));
722 /* create a new chunk starting at current node */
723 tmp_chunk = new_aff_chunk(env);
724 waitq_put(tmp, tmp_chunk);
725 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
726 assert(bitset_popcnt(tmp_chunk->nodes) > 0 && "No nodes added to chunk");
728 /* remember the local best */
729 aff_chunk_assure_weight(env, tmp_chunk);
730 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
734 assert(best && "No chunk found?");
735 bitset_free(visited);
740 * Initializes an array of color-cost pairs.
741 * Sets forbidden colors to costs COL_COST_INFEASIBLE and all others to @p c.
743 static INLINE void col_cost_init(co_mst_env_t *env, col_cost_t *cost, double c) {
746 for (i = 0; i < env->n_regs; ++i) {
	/* globally ignored registers are never a feasible color */
748 if (bitset_is_set(env->ignore_regs, i))
749 cost[i].cost = COL_COST_INFEASIBLE;
756 * Initializes an array of color-cost pairs.
757 * Sets all colors except color @p col to COL_COST_INFEASIBLE and @p col to 0.0
759 static INLINE void col_cost_init_single(co_mst_env_t *env, col_cost_t *cost, int col) {
760 assert(! bitset_is_set(env->ignore_regs, col) && "Attempt to use forbidden color.");
	/* start with everything infeasible, then single out @p col below */
761 col_cost_init(env, cost, COL_COST_INFEASIBLE);
768 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
769 * ATTENTION: the queue is empty after calling this function!
771 static INLINE void reject_coloring(struct list_head *nodes) {
772 co_mst_irn_t *n, *temp;
773 DB((dbg, LEVEL_4, "\treject coloring for"));
	/* _safe variant: entries are unlinked while iterating */
774 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
775 DB((dbg, LEVEL_4, " %+F", n->irn));
776 assert(n->tmp_col >= 0);
778 list_del_init(&n->list);
780 DB((dbg, LEVEL_4, "\n"));
/* Commits the temporary colors of all queued nodes and empties the list. */
783 static INLINE void materialize_coloring(struct list_head *nodes) {
784 co_mst_irn_t *n, *temp;
785 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
786 assert(n->tmp_col >= 0);
789 list_del_init(&n->list);
/* Temporarily assigns @p col to @p node and records it on @p changed for undo. */
793 static INLINE void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
796 assert(!node->fixed);
797 assert(node->tmp_col < 0);
	/* node must not already be linked into some changed-list */
798 assert(node->list.next == &node->list && node->list.prev == &node->list);
800 list_add_tail(&node->list, changed);
/* A node is "loose" when it has neither a fixed nor a temporary color. */
804 static INLINE int is_loose(co_mst_irn_t *node)
806 return !node->fixed && node->tmp_col < 0;
810 * Determines the costs for each color if it would be assigned to node @p node.
812 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs) {
813 affinity_node_t *an = get_affinity_info(env->co, node->irn);
818 col_cost_init(env, costs, 0.0);
820 /* calculate (negative) costs for affinity neighbours */
822 co_gs_foreach_neighb(an, aff_neigh) {
823 ir_node *m = aff_neigh->irn;
827 /* skip ignore nodes */
828 if (arch_irn_is(env->aenv, m, ignore))
831 neigh = get_co_mst_irn(env, m);
832 c = (double)aff_neigh->costs;
834 /* calculate costs for fixed affinity neighbours */
835 if (!is_loose(neigh)) {
836 int col = get_mst_irn_col(neigh);
	/* benefit (negative cost) for matching a fixed affinity neighbour's color */
837 costs[col].cost -= c * AFF_NEIGHBOUR_FIX_BENEFIT;
842 /* calculate (positive) costs for interfering neighbours */
843 for (i = 0; i < node->n_neighs; ++i) {
848 int_neigh = node->int_neighs[i];
	/* ignore nodes were already filtered out when the neighbour list was built */
850 assert(!arch_irn_is(env->aenv, int_neigh, ignore));
852 neigh = get_co_mst_irn(env, int_neigh);
853 col = get_mst_irn_col(neigh);
854 col_cnt = bitset_popcnt(neigh->adm_colors);
856 if (!is_loose(neigh)) {
857 /* colors of fixed interfering neighbours are infeasible */
858 costs[col].cost = COL_COST_INFEASIBLE;
860 else if (col_cnt < env->k) {
861 /* calculate costs for constrained interfering neighbours */
862 double ratio = 1.0 - ((double)col_cnt / (double)env->k);
864 bitset_foreach_clear(neigh->adm_colors, idx) {
865 /* check only explicitly forbidden colors (skip global forbidden ones) */
866 if (! bitset_is_set(env->ignore_regs, idx)) {
	/* NOTE(review): the loop iterates the neighbour's forbidden colors via
	 * `idx` but accumulates into costs[col] (the neighbour's current color),
	 * adding the same penalty once per forbidden color. Verify this is
	 * intended and not meant to be costs[idx]. */
867 costs[col].cost += ratio * NEIGHBOUR_CONSTR_COSTS;
872 DB((dbg, LEVEL_4, "\tneigh %+F, loose: %d, color: %d\n", int_neigh, is_loose(neigh), col));
875 /* set all not admissible colors to COL_COST_INFEASIBLE */
876 bitset_foreach_clear(node->adm_colors, idx)
877 costs[idx].cost = COL_COST_INFEASIBLE;
880 /* need forward declaration due to recursive call */
881 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones);
884 * Tries to change node to a color other than @p exclude_col.
885 * @return 1 if succeeded, 0 otherwise.
887 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed_ones) {
888 int col = get_mst_irn_col(node);
891 /* node already has a different color -> good, temporarily fix it */
892 if (col != exclude_col) {
894 set_temp_color(node, col, changed_ones);
898 /* The node has the color it should not have _and_ has not been visited yet. */
899 if (is_loose(node)) {
900 col_cost_t *costs = alloca(env->n_regs * sizeof(costs[0]));
902 /* Get the costs for giving the node a specific color. */
903 determine_color_costs(env, node, costs);
905 /* Since the node must not have the not_col, set the costs for that color to "infinity" */
906 costs[exclude_col].cost = COL_COST_INFEASIBLE;
908 /* sort the colors according costs, cheapest first. */
909 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost);
911 /* Try recoloring the node using the color list. */
912 res = recolor_nodes(env, node, costs, changed_ones);
919 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
920 * ATTENTION: Expect @p costs already sorted by increasing costs.
921 * @return 1 if coloring could be applied, 0 otherwise.
923 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones) {
925 struct list_head local_changed;
927 DBG((dbg, LEVEL_1, "\tRecoloring %+F with color-costs", node->irn));
928 DBG_COL_COST(env, LEVEL_1, costs);
929 DB((dbg, LEVEL_1, "\n"));
	/* try colors in the pre-sorted (cheapest-first) order */
931 for (i = 0; i < env->n_regs; ++i) {
932 int tgt_col = costs[i].col;
936 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
937 if (costs[i].cost == COL_COST_INFEASIBLE) {
941 /* Set the new color of the node and mark the node as temporarily fixed. */
942 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
943 INIT_LIST_HEAD(&local_changed);
944 set_temp_color(node, tgt_col, &local_changed);
945 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
947 /* try to color all interfering neighbours with current color forbidden */
948 for (j = 0; j < node->n_neighs; ++j) {
952 neigh = node->int_neighs[j];
954 /* skip ignore nodes */
955 if (arch_irn_is(env->aenv, neigh, ignore))
958 nn = get_co_mst_irn(env, neigh);
959 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
960 neigh, j, nn->fixed, nn->tmp_col, nn->col));
963 Try to change the color of the neighbor and record all nodes which
964 get changed in the tmp list. Add this list to the "changed" list for
965 that color. If we did not succeed to change the color of the neighbor,
966 we bail out and try the next color.
	/* only neighbours currently occupying tgt_col must move (recursive) */
968 if (get_mst_irn_col(nn) == tgt_col) {
969 /* try to color neighbour with tgt_col forbidden */
970 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed);
978 We managed to assign the target color to all neighbors, so from the perspective
979 of the current node, every thing was ok and we can return safely.
982 /* append the local_changed ones to global ones */
983 list_splice(&local_changed, changed_ones);
987 /* coloring of neighbours failed, so we try next color */
988 reject_coloring(&local_changed);
996 * Tries to bring node @p node and all it's neighbours to color @p tgt_col.
997 * @return 1 if color @p col could be applied, 0 otherwise
999 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed_ones) {
1000 int col = get_mst_irn_col(node);
1002 /* if node already has the target color -> good, temporary fix it */
1003 if (col == tgt_col) {
1004 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1006 set_temp_color(node, tgt_col, changed_ones);
1011 Node has not yet a fixed color and target color is admissible
1012 -> try to recolor node and it's affinity neighbours
1014 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
1015 col_cost_t *costs = alloca(env->n_regs * sizeof(costs[0]));
	/* cost vector: only tgt_col is feasible (see col_cost_init_single) */
1018 col_cost_init_single(env, costs, tgt_col);
1020 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1021 res = recolor_nodes(env, node, costs, changed_ones);
1022 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
	/* debug-only diagnostics explaining why the color change was impossible */
1028 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1029 if (!is_loose(node))
1030 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1032 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1033 dbg_admissible_colors(env, node);
1034 DB((dbg, LEVEL_4, ")\n"));
1043 * Tries to color an affinity chunk (or at least a part of it).
1044 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1046 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
1047 aff_chunk_t *best_chunk = NULL;
1048 int best_color = -1;
1050 waitq *tmp_chunks = new_waitq();
1051 waitq *best_starts = NULL;
1054 struct list_head changed_ones;
1056 DB((dbg, LEVEL_2, "fragmentizing chunk #%d", c->id));
1057 DBG_AFF_CHUNK(env, LEVEL_2, c);
1058 DB((dbg, LEVEL_2, "\n"));
1061 /* check which color is the "best" for the given chunk.
1062 * if we found a color which was ok for all nodes, we take it
1063 * and do not look further. (see did_all flag usage below.)
1064 * If we have many colors which fit all nodes it is hard to decide
1065 * which one to take anyway.
1066 * TODO Sebastian: Perhaps we should look at all nodes and figure out
1067 * a suitable color using costs as done above (determine_color_costs).
1069 for (col = 0; col < env->n_regs && !did_all; ++col) {
1071 waitq *good_starts = new_waitq();
1072 aff_chunk_t *local_best;
1074 /* skip ignore colors */
1075 if (bitset_is_set(env->ignore_regs, col))
1078 DB((dbg, LEVEL_3, "\ttrying color %d\n", col));
1080 /* suppose we can color all nodes to the same color */
1083 INIT_LIST_HEAD(&changed_ones);
1085 /* try to bring all nodes of given chunk to the current color. */
1086 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1087 ir_node *irn = c->n[idx];
1088 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1091 assert(! node->fixed && "Node must not have a fixed color.");
1092 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1095 The order of the colored nodes is important, so we record the successfully
1096 colored ones in the order they appeared.
1098 good = change_node_color(env, node, col, &changed_ones);
1100 waitq_put(good_starts, node);
1106 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, one_good ? "succeeded" : "failed"));
1109 /* try next color when failed */
1111 reject_coloring(&changed_ones);
1115 /* fragment the chunk according to the coloring */
1116 local_best = fragment_chunk(env, col, c, tmp_chunks);
1118 /* search the best of the good list
1119 and make it the new best if it is better than the current */
1121 aff_chunk_assure_weight(env, local_best);
1123 DB((dbg, LEVEL_4, "\t\tlocal best chunk (id %d) for color %d: ", local_best->id, col));
1124 DBG_AFF_CHUNK(env, LEVEL_4, local_best);
1126 if (! best_chunk || best_chunk->weight < local_best->weight) {
1127 best_chunk = local_best;
	/* new global best: keep its start-node queue, drop the old one */
1130 del_waitq(best_starts);
1131 best_starts = good_starts;
1132 DB((dbg, LEVEL_4, "\n\t\t... setting global best chunk (id %d), color %d\n", best_chunk->id, best_color));
1134 DB((dbg, LEVEL_4, "\n\t\t... omitting, global best is better\n"));
1135 del_waitq(good_starts);
1139 del_waitq(good_starts);
	/* colors were only set temporarily; undo before trying the next color */
1142 reject_coloring(&changed_ones);
1145 /* free all intermediate created chunks except best one */
1146 while (! waitq_empty(tmp_chunks)) {
1147 aff_chunk_t *tmp = waitq_get(tmp_chunks);
1148 if (tmp != best_chunk)
1149 delete_aff_chunk(env, tmp);
1151 del_waitq(tmp_chunks);
1153 /* return if coloring failed */
1156 del_waitq(best_starts);
1160 DB((dbg, LEVEL_2, "\tbest chunk #%d ", best_chunk->id));
1161 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1162 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1164 INIT_LIST_HEAD(&changed_ones);
	/* commit the best color to every node of the winning chunk */
1165 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1166 ir_node *irn = best_chunk->n[idx];
1167 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1170 /* bring the node to the color. */
1171 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%d\n", best_color, node->irn, best_chunk->id));
1172 INIT_LIST_HEAD(&changed_ones);
1173 res = change_node_color(env, node, best_color, &changed_ones);
1175 materialize_coloring(&changed_ones);
1180 /* remove the nodes in best chunk from original chunk */
1181 bitset_andnot(c->nodes, best_chunk->nodes);
1182 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1183 ir_node *irn = c->n[idx];
1185 if (bitset_is_set(best_chunk->nodes, get_irn_idx(irn))) {
	/* swap-with-last removal from the node array */
1186 int last = ARR_LEN(c->n) - 1;
1188 c->n[idx] = c->n[last];
1189 ARR_SHRINKLEN(c->n, last);
1194 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1195 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1196 ir_node *n = c->n[idx];
1197 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1201 /* fragment the remaining chunk and re-queue the fragments */
1202 visited = bitset_irg_malloc(env->co->irg);
1203 bitset_or(visited, best_chunk->nodes);
1204 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1205 ir_node *irn = c->n[idx];
1206 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1207 aff_chunk_t *new_chunk = new_aff_chunk(env);
1208 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1210 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1211 aff_chunk_assure_weight(env, new_chunk);
1212 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1216 /* clear obsolete chunks and free some memory */
1217 delete_aff_chunk(env, best_chunk);
1218 bitset_free(visited);
1220 del_waitq(best_starts);
1224 * Main driver for mst safe coalescing algorithm.
1226 int co_solve_heuristic_mst(copy_opt_t *co) {
1227 unsigned n_regs = co->cls->n_regs;
1228 bitset_t *ignore_regs = bitset_alloca(n_regs);
1231 co_mst_env_t mst_env;
	/* init phase: co_mst_irn_init lazily builds per-node data on demand */
1234 phase_init(&mst_env.ph, "co_mst", co->irg, PHASE_DEFAULT_GROWTH, co_mst_irn_init, &mst_env);
	/* k = number of usable (non-ignore) registers of the class */
1236 k = be_put_ignore_regs(co->cenv->birg, co->cls, ignore_regs);
1239 mst_env.n_regs = n_regs;
1241 mst_env.chunks = new_pqueue();
1243 mst_env.ignore_regs = ignore_regs;
1244 mst_env.ifg = co->cenv->ifg;
1245 mst_env.aenv = co->aenv;
1246 mst_env.chunkset = pset_new_ptr(512);
1248 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1250 /* build affinity chunks */
1251 build_affinity_chunks(&mst_env);
1253 /* color chunks as long as there are some (heaviest chunk first) */
1254 while (! pqueue_empty(mst_env.chunks)) {
1255 aff_chunk_t *chunk = pqueue_get(mst_env.chunks);
1257 color_aff_chunk(&mst_env, chunk);
1258 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%d) done\n", chunk->id));
1259 delete_aff_chunk(&mst_env, chunk);
1262 /* apply coloring: write the computed colors back as registers */
1263 foreach_phase_irn(&mst_env.ph, irn) {
1264 co_mst_irn_t *mirn = get_co_mst_irn(&mst_env, irn);
1265 const arch_register_t *reg;
1267 if (arch_irn_is(mst_env.aenv, irn, ignore))
1270 // assert(mirn->fixed && "Node should have fixed color");
1272 /* skip nodes where color hasn't changed */
1273 if (mirn->init_col == mirn->col)
1276 reg = arch_register_for_index(co->cls, mirn->col);
1277 arch_set_irn_register(co->aenv, irn, reg);
1278 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1281 /* free allocated memory */
1282 del_pqueue(mst_env.chunks);
1283 phase_free(&mst_env.ph);
1284 del_pset(mst_env.chunkset);
/* Module constructor: registers the debug module of this heuristic. */
1289 void be_init_copyheur4(void) {
1290 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");
1293 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4);