2 * Copyright (C) 1995-2007 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
27 * This is the C implementation of the mst algorithm
28 * originally written in Java by Sebastian Hack.
29 * (also known as "heur3" :)
30 * Performs simple copy minimization.
34 #endif /* HAVE_CONFIG_H */
41 #include "raw_bitset.h"
42 #include "irphase_t.h"
58 #include "becopyopt_t.h"
62 #define COL_COST_INFEASIBLE DBL_MAX
63 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
64 #define NEIGHBOUR_CONSTR_COSTS 64.0
69 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while(0)
70 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while(0)
72 static firm_dbg_module_t *dbg = NULL;
76 #define DBG_AFF_CHUNK(env, level, chunk)
77 #define DBG_COL_COST(env, level, cost)
/* Monotonic id source for chunks (debugging/identification only). */
static int last_chunk_id = 0;
/* Maximum recursion depth of the recoloring search (see recolor_nodes()). */
static int recolor_limit = 4;
/* Weight of the neighbour-chunk "dislike" term when ranking colors
 * in color_aff_chunk(); the remainder weights the chunk's own affinity. */
static double dislike_influence = 0.1;
/* NOTE(review): the struct members of _col_cost_t and the closing braces of
 * these typedefs are elided in this excerpt; from the uses below, col_cost_t
 * carries an `int col` and a `double cost`. Confirm against the full source. */

/** A (color, cost) pair used to rank register colors. */
typedef struct _col_cost_t {

/** A chunk of affinity-related nodes which should receive the same color. */
typedef struct _aff_chunk_t {
    const ir_node **n;                    /**< An ARR_F containing all nodes of the chunk. */
    bitset_t      *nodes;                 /**< A bitset containing all nodes inside this chunk. */
    bitset_t      *interfere;             /**< A bitset containing all interfering neighbours of the nodes in this chunk. */
    int            weight;                /**< Weight of this chunk */
    unsigned       weight_consistent : 1; /**< Set if the weight is consistent. */
    unsigned       deleted           : 1; /**< Set if the chunk was deleted. */
    int            id;                    /**< For debugging: An id of this chunk. */
    col_cost_t    *color_affinity;        /**< Per-color affinity costs, averaged over the chunk's nodes
                                               (filled in aff_chunk_assure_weight()). */

/** An affinity edge between two nodes, weighted by its copy costs. */
typedef struct _aff_edge_t {
    const ir_node *src;    /**< Source node. */
    const ir_node *tgt;    /**< Target node. */
    double         weight; /**< The weight of this edge. */
/* main coalescing environment */
/* NOTE(review): some members and the closing braces of these typedefs are
 * elided in this excerpt. */
typedef struct _co_mst_env_t {
    int              n_regs;        /**< number of regs in class */
    int              k;             /**< number of non-ignore registers in class */
    bitset_t         *ignore_regs;  /**< set containing all global ignore registers */
    ir_phase         ph;            /**< phase object holding data for nodes */
    pqueue           *chunks;       /**< priority queue for chunks */
    pset             *chunkset;     /**< set holding all chunks */
    be_ifg_t         *ifg;          /**< the interference graph */
    const arch_env_t *aenv;         /**< the arch environment */
    copy_opt_t       *co;           /**< the copy opt object */
    col_cost_t       **single_cols; /**< pre-built per-color cost arrays reused by change_node_color() */

/* stores coalescing related information for a node */
typedef struct _co_mst_irn_t {
    const ir_node    *irn;          /**< the irn this information belongs to */
    aff_chunk_t      *chunk;        /**< the chunk this irn belongs to */
    bitset_t         *adm_colors;   /**< set of admissible colors for this irn */
    ir_node          **int_neighs;  /**< array of all interfering neighbours (cached for speed reasons) */
    int              n_neighs;      /**< length of the interfering neighbours array. */
    int              int_aff_neigh; /**< number of interfering affinity neighbours */
    int              col;           /**< color currently assigned */
    int              init_col;      /**< the initial color */
    int              tmp_col;       /**< a temporary assigned color; < 0 means "none" */
    unsigned         fixed : 1;     /**< the color is fixed */
    struct list_head list;          /**< Queue for coloring undo. */
    double           constr_factor; /**< how register-constrained the node is: 1/n_regs (unconstrained)
                                         .. 1.0 (single admissible color); see co_mst_irn_init(). */

/* Returns (lazily creating) the per-node coalescing data of irn. */
#define get_co_mst_irn(mst_env, irn) (phase_get_or_set_irn_data(&(mst_env)->ph, (irn)))

/* Predicate type deciding whether a node "accepts" a given color. */
typedef int decide_func_t(const co_mst_irn_t *node, int col);
/**
 * Write a chunk to stderr for debugging.
 * (NOTE(review): the local index declaration and closing braces are elided
 * in this excerpt.)
 */
static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c) {
    /* the weight is only meaningful when it has been (re)computed */
    if (c->weight_consistent)
        ir_fprintf(stderr, " $%d ", c->weight);
    ir_fprintf(stderr, "{");
    bitset_foreach(c->nodes, idx) {
        ir_node *n = get_idx_irn(env->co->irg, idx);
        ir_fprintf(stderr, " %+F,", n);
    ir_fprintf(stderr, "}");
/**
 * Dump all admissible colors of @p node to stderr.
 * (NOTE(review): local declarations, an `else` and closing braces are
 * elided in this excerpt.)
 */
static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node) {
    if (bitset_popcnt(node->adm_colors) < 1)
        fprintf(stderr, "no admissible colors?!?");
    /* otherwise print every admissible color index */
    bitset_foreach(node->adm_colors, idx)
        fprintf(stderr, " %d", idx);
/**
 * Dump the (color, cost) pairs of @p cost (n_regs entries) to stderr.
 * (NOTE(review): the declaration of `i` and the closing brace are elided.)
 */
static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost) {
    for (i = 0; i < env->n_regs; ++i)
        fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
190 #endif /* DEBUG_libfirm */
192 static INLINE int get_mst_irn_col(const co_mst_irn_t *node) {
193 return node->tmp_col >= 0 ? node->tmp_col : node->col;
197 * @return 1 if node @p node has color @p col, 0 otherwise.
199 static int decider_has_color(const co_mst_irn_t *node, int col) {
200 return get_mst_irn_col(node) == col;
204 * @return 1 if node @p node has not color @p col, 0 otherwise.
206 static int decider_hasnot_color(const co_mst_irn_t *node, int col) {
207 return get_mst_irn_col(node) != col;
211 * Always returns true.
213 static int decider_always_yes(const co_mst_irn_t *node, int col) {
219 /** compares two affinity edges by its weight */
220 static int cmp_aff_edge(const void *a, const void *b) {
221 const aff_edge_t *e1 = a;
222 const aff_edge_t *e2 = b;
224 if (e2->weight == e1->weight) {
225 if (e2->src->node_idx == e1->src->node_idx)
226 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
228 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
230 /* sort in descending order */
231 return QSORT_CMP(e2->weight, e1->weight);
234 /** compares to color-cost pairs */
235 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b) {
236 const col_cost_t *c1 = a;
237 const col_cost_t *c2 = b;
238 double diff = c1->cost - c2->cost;
239 return (diff > 0) - (diff < 0);
242 static int cmp_col_cost_gt(const void *a, const void *b) {
243 const col_cost_t *c1 = a;
244 const col_cost_t *c2 = b;
245 double diff = c2->cost - c1->cost;
246 return (diff > 0) - (diff < 0);
250 * Creates a new affinity chunk
252 static INLINE aff_chunk_t *new_aff_chunk(co_mst_env_t *env) {
253 aff_chunk_t *c = xmalloc(sizeof(*c));
255 c->weight_consistent = 0;
256 c->n = NEW_ARR_F(const ir_node *, 0);
257 c->nodes = bitset_irg_malloc(env->co->irg);
258 c->interfere = bitset_irg_malloc(env->co->irg);
259 c->color_affinity = xmalloc(env->n_regs * sizeof(c->color_affinity[0]));
260 c->id = last_chunk_id++;
262 pset_insert(env->chunkset, c, c->id);
267 * Frees all memory allocated by an affinity chunk.
269 static INLINE void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
270 pset_remove(env->chunkset, c, c->id);
271 bitset_free(c->nodes);
272 bitset_free(c->interfere);
273 xfree(c->color_affinity);
280 * Adds a node to an affinity chunk
282 static INLINE void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node) {
285 if (bitset_is_set(c->nodes, get_irn_idx(node->irn)))
288 c->weight_consistent = 0;
290 bitset_set(c->nodes, get_irn_idx(node->irn));
292 ARR_APP1(ir_node *, c->n, node->irn);
294 for (i = node->n_neighs - 1; i >= 0; --i) {
295 ir_node *neigh = node->int_neighs[i];
296 bitset_set(c->interfere, get_irn_idx(neigh));
/**
 * Phase-data constructor: in case there is no phase information for irn,
 * initialize it. Records the currently assigned register, computes the
 * set of admissible colors and the constraint factor, and caches the
 * interference neighbours of @p irn.
 * (NOTE(review): several lines — an `if (!old)` style guard, the `else`
 * of the `limited` check, the neighbour counting and closing braces —
 * are elided in this excerpt.)
 */
static void *co_mst_irn_init(ir_phase *ph, const ir_node *irn, void *old) {
    co_mst_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
    co_mst_env_t *env = ph->priv;
    const arch_register_req_t *req;
    void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);

    res->int_neighs = NULL;
    res->int_aff_neigh = 0;
    /* start with the register the allocator has currently assigned */
    res->col = arch_register_get_index(arch_get_irn_register(env->aenv, irn));
    res->init_col = res->col;
    INIT_LIST_HEAD(&res->list);

    DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));

    /* set admissible registers */
    res->adm_colors = bitset_obstack_alloc(phase_obst(ph), env->n_regs);

    /* Exclude colors not assignable to the irn */
    req = arch_get_register_req(env->aenv, irn, -1);
    if (arch_register_req_is(req, limited))
        rbitset_copy_to_bitset(req->limited, res->adm_colors);
    /* NOTE(review): the following line presumably sits in the elided `else`
     * branch — an unconstrained node admits all colors. */
    bitset_set_all(res->adm_colors);

    /* exclude global ignore registers as well */
    bitset_andnot(res->adm_colors, env->ignore_regs);

    /* compute the constraint factor: 1/n_regs (all colors admissible)
     * up to 1.0 (exactly one admissible color) */
    res->constr_factor = (double) (1 + env->n_regs - bitset_popcnt(res->adm_colors)) / env->n_regs;

    /* set the number of interfering affinity neighbours to -1, they are calculated later */
    res->int_aff_neigh = -1;

    /* build list of interfering neighbours (ignore nodes excluded) */
    be_ifg_foreach_neighbour(env->ifg, nodes_it, irn, neigh) {
        if (! arch_irn_is(env->aenv, neigh, ignore)) {
            obstack_ptr_grow(phase_obst(ph), neigh);
    res->int_neighs = obstack_finish(phase_obst(ph));
359 * Check if affinity chunk @p chunk interferes with node @p irn.
361 static INLINE int aff_chunk_interferes(co_mst_env_t *env, const aff_chunk_t *chunk, const ir_node *irn) {
363 return bitset_is_set(chunk->interfere, get_irn_idx(irn));
367 * Check if there are interference edges from c1 to c2.
368 * @param env The global co_mst environment
370 * @param c2 Another chunk
371 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
373 static INLINE int aff_chunks_interfere(co_mst_env_t *env, const aff_chunk_t *c1, const aff_chunk_t *c2) {
378 /* check if there is a node in c2 having an interfering neighbor in c1 */
379 return bitset_intersect(c1->interfere, c2->nodes);
383 * Returns the affinity chunk of @p irn or creates a new
384 * one with @p irn as element if there is none assigned.
386 static INLINE aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn) {
387 co_mst_irn_t *node = get_co_mst_irn(env, irn);
/**
 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
 * are no interference edges from chunk(src) to chunk(tgt)).
 * @return 1 if successful, 0 if not possible
 * (NOTE(review): several connective lines — if/else headers, `return`s,
 * a `goto`/label or equivalent and closing braces — are elided in this
 * excerpt; the branch structure below is therefore incomplete.)
 */
static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt) {
    aff_chunk_t *c1 = get_aff_chunk(env, src);
    aff_chunk_t *c2 = get_aff_chunk(env, tgt);

    DB((dbg, LEVEL_4, "Attempt to let c1 (id %d): ", c1 ? c1->id : -1));
    DBG_AFF_CHUNK(env, LEVEL_4, c1);
    DB((dbg, LEVEL_4, "{%+F}", src));
    DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %d): ", c2 ? c2->id : -1));
    DBG_AFF_CHUNK(env, LEVEL_4, c2);
    DB((dbg, LEVEL_4, "{%+F}", tgt));
    DB((dbg, LEVEL_4, "\n"));

    /* no chunk exists */
    co_mst_irn_t *mirn = get_co_mst_irn(env, src);

    /* if tgt interferes with src, the two cannot share a chunk */
    for (i = mirn->n_neighs - 1; i >= 0; --i) {
        if (mirn->int_neighs[i] == tgt)

    /* create one containing both nodes */
    c1 = new_aff_chunk(env);
    aff_chunk_add_node(c1, get_co_mst_irn(env, src));
    aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));

    /* c2 already exists (NOTE(review): elided header, presumably `else if (c1 == NULL)`) */
    if (! aff_chunk_interferes(env, c2, src)) {
        aff_chunk_add_node(c2, get_co_mst_irn(env, src));
    } else if (c2 == NULL) {
        /* c1 already exists */
        if (! aff_chunk_interferes(env, c1, tgt)) {
            aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
    } else if (c1 != c2 && ! aff_chunks_interfere(env, c1, c2)) {
        /* merge: move every node of c2 into c1, then drop c2 */
        for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
            aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));

        bitset_or(c1->interfere, c2->interfere);
        c1->weight_consistent = 0;

        delete_aff_chunk(env, c2);
    DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
    DB((dbg, LEVEL_4, " ... absorbed\n"));
/**
 * Assures that the weight of the given chunk is consistent: recomputes
 * the per-color affinity costs (averaged over the chunk's nodes) and the
 * chunk weight from the affinity edges staying inside the chunk.
 * (NOTE(review): local declarations, the accumulation of `w` into
 * c->weight and several closing braces are elided in this excerpt.)
 */
static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c) {
    if (! c->weight_consistent) {
        /* reset the per-color affinity accumulators */
        for (i = 0; i < env->n_regs; ++i) {
            c->color_affinity[i].col = i;
            c->color_affinity[i].cost = 0.0;

        for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
            const ir_node *n = c->n[idx];
            const affinity_node_t *an = get_affinity_info(env->co, n);
            co_mst_irn_t *node = get_co_mst_irn(env, n);

            /* a constrained node votes for each of its admissible colors,
             * weighted by how constrained it is */
            if (node->constr_factor > 0.0) {
                bitset_foreach (node->adm_colors, col)
                    c->color_affinity[col].cost += node->constr_factor;

            /* sum the costs of affinity edges whose other end lies inside the chunk */
            co_gs_foreach_neighb(an, neigh) {
                const ir_node *m = neigh->irn;
                const int m_idx = get_irn_idx(m);

                /* skip ignore nodes */
                if (arch_irn_is(env->aenv, m, ignore))

                w += bitset_is_set(c->nodes, m_idx) ? neigh->costs : 0;

        /* average the color affinity over all nodes of the chunk */
        for (i = 0; i < env->n_regs; ++i)
            c->color_affinity[i].cost *= (1.0 / ARR_LEN(c->n));

        // c->weight = bitset_popcnt(c->nodes);
        c->weight_consistent = 1;
/**
 * Count the number of affinity neighbours of @p an which also interfere
 * with it in the interference graph.
 * (NOTE(review): the result accumulator, `continue`s and closing braces
 * are elided in this excerpt.)
 */
static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an) {
    const neighb_t *neigh;
    const ir_node *irn = an->irn;
    const co_mst_irn_t *node = get_co_mst_irn(env, irn);

    co_gs_foreach_neighb(an, neigh) {
        const ir_node *n = neigh->irn;

        /* skip ignore nodes */
        if (arch_irn_is(env->aenv, n, ignore))

        /* check if the affinity neighbour interferes (linear scan over the
         * cached interference neighbours) */
        for (i = 0; i < node->n_neighs; ++i) {
            if (node->int_neighs[i] == n) {
/**
 * Build chunks of nodes connected by affinity edges.
 * We start at the heaviest affinity edge.
 * The chunks of the two edge-defining nodes will be
 * merged if there are no interference edges from one
 * chunk to the other.
 * (NOTE(review): declarations, `continue`s, the one-direction edge guard
 * and closing braces are elided in this excerpt.)
 */
static void build_affinity_chunks(co_mst_env_t *env) {
    void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
    aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
    aff_chunk_t *curr_chunk;

    /* at first we create the affinity edge objects */
    be_ifg_foreach_node(env->ifg, nodes_it, n) {
        int n_idx = get_irn_idx(n);

        /* skip ignore nodes */
        if (arch_irn_is(env->aenv, n, ignore))

        n1 = get_co_mst_irn(env, n);
        an = get_affinity_info(env->co, n);

        /* lazily compute the interfering-affinity-neighbour count */
        if (n1->int_aff_neigh < 0)
            n1->int_aff_neigh = count_interfering_aff_neighs(env, an);

        /* build the affinity edges */
        co_gs_foreach_neighb(an, neigh) {
            const ir_node *m = neigh->irn;
            int m_idx = get_irn_idx(m);

            /* record the edge in only one direction */

            /* skip ignore nodes */
            if (arch_irn_is(env->aenv, m, ignore))

            n2 = get_co_mst_irn(env, m);
            if (n2->int_aff_neigh < 0) {
                affinity_node_t *am = get_affinity_info(env->co, m);
                n2->int_aff_neigh = count_interfering_aff_neighs(env, am);

            /*
             * these weights are pure hackery ;-).
             * It's not chriswue's fault but mine.
             */
            edge.weight = neigh->costs;
            ARR_APP1(aff_edge_t, edges, edge);

    /* now: sort edges and build the affinity chunks */
    len = ARR_LEN(edges);
    qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
    for (i = 0; i < len; ++i) {
        DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));

        (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);

    /* now insert all chunks into a priority queue */
    foreach_pset(env->chunkset, curr_chunk) {
        aff_chunk_assure_weight(env, curr_chunk);

        DBG((dbg, LEVEL_1, "entry #%d", curr_chunk->id));
        DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
        DBG((dbg, LEVEL_1, "\n"));

        pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);

    /* nodes that ended up in no chunk each get a singleton chunk */
    foreach_phase_irn(&env->ph, n) {
        co_mst_irn_t *mirn = get_co_mst_irn(env, n);

        if (mirn->chunk == NULL) {
            /* no chunk is allocated so far, do it now */
            aff_chunk_t *curr_chunk = new_aff_chunk(env);
            aff_chunk_add_node(curr_chunk, mirn);

            aff_chunk_assure_weight(env, curr_chunk);

            DBG((dbg, LEVEL_1, "entry #%d", curr_chunk->id));
            DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
            DBG((dbg, LEVEL_1, "\n"));

            pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/**
 * Orders the nodes of @p chunk by growing a spanning walk (driven by a
 * priority queue over affinity-edge costs) from the node with the
 * heaviest summed affinity weight.
 * (NOTE(review): declarations, the weight accumulation, `continue`s and
 * closing braces are elided in this excerpt.)
 */
static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
{
    pqueue *grow = new_pqueue();
    const ir_node *max_node = NULL;

    /* find the node with the maximal summed affinity cost */
    for (i = ARR_LEN(chunk->n) - 1; i >= 0; i--) {
        const ir_node *irn = chunk->n[i];
        affinity_node_t *an = get_affinity_info(env->co, irn);

        if (arch_irn_is(env->aenv, irn, ignore))

        co_gs_foreach_neighb(an, neigh)

        if (w > max_weight) {

    bitset_t *visited = bitset_irg_malloc(env->co->irg);

    /* mark every chunk node as not-yet-ordered */
    for (i = ARR_LEN(chunk->n) - 1; i >= 0; --i)
        bitset_add_irn(visited, chunk->n[i]);

    /* seed the walk with the heaviest node */
    pqueue_put(grow, (void *) max_node, max_weight);
    bitset_remv_irn(visited, max_node);

    while (!pqueue_empty(grow)) {
        ir_node *irn = pqueue_get(grow);
        affinity_node_t *an = get_affinity_info(env->co, irn);

        if (arch_irn_is(env->aenv, irn, ignore))

        assert(i <= ARR_LEN(chunk->n));

        /* build the affinity edges */
        co_gs_foreach_neighb(an, neigh) {
            co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);

            /* enqueue not-yet-ordered neighbours, weighted by edge cost */
            if (bitset_contains_irn(visited, node->irn)) {
                pqueue_put(grow, (void *) neigh->irn, neigh->costs);
                bitset_remv_irn(visited, node->irn);

    bitset_free(visited);
/**
 * Greedily collect affinity neighbours into the new chunk @p chunk,
 * starting at node @p node: a BFS over affinity edges, restricted by
 * the @p decider/@p col predicate and by membership in @p orig_chunk.
 * (NOTE(review): some conditions of the acceptance test — the decider
 * and is_loose checks described in the comment below — plus `continue`s
 * and closing braces are elided in this excerpt.)
 */
static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
    aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
{
    waitq *nodes = new_waitq();

    DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%d) from %+F, color %d:", chunk->id, node->irn, col));

    /* init queue and chunk */
    waitq_put(nodes, node);
    bitset_set(visited, get_irn_idx(node->irn));
    aff_chunk_add_node(chunk, node);
    DB((dbg, LEVEL_1, " %+F", node->irn));

    /* as long as there are nodes in the queue */
    while (! waitq_empty(nodes)) {
        co_mst_irn_t *n = waitq_get(nodes);
        affinity_node_t *an = get_affinity_info(env->co, n->irn);

        /* check all affinity neighbors */
        co_gs_foreach_neighb(an, neigh) {
            const ir_node *m = neigh->irn;
            int m_idx = get_irn_idx(m);

            /* skip ignore nodes */
            if (arch_irn_is(env->aenv, m, ignore))

            n2 = get_co_mst_irn(env, m);

            if (! bitset_is_set(visited, m_idx) &&
                ! aff_chunk_interferes(env, chunk, m) &&
                bitset_is_set(orig_chunk->nodes, m_idx))
                /*
                    following conditions are met:
                    - neighbour is not visited
                    - neighbour likes the color
                    - neighbour has not yet a fixed color
                    - the new chunk doesn't interfere with the neighbour
                    - neighbour belongs or belonged once to the original chunk
                */
                bitset_set(visited, m_idx);
                aff_chunk_add_node(chunk, n2);
                DB((dbg, LEVEL_1, " %+F", n2->irn));
                /* enqueue for further search */
                waitq_put(nodes, n2);

    DB((dbg, LEVEL_1, "\n"));
/**
 * Fragment the given chunk into sub-chunks of nodes having the given
 * color and sub-chunks of nodes not having it; all fragments are queued
 * on @p tmp, and the heaviest "wants the color" fragment is returned.
 * (NOTE(review): declarations, `continue`s, the `else` header, the
 * `check_for_best` bookkeeping, the final `return` and closing braces
 * are elided in this excerpt.)
 */
static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp) {
    bitset_t *visited = bitset_irg_malloc(env->co->irg);
    aff_chunk_t *best = NULL;

    for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
        aff_chunk_t *tmp_chunk;
        decide_func_t *decider;

        /* each node belongs to exactly one fragment */
        if (bitset_is_set(visited, get_irn_idx(irn)))

        node = get_co_mst_irn(env, irn);

        if (get_mst_irn_col(node) == col) {
            decider = decider_has_color;
            DBG((dbg, LEVEL_4, "\tcolor %d wanted", col));
            /* NOTE(review): else-branch header elided here */
            decider = decider_hasnot_color;
            DBG((dbg, LEVEL_4, "\tcolor %d forbidden", col));

        /* create a new chunk starting at current node */
        tmp_chunk = new_aff_chunk(env);
        waitq_put(tmp, tmp_chunk);
        expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
        assert(bitset_popcnt(tmp_chunk->nodes) > 0 && "No nodes added to chunk");

        /* remember the local best */
        aff_chunk_assure_weight(env, tmp_chunk);
        if (check_for_best && (! best || best->weight < tmp_chunk->weight))

    assert(best && "No chunk found?");
    bitset_free(visited);
829 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
830 * ATTENTION: the queue is empty after calling this function!
832 static INLINE void reject_coloring(struct list_head *nodes) {
833 co_mst_irn_t *n, *temp;
834 DB((dbg, LEVEL_4, "\treject coloring for"));
835 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
836 DB((dbg, LEVEL_4, " %+F", n->irn));
837 assert(n->tmp_col >= 0);
839 list_del_init(&n->list);
841 DB((dbg, LEVEL_4, "\n"));
844 static INLINE void materialize_coloring(struct list_head *nodes) {
845 co_mst_irn_t *n, *temp;
846 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
847 assert(n->tmp_col >= 0);
850 list_del_init(&n->list);
854 static INLINE void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
857 assert(!node->fixed);
858 assert(node->tmp_col < 0);
859 assert(node->list.next == &node->list && node->list.prev == &node->list);
860 assert(bitset_is_set(node->adm_colors, col));
862 list_add_tail(&node->list, changed);
866 static INLINE int is_loose(co_mst_irn_t *node)
868 return !node->fixed && node->tmp_col < 0;
/**
 * Determines the costs for each color if it would be assigned to node
 * @p node: a color starts at the node's constraint factor (0.0 if not
 * admissible, i.e. infeasible) and is damped for colors occupied by
 * interfering neighbours.
 * (NOTE(review): declarations, the fixed/loose neighbour distinction,
 * the neigh_cols/n_loose bookkeeping and closing braces are elided in
 * this excerpt.)
 */
static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs) {
    int *neigh_cols = alloca(env->n_regs * sizeof(*neigh_cols));

    /* initialize: admissible colors get the node's constraint factor,
     * all others 0.0 (= infeasible) */
    for (i = 0; i < env->n_regs; ++i) {
        costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : 0.0;

    for (i = 0; i < node->n_neighs; ++i) {
        co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
        int col = get_mst_irn_col(n);

        /* NOTE(review): presumably guarded by an elided fixed/loose check:
         * a neighbour's firmly-held color becomes infeasible here */
        costs[col].cost = 0.0;

    /* dampen colors proportionally to how many loose neighbours use them */
    coeff = 1.0 / n_loose;
    for (i = 0; i < env->n_regs; ++i)
        costs[i].cost *= 1.0 - coeff * neigh_cols[i];
/* need forward declaration due to recursive call */
static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);

/**
 * Tries to change @p node to any color but @p exclude_col.
 * @return 1 if succeeded, 0 otherwise.
 * (NOTE(review): `return`s and closing braces are elided in this excerpt.)
 */
static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip) {
    int col = get_mst_irn_col(node);

    /* neighbour already has a different color -> good, temporarily fix it */
    if (col != exclude_col) {
        set_temp_color(node, col, changed);

    /* The node has the color it should not have _and_ has not been visited yet. */
    if (is_loose(node)) {
        col_cost_t *costs = alloca(env->n_regs * sizeof(costs[0]));

        /* Get the costs for giving the node a specific color. */
        determine_color_costs(env, node, costs);

        /* Since the node must not have the not_col, set the costs for that
         * color to "infinity" (0.0 encodes infeasible in this scheme — see
         * the bail-out in recolor_nodes()). */
        costs[exclude_col].cost = 0.0;

        /* sort the colors by cost, most attractive first (0.0 = infeasible last) */
        qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);

        /* Try recoloring the node using the color list. */
        res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
/**
 * Tries to bring node @p node to the most attractive color and to recolor
 * all interfering neighbours away from it.
 * ATTENTION: Expects @p costs already sorted by decreasing attractiveness;
 * a cost of 0.0 marks an infeasible color.
 * @return 1 if coloring could be applied, 0 otherwise.
 * (NOTE(review): the trip/max_depth updates, `return`s/`continue`s and
 * closing braces are partly elided in this excerpt.)
 */
static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip) {
    struct list_head local_changed;

    /* bookkeeping for statistics (deepest recursion reached) */
    if (depth > *max_depth)

    /* hard depth limit: give up instead of searching the whole graph */
    if (depth >= recolor_limit)

    DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
    DBG_COL_COST(env, LEVEL_4, costs);
    DB((dbg, LEVEL_4, "\n"));

    for (i = 0; i < env->n_regs; ++i) {
        int tgt_col = costs[i].col;

        /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
        if (costs[i].cost == 0.0)

        /* Set the new color of the node and mark the node as temporarily fixed. */
        assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
        INIT_LIST_HEAD(&local_changed);
        set_temp_color(node, tgt_col, &local_changed);
        DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));

        /* try to color all interfering neighbours with current color forbidden */
        for (j = 0; j < node->n_neighs; ++j) {
            neigh = node->int_neighs[j];

            /* skip ignore nodes */
            if (arch_irn_is(env->aenv, neigh, ignore))

            nn = get_co_mst_irn(env, neigh);
            DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
                neigh, j, nn->fixed, nn->tmp_col, nn->col));

            /*
                Try to change the color of the neighbor and record all nodes which
                get changed in the tmp list. Add this list to the "changed" list for
                that color. If we did not succeed to change the color of the neighbor,
                we bail out and try the next color.
            */
            if (get_mst_irn_col(nn) == tgt_col) {
                /* try to color neighbour with tgt_col forbidden */
                neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);

        /*
            We managed to assign the target color to all neighbors, so from the perspective
            of the current node, every thing was ok and we can return safely.
        */
        /* append the local_changed ones to global ones */
        list_splice(&local_changed, changed);

        /* coloring of neighbours failed, so we try next color */
        reject_coloring(&local_changed);
/**
 * Tries to bring node @p node and all its interfering neighbours to
 * color @p tgt_col.
 * @return 1 if color @p tgt_col could be applied, 0 otherwise
 * (NOTE(review): `return`s, an `else` header and closing braces are
 * elided in this excerpt.)
 */
static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed) {
    int col = get_mst_irn_col(node);

    /* if node already has the target color -> good, temporary fix it */
    if (col == tgt_col) {
        DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
        set_temp_color(node, tgt_col, changed);

    /*
        Node has not yet a fixed color and target color is admissible
        -> try to recolor node and it's affinity neighbours
    */
    if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
        /* reuse the pre-built single-color cost array for tgt_col */
        col_cost_t *costs = env->single_cols[tgt_col];
        int res, max_depth, trip;

        DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
        res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
        DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
        stat_ev_int("heur4_recolor_depth_max", max_depth);
        stat_ev_int("heur4_recolor_trip", trip);

/* neither branch taken: report why (debug builds only) */
#ifdef DEBUG_libfirm
    if (firm_dbg_get_mask(dbg) & LEVEL_4) {
        if (!is_loose(node))
            DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
        /* NOTE(review): else-branch header elided here */
        DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
        dbg_admissible_colors(env, node);
        DB((dbg, LEVEL_4, ")\n"));
/**
 * Tries to color an affinity chunk (or at least a part of it).
 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
 * (NOTE(review): declarations, `continue`s/`goto`s, several loop bodies
 * and closing braces are elided in this excerpt; the overall structure is:
 * rank colors, try each color on the whole chunk, keep the best colored
 * fragment, manifest its color, and re-queue the remainder.)
 */
static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
    aff_chunk_t *best_chunk = NULL;
    int n_nodes = ARR_LEN(c->n);
    int best_color = -1;
    int n_int_chunks = 0;
    waitq *tmp_chunks = new_waitq();
    waitq *best_starts = NULL;
    col_cost_t *order = alloca(env->n_regs * sizeof(order[0]));
    struct list_head changed;

    DB((dbg, LEVEL_2, "fragmentizing chunk #%d", c->id));
    DBG_AFF_CHUNK(env, LEVEL_2, c);
    DB((dbg, LEVEL_2, "\n"));

    stat_ev_ctx_push_fmt("heur4_color_chunk", "%d", c->id);

    ++env->chunk_visited;

    /* compute color preference: sum up the color affinities of all
     * distinct neighbouring (interfering) chunks */
    memset(order, 0, env->n_regs * sizeof(order[0]));

    bitset_foreach (c->interfere, pos) {
        ir_node *n = get_idx_irn(env->co->irg, pos);
        co_mst_irn_t *node = get_co_mst_irn(env, n);
        aff_chunk_t *chunk = node->chunk;

        /* count each live neighbouring chunk only once per visit round */
        if (is_loose(node) && chunk && chunk->visited < env->chunk_visited && !chunk->deleted) {
            chunk->visited = env->chunk_visited;

            aff_chunk_assure_weight(env, chunk);
            for (i = 0; i < env->n_regs; ++i)
                order[i].cost += chunk->color_affinity[i].cost;

    /* blend own affinity with the neighbours' "dislike" for each color */
    for (i = 0; i < env->n_regs; ++i) {
        double dislike = n_int_chunks > 0 ? 1.0 - order[i].cost / n_int_chunks : 0.0;
        order[i].cost = (1.0 - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;

    qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);

    DBG_COL_COST(env, LEVEL_2, order);
    DB((dbg, LEVEL_2, "\n"));

    /* check which color is the "best" for the given chunk.
     * if we found a color which was ok for all nodes, we take it
     * and do not look further. (see did_all flag usage below.)
     * If we have many colors which fit all nodes it is hard to decide
     * which one to take anyway.
     * TODO Sebastian: Perhaps we should look at all nodes and figure out
     * a suitable color using costs as done above (determine_color_costs).
     */
    for (i = 0; i < env->k; ++i) {
        int col = order[i].col;
        waitq *good_starts = new_waitq();
        aff_chunk_t *local_best;

        /* skip ignore colors */
        if (bitset_is_set(env->ignore_regs, col))

        DB((dbg, LEVEL_2, "\ttrying color %d\n", col));

        /* try to bring all nodes of given chunk to the current color. */
        for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
            const ir_node *irn = c->n[idx];
            co_mst_irn_t *node = get_co_mst_irn(env, irn);

            assert(! node->fixed && "Node must not have a fixed color.");
            DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));

            /*
                The order of the colored nodes is important, so we record the successfully
                colored ones in the order they appeared.
            */
            INIT_LIST_HEAD(&changed);
            good = change_node_color(env, node, col, &changed);
            stat_ev_tim_pop("heur4_recolor");
            /* on success remember the start node and commit; on failure undo */
            waitq_put(good_starts, node);
            materialize_coloring(&changed);
            reject_coloring(&changed);

            n_succeeded += good;
            DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));

        /* unfix all nodes */
        for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
            co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);

        /* try next color when failed */
        if (n_succeeded == 0)

        /* fragment the chunk according to the coloring */
        local_best = fragment_chunk(env, col, c, tmp_chunks);

        /* search the best of the good list
           and make it the new best if it is better than the current */
        aff_chunk_assure_weight(env, local_best);

        DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %d) for color %d: ", local_best->id, col));
        DBG_AFF_CHUNK(env, LEVEL_3, local_best);

        if (! best_chunk || best_chunk->weight < local_best->weight) {
            best_chunk = local_best;
            /* replace the previous best start set by the new one */
            del_waitq(best_starts);
            best_starts = good_starts;
            DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %d), color %d\n", best_chunk->id, best_color));
            DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
            del_waitq(good_starts);

        del_waitq(good_starts);

        /* if all nodes were recolored, bail out */
        if (n_succeeded == n_nodes)

    stat_ev_int("heur4_colors_tried", i);

    /* free all intermediate created chunks except best one */
    while (! waitq_empty(tmp_chunks)) {
        aff_chunk_t *tmp = waitq_get(tmp_chunks);
        if (tmp != best_chunk)
            delete_aff_chunk(env, tmp);
    del_waitq(tmp_chunks);

    /* return if coloring failed */
    del_waitq(best_starts);

    DB((dbg, LEVEL_2, "\tbest chunk #%d ", best_chunk->id));
    DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
    DB((dbg, LEVEL_2, "using color %d\n", best_color));

    /* manifest the best color on every node of the winning fragment */
    for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
        const ir_node *irn = best_chunk->n[idx];
        co_mst_irn_t *node = get_co_mst_irn(env, irn);

        /* bring the node to the color. */
        DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%d\n", best_color, node->irn, best_chunk->id));
        INIT_LIST_HEAD(&changed);
        res = change_node_color(env, node, best_color, &changed);
        stat_ev_tim_pop("heur4_recolor");
        materialize_coloring(&changed);
        assert(list_empty(&changed));

    /* remove the nodes in best chunk from original chunk */
    bitset_andnot(c->nodes, best_chunk->nodes);
    for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
        const ir_node *irn = c->n[idx];

        /* swap-with-last removal from the ARR_F node array */
        if (bitset_is_set(best_chunk->nodes, get_irn_idx(irn))) {
            int last = ARR_LEN(c->n) - 1;

            c->n[idx] = c->n[last];
            ARR_SHRINKLEN(c->n, last);

    /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
    for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
        const ir_node *n = c->n[idx];
        co_mst_irn_t *nn = get_co_mst_irn(env, n);

    /* fragment the remaining chunk */
    visited = bitset_irg_malloc(env->co->irg);
    bitset_or(visited, best_chunk->nodes);
    for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
        const ir_node *irn = c->n[idx];
        if (! bitset_is_set(visited, get_irn_idx(irn))) {
            aff_chunk_t *new_chunk = new_aff_chunk(env);
            co_mst_irn_t *node = get_co_mst_irn(env, irn);

            expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
            aff_chunk_assure_weight(env, new_chunk);
            pqueue_put(env->chunks, new_chunk, new_chunk->weight);

    for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
        const ir_node *n = best_chunk->n[idx];
        co_mst_irn_t *nn = get_co_mst_irn(env, n);

    /* clear obsolete chunks and free some memory */
    delete_aff_chunk(env, best_chunk);
    bitset_free(visited);
    del_waitq(best_starts);

    stat_ev_ctx_pop("heur4_color_chunk");
1316 * Main driver for the MST-based safe coalescing algorithm (heur4).
1318 int co_solve_heuristic_mst(copy_opt_t *co) {
1319 unsigned n_regs = co->cls->n_regs;
1320 bitset_t *ignore_regs = bitset_alloca(n_regs);
1323 co_mst_env_t mst_env;
/* Set up the phase; co_mst_irn_init lazily attaches a co_mst_irn_t to each node. */
1328 phase_init(&mst_env.ph, "co_mst", co->irg, PHASE_DEFAULT_GROWTH, co_mst_irn_init, &mst_env);
/* Collect the registers that must be ignored for this register class. */
1330 k = be_put_ignore_regs(co->cenv->birg, co->cls, ignore_regs);
/* Fill the environment shared by all heur4 helpers (chunk queue, IFG, arch env, ...). */
1333 mst_env.n_regs = n_regs;
1335 mst_env.chunks = new_pqueue();
1337 mst_env.ignore_regs = ignore_regs;
1338 mst_env.ifg = co->cenv->ifg;
1339 mst_env.aenv = co->aenv;
1340 mst_env.chunkset = pset_new_ptr(512);
1341 mst_env.chunk_visited = 0;
1342 mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
/* Pre-build one col_cost_t vector per register; presumably each vector marks
 * exactly its own color as preferred -- the initializing lines are elided here. */
1344 for (i = 0; i < n_regs; ++i) {
1345 col_cost_t *vec = phase_alloc(&mst_env.ph, sizeof(*vec) * n_regs);
1347 mst_env.single_cols[i] = vec;
1348 for (j = 0; j < n_regs; ++j) {
1357 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1359 /* build affinity chunks */
1361 build_affinity_chunks(&mst_env);
1362 stat_ev_tim_pop("heur4_initial_chunk");
1364 /* color chunks as long as there are some */
1365 while (! pqueue_empty(mst_env.chunks)) {
1366 aff_chunk_t *chunk = pqueue_get(mst_env.chunks);
1368 color_aff_chunk(&mst_env, chunk);
1369 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%d) done\n", chunk->id));
1370 delete_aff_chunk(&mst_env, chunk);
1373 /* apply coloring */
1374 foreach_phase_irn(&mst_env.ph, irn) {
1375 co_mst_irn_t *mirn = get_co_mst_irn(&mst_env, irn);
1376 const arch_register_t *reg;
/* Ignored nodes take no part in coalescing; presumably this guard skips them. */
1378 if (arch_irn_is(mst_env.aenv, irn, ignore))
1381 // assert(mirn->fixed && "Node should have fixed color");
1383 /* skip nodes where color hasn't changed */
1384 if (mirn->init_col == mirn->col)
/* Commit the computed color by writing the corresponding register to the node. */
1387 reg = arch_register_for_index(co->cls, mirn->col);
1388 arch_set_irn_register(co->aenv, irn, reg);
1389 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1392 /* free allocated memory */
1393 del_pqueue(mst_env.chunks);
1394 phase_free(&mst_env.ph);
1395 del_pset(mst_env.chunkset);
1397 stat_ev_tim_pop("heur4_total");
/* Command line options of heur4, registered under be.ra.chordal.co.heur4
 * by be_init_copyheur4() below. They back the file-scope knobs
 * recolor_limit and dislike_influence. */
1402 static const lc_opt_table_entry_t options[] = {
1403 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1404 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
/**
 * Module initializer: walks the option-group hierarchy down to
 * be.ra.chordal.co.heur4, registers the heur4 option table there and
 * sets up the "firm.be.co.heur4" debug module.
 */
1409 void be_init_copyheur4(void) {
1410 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1411 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1412 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1413 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1414 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1416 lc_opt_add_table(heur4_grp, options);
1417 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");
/* Hook the initializer into libFirm's module-constructor mechanism. */
1421 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4);