2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
27 * This is the C implementation of the mst algorithm
28 * originally written in Java by Sebastian Hack.
29 * (also known as "heur3" :)
30 * Performs simple copy minimization.
34 #endif /* HAVE_CONFIG_H */
41 #include "raw_bitset.h"
42 #include "irphase_t.h"
58 #include "becopyopt_t.h"
62 #define COL_COST_INFEASIBLE DBL_MAX
63 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
64 #define NEIGHBOUR_CONSTR_COSTS 64.0
69 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while(0)
70 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while(0)
72 static firm_dbg_module_t *dbg = NULL;
76 #define DBG_AFF_CHUNK(env, level, chunk)
77 #define DBG_COL_COST(env, level, cost)
81 static int last_chunk_id = 0;
82 static int recolor_limit = 4;
83 static double dislike_influence = 0.1;
85 typedef struct _col_cost_t {
93 typedef struct _aff_chunk_t {
94 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
95 bitset_t *nodes; /**< A bitset containing all nodes inside this chunk. */
96 bitset_t *interfere; /**< A bitset containing all interfering neighbours of the nodes in this chunk. */
97 int weight; /**< Weight of this chunk */
98 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
99 unsigned deleted : 1; /**< For debugging: Set if the chunk was deleted. */
100 int id; /**< An id of this chunk. */
102 col_cost_t *color_affinity; /**< Per-color affinity table (env->n_regs entries), rebuilt by aff_chunk_assure_weight when weight_consistent is 0. */
108 typedef struct _aff_edge_t {
109 const ir_node *src; /**< Source node. */
110 const ir_node *tgt; /**< Target node. */
111 double weight; /**< The weight of this edge. */
114 /* main coalescing environment */
115 typedef struct _co_mst_env_t {
116 int n_regs; /**< number of regs in class */
117 int k; /**< number of non-ignore registers in class */
118 bitset_t *ignore_regs; /**< set containing all global ignore registers */
119 ir_phase ph; /**< phase object holding data for nodes */
120 pqueue *chunks; /**< priority queue for chunks */
121 pset *chunkset; /**< set holding all chunks */
122 be_ifg_t *ifg; /**< the interference graph */
123 const arch_env_t *aenv; /**< the arch environment */
124 copy_opt_t *co; /**< the copy opt object */
126 col_cost_t **single_cols; /**< precomputed per-color cost tables consumed by change_node_color — NOTE(review): initialization not visible in this excerpt, confirm in driver. */
129 /* stores coalescing related information for a node */
130 typedef struct _co_mst_irn_t {
131 const ir_node *irn; /**< the irn this information belongs to */
132 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
133 bitset_t *adm_colors; /**< set of admissible colors for this irn */
134 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
135 int n_neighs; /**< length of the interfering neighbours array. */
136 int int_aff_neigh; /**< number of interfering affinity neighbours */
137 int col; /**< color currently assigned */
138 int init_col; /**< the initial color */
139 int tmp_col; /**< a temporary assigned color */
140 unsigned fixed : 1; /**< the color is fixed */
141 struct list_head list; /**< Queue for coloring undo. */
142 double constr_factor; /**< (1 + n_regs - |adm_colors|) / n_regs; higher means more register-constrained (set in co_mst_irn_init). */
/* Fetch (or lazily create) the per-node coalescing info attached via the phase object. */
145 #define get_co_mst_irn(mst_env, irn) (phase_get_or_set_irn_data(&(mst_env)->ph, (irn)))
/* Predicate type used when growing chunks: decides whether a node "fits" a color. */
147 typedef int decide_func_t(const co_mst_irn_t *node, int col);
152 * Write a chunk to stderr for debugging.
/* Print a chunk's weight (when consistent) and its node set to stderr, e.g. " $5 { n1, n2,}". */
154 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c) {
156 if (c->weight_consistent)
157 ir_fprintf(stderr, " $%d ", c->weight);
158 ir_fprintf(stderr, "{");
159 bitset_foreach(c->nodes, idx) {
160 ir_node *n = get_idx_irn(env->co->irg, idx);
161 ir_fprintf(stderr, " %+F,", n);
163 ir_fprintf(stderr, "}");
167 * Dump all admissible colors to stderr.
169 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node) {
/* A node with an empty admissible set would be uncolorable — flag it loudly. */
173 if (bitset_popcnt(node->adm_colors) < 1)
174 fprintf(stderr, "no admissible colors?!?");
176 bitset_foreach(node->adm_colors, idx)
177 fprintf(stderr, " %d", idx);
182 * Dump color-cost pairs to stderr.
184 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost) {
186 for (i = 0; i < env->n_regs; ++i)
187 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
190 #endif /* DEBUG_libfirm */
/* Effective color of a node: a pending temporary color (tmp_col >= 0) overrides the assigned one. */
192 static INLINE int get_mst_irn_col(const co_mst_irn_t *node) {
193 return node->tmp_col >= 0 ? node->tmp_col : node->col;
197 * @return 1 if node @p node has color @p col, 0 otherwise.
199 static int decider_has_color(const co_mst_irn_t *node, int col) {
200 return get_mst_irn_col(node) == col;
204 * @return 1 if node @p node has not color @p col, 0 otherwise.
206 static int decider_hasnot_color(const co_mst_irn_t *node, int col) {
207 return get_mst_irn_col(node) != col;
211 * Always returns true.
213 static int decider_always_yes(const co_mst_irn_t *node, int col) {
219 /** Compares two affinity edges by weight (descending); ties broken by src then tgt node index for a stable, deterministic qsort order. */
220 static int cmp_aff_edge(const void *a, const void *b) {
221 const aff_edge_t *e1 = a;
222 const aff_edge_t *e2 = b;
224 if (e2->weight == e1->weight) {
225 if (e2->src->node_idx == e1->src->node_idx)
226 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
228 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
230 /* sort in descending order */
231 return QSORT_CMP(e2->weight, e1->weight);
234 /** Compares two color-cost pairs: ascending by cost (cheapest first). */
235 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b) {
236 const col_cost_t *c1 = a;
237 const col_cost_t *c2 = b;
238 double diff = c1->cost - c2->cost;
239 return (diff > 0) - (diff < 0);
/* Descending by cost (largest first) — used where a high cost value means "preferred". */
242 static int cmp_col_cost_gt(const void *a, const void *b) {
243 const col_cost_t *c1 = a;
244 const col_cost_t *c2 = b;
245 double diff = c2->cost - c1->cost;
246 return (diff > 0) - (diff < 0);
250 * Creates a new affinity chunk, registers it in env->chunkset and hands ownership to the caller/chunkset.
252 static INLINE aff_chunk_t *new_aff_chunk(co_mst_env_t *env) {
253 aff_chunk_t *c = xmalloc(sizeof(*c));
255 c->weight_consistent = 0;
256 c->n = NEW_ARR_F(const ir_node *, 0);
257 c->nodes = bitset_irg_malloc(env->co->irg);
258 c->interfere = bitset_irg_malloc(env->co->irg);
259 c->color_affinity = xmalloc(env->n_regs * sizeof(c->color_affinity[0]));
/* Each chunk gets a fresh, globally unique id (used as hash for the pset). */
261 c->id = last_chunk_id++;
263 pset_insert(env->chunkset, c, c->id);
268 * Frees all memory allocated by an affinity chunk and removes it from env->chunkset.
270 static INLINE void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
271 pset_remove(env->chunkset, c, c->id);
272 bitset_free(c->nodes);
273 bitset_free(c->interfere);
274 xfree(c->color_affinity);
281 * Adds a node to an affinity chunk (no-op if already a member) and merges the node's cached interference neighbours into the chunk's interfere set.
283 static INLINE void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node) {
286 if (bitset_is_set(c->nodes, get_irn_idx(node->irn)))
/* Membership changed, so the cached weight is stale. */
289 c->weight_consistent = 0;
291 bitset_set(c->nodes, get_irn_idx(node->irn));
293 ARR_APP1(ir_node *, c->n, node->irn);
295 for (i = node->n_neighs - 1; i >= 0; --i) {
296 ir_node *neigh = node->int_neighs[i];
297 bitset_set(c->interfere, get_irn_idx(neigh));
302 * In case there is no phase information for irn, initialize it.
/* Phase callback: lazily builds the co_mst_irn_t for a node — current/initial color,
 * admissible color set, constraint factor, and a cached list of interfering neighbours. */
304 static void *co_mst_irn_init(ir_phase *ph, const ir_node *irn, void *old) {
305 co_mst_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
306 co_mst_env_t *env = ph->priv;
309 const arch_register_req_t *req;
310 void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
318 res->int_neighs = NULL;
319 res->int_aff_neigh = 0;
/* Start from the register currently assigned to the node. */
320 res->col = arch_register_get_index(arch_get_irn_register(env->aenv, irn));
321 res->init_col = res->col;
322 INIT_LIST_HEAD(&res->list);
324 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
326 /* set admissible registers */
327 res->adm_colors = bitset_obstack_alloc(phase_obst(ph), env->n_regs);
329 /* Exclude colors not assignable to the irn */
330 req = arch_get_register_req(env->aenv, irn, -1);
331 if (arch_register_req_is(req, limited))
332 rbitset_copy_to_bitset(req->limited, res->adm_colors)
334 bitset_set_all(res->adm_colors);
336 /* exclude global ignore registers as well */
337 bitset_andnot(res->adm_colors, env->ignore_regs);
339 /* compute the constraint factor: fewer admissible colors => larger factor (max 1.0 apart from the +1 bias) */
340 res->constr_factor = (double) (1 + env->n_regs - bitset_popcnt(res->adm_colors)) / env->n_regs;
342 /* set the number of interfering affinity neighbours to -1, they are calculated later */
343 res->int_aff_neigh = -1;
345 /* build list of interfering neighbours (ignore nodes skipped); collected on the phase obstack */
347 be_ifg_foreach_neighbour(env->ifg, nodes_it, irn, neigh) {
348 if (! arch_irn_is(env->aenv, neigh, ignore)) {
349 obstack_ptr_grow(phase_obst(ph), neigh);
353 res->int_neighs = obstack_finish(phase_obst(ph));
360 * Check if affinity chunk @p chunk interferes with node @p irn.
362 static INLINE int aff_chunk_interferes(co_mst_env_t *env, const aff_chunk_t *chunk, const ir_node *irn) {
364 return bitset_is_set(chunk->interfere, get_irn_idx(irn));
368 * Check if there are interference edges from c1 to c2.
369 * @param env The global co_mst environment
371 * @param c2 Another chunk
372 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
374 static INLINE int aff_chunks_interfere(co_mst_env_t *env, const aff_chunk_t *c1, const aff_chunk_t *c2) {
379 /* check if there is a node in c2 having an interfering neighbor in c1 */
380 return bitset_intersect(c1->interfere, c2->nodes);
384 * Returns the affinity chunk of @p irn or creates a new
385 * one with @p irn as element if there is none assigned.
387 static INLINE aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn) {
388 co_mst_irn_t *node = get_co_mst_irn(env, irn);
393 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
394 * are no interference edges from chunk(src) to chunk(tgt)).
395 * @return 1 if successful, 0 if not possible
/* Four cases below: neither node has a chunk yet; only tgt has one; only src has one;
 * both have distinct, non-interfering chunks (then c1 swallows c2 and c2 is deleted). */
397 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt) {
398 aff_chunk_t *c1 = get_aff_chunk(env, src);
399 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
402 DB((dbg, LEVEL_4, "Attempt to let c1 (id %d): ", c1 ? c1->id : -1));
404 DBG_AFF_CHUNK(env, LEVEL_4, c1);
406 DB((dbg, LEVEL_4, "{%+F}", src));
408 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %d): ", c2 ? c2->id : -1));
410 DBG_AFF_CHUNK(env, LEVEL_4, c2);
412 DB((dbg, LEVEL_4, "{%+F}", tgt));
414 DB((dbg, LEVEL_4, "\n"));
419 /* no chunk exists: bail out if src and tgt interfere directly, else start a fresh chunk */
420 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
423 for (i = mirn->n_neighs - 1; i >= 0; --i) {
424 if (mirn->int_neighs[i] == tgt)
428 /* create one containing both nodes */
429 c1 = new_aff_chunk(env);
430 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
431 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
435 /* c2 already exists: pull src into it if that causes no interference */
436 if (! aff_chunk_interferes(env, c2, src)) {
437 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
441 } else if (c2 == NULL) {
442 /* c1 already exists: symmetric case, pull tgt into c1 */
443 if (! aff_chunk_interferes(env, c1, tgt)) {
444 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
447 } else if (c1 != c2 && ! aff_chunks_interfere(env, c1, c2)) {
/* merge: move every node of c2 into c1, union the interference sets, drop c2 */
450 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
451 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
453 bitset_or(c1->interfere, c2->interfere);
454 c1->weight_consistent = 0;
456 delete_aff_chunk(env, c2);
459 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
463 DB((dbg, LEVEL_4, " ... absorbed\n"));
468 * Assures that the weight of the given chunk is consistent.
/* Recomputes c->weight and c->color_affinity if stale: each node contributes its
 * constr_factor to every admissible color, and affinity-edge costs to in-chunk
 * neighbours accumulate into the weight. Color affinities are normalized by chunk size. */
470 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c) {
471 if (! c->weight_consistent) {
475 for (i = 0; i < env->n_regs; ++i) {
476 c->color_affinity[i].col = i;
477 c->color_affinity[i].cost = 0.0;
480 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
481 const ir_node *n = c->n[idx];
482 const affinity_node_t *an = get_affinity_info(env->co, n);
483 co_mst_irn_t *node = get_co_mst_irn(env, n);
486 if (node->constr_factor > 0.0) {
488 bitset_foreach (node->adm_colors, col)
489 c->color_affinity[col].cost += node->constr_factor;
494 co_gs_foreach_neighb(an, neigh) {
495 const ir_node *m = neigh->irn;
496 const int m_idx = get_irn_idx(m);
498 /* skip ignore nodes */
499 if (arch_irn_is(env->aenv, m, ignore))
/* only affinity edges internal to the chunk count towards its weight */
502 w += bitset_is_set(c->nodes, m_idx) ? neigh->costs : 0;
/* average the per-color sums over the number of chunk nodes */
507 for (i = 0; i < env->n_regs; ++i)
508 c->color_affinity[i].cost *= (1.0 / ARR_LEN(c->n));
511 // c->weight = bitset_popcnt(c->nodes);
512 c->weight_consistent = 1;
517 * Count the number of interfering affinity neighbours
/* For each affinity neighbour (ignore nodes skipped), scan the node's cached
 * interference list; a hit means the affinity edge can never be coalesced. */
519 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an) {
520 const neighb_t *neigh;
521 const ir_node *irn = an->irn;
522 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
525 co_gs_foreach_neighb(an, neigh) {
526 const ir_node *n = neigh->irn;
529 /* skip ignore nodes */
530 if (arch_irn_is(env->aenv, n, ignore))
533 /* check if the affinity neighbour interfere */
534 for (i = 0; i < node->n_neighs; ++i) {
535 if (node->int_neighs[i] == n) {
546 * Build chunks of nodes connected by affinity edges.
547 * We start at the heaviest affinity edge.
548 * The chunks of the two edge-defining nodes will be
549 * merged if there are no interference edges from one
550 * chunk to the other.
552 static void build_affinity_chunks(co_mst_env_t *env) {
553 void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
554 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
557 aff_chunk_t *curr_chunk;
559 /* at first we create the affinity edge objects */
560 be_ifg_foreach_node(env->ifg, nodes_it, n) {
561 int n_idx = get_irn_idx(n);
565 /* skip ignore nodes */
566 if (arch_irn_is(env->aenv, n, ignore))
569 n1 = get_co_mst_irn(env, n);
570 an = get_affinity_info(env->co, n);
/* compute interfering-affinity-neighbour counts lazily, once per node */
575 if (n1->int_aff_neigh < 0)
576 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
578 /* build the affinity edges */
579 co_gs_foreach_neighb(an, neigh) {
580 const ir_node *m = neigh->irn;
581 int m_idx = get_irn_idx(m);
583 /* record the edge in only one direction */
588 /* skip ignore nodes */
589 if (arch_irn_is(env->aenv, m, ignore))
595 n2 = get_co_mst_irn(env, m);
596 if (n2->int_aff_neigh < 0) {
597 affinity_node_t *am = get_affinity_info(env->co, m);
598 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
601 * these weights are pure hackery ;-).
602 * It's not chriswue's fault but mine.
604 edge.weight = neigh->costs;
605 ARR_APP1(aff_edge_t, edges, edge);
611 /* now: sort edges (heaviest first, see cmp_aff_edge) and build the affinity chunks greedily */
612 len = ARR_LEN(edges);
613 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
614 for (i = 0; i < len; ++i) {
615 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
617 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
620 /* now insert all chunks into a priority queue, keyed by their (freshly assured) weight */
621 foreach_pset(env->chunkset, curr_chunk) {
622 aff_chunk_assure_weight(env, curr_chunk);
624 DBG((dbg, LEVEL_1, "entry #%d", curr_chunk->id));
625 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
626 DBG((dbg, LEVEL_1, "\n"));
628 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* nodes untouched by any affinity edge still need a (singleton) chunk */
630 foreach_phase_irn(&env->ph, n) {
631 co_mst_irn_t *mirn = get_co_mst_irn(env, n);
633 if (mirn->chunk == NULL) {
634 /* no chunk is allocated so far, do it now */
635 aff_chunk_t *curr_chunk = new_aff_chunk(env);
636 aff_chunk_add_node(curr_chunk, mirn);
638 aff_chunk_assure_weight(env, curr_chunk);
640 DBG((dbg, LEVEL_1, "entry #%d", curr_chunk->id));
641 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
642 DBG((dbg, LEVEL_1, "\n"));
644 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* Orders the nodes of a chunk by a priority-queue driven walk: first find the node
 * with the highest summed affinity costs, then grow outwards along affinity edges
 * (heaviest first), removing nodes from 'visited' as they are reached.
 * NOTE(review): the reordering target (pqueue_put at original line 697?) is elided
 * from this excerpt — confirm against the full file. */
651 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
653 pqueue *grow = new_pqueue();
654 const ir_node *max_node = NULL;
/* pass 1: pick the seed node with the maximal total affinity weight */
658 for (i = ARR_LEN(chunk->n) - 1; i >= 0; i--) {
659 const ir_node *irn = chunk->n[i];
660 affinity_node_t *an = get_affinity_info(env->co, irn);
664 if (arch_irn_is(env->aenv, irn, ignore))
668 co_gs_foreach_neighb(an, neigh)
671 if (w > max_weight) {
/* pass 2: BFS-like growth from the seed; 'visited' holds nodes not yet reached */
679 bitset_t *visited = bitset_irg_malloc(env->co->irg);
681 for (i = ARR_LEN(chunk->n) - 1; i >= 0; --i)
682 bitset_add_irn(visited, chunk->n[i]);
684 pqueue_put(grow, (void *) max_node, max_weight);
685 bitset_remv_irn(visited, max_node);
687 while (!pqueue_empty(grow)) {
688 ir_node *irn = pqueue_get(grow);
689 affinity_node_t *an = get_affinity_info(env->co, irn);
692 if (arch_irn_is(env->aenv, irn, ignore))
695 assert(i <= ARR_LEN(chunk->n));
700 /* build the affinity edges */
701 co_gs_foreach_neighb(an, neigh) {
702 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
704 if (bitset_contains_irn(visited, node->irn)) {
705 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
706 bitset_remv_irn(visited, node->irn);
712 bitset_free(visited);
717 * Greedy collect affinity neighbours into the new chunk @p chunk starting at node @p node.
/* BFS over affinity edges: a neighbour joins @p chunk only if it passes @p decider
 * for @p col, is not visited/fixed, does not interfere with the chunk, and belonged
 * to the original chunk @p orig_chunk. */
719 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
720 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
722 waitq *nodes = new_waitq();
724 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%d) from %+F, color %d:", chunk->id, node->irn, col));
726 /* init queue and chunk */
727 waitq_put(nodes, node);
728 bitset_set(visited, get_irn_idx(node->irn));
729 aff_chunk_add_node(chunk, node);
730 DB((dbg, LEVEL_1, " %+F", node->irn));
732 /* as long as there are nodes in the queue */
733 while (! waitq_empty(nodes)) {
734 co_mst_irn_t *n = waitq_get(nodes);
735 affinity_node_t *an = get_affinity_info(env->co, n->irn);
737 /* check all affinity neighbors */
740 co_gs_foreach_neighb(an, neigh) {
741 const ir_node *m = neigh->irn;
742 int m_idx = get_irn_idx(m);
745 /* skip ignore nodes */
746 if (arch_irn_is(env->aenv, m, ignore))
749 n2 = get_co_mst_irn(env, m);
751 if (! bitset_is_set(visited, m_idx) &&
754 ! aff_chunk_interferes(env, chunk, m) &&
755 bitset_is_set(orig_chunk->nodes, m_idx))
758 following conditions are met:
759 - neighbour is not visited
760 - neighbour likes the color
761 - neighbour has not yet a fixed color
762 - the new chunk doesn't interfere with the neighbour
763 - neighbour belongs or belonged once to the original chunk
765 bitset_set(visited, m_idx);
766 aff_chunk_add_node(chunk, n2);
767 DB((dbg, LEVEL_1, " %+F", n2->irn));
768 /* enqueue for further search */
769 waitq_put(nodes, n2);
775 DB((dbg, LEVEL_1, "\n"));
781 * Fragment the given chunk into chunks having given color and not having given color.
/* Every resulting fragment is appended to @p tmp (caller frees non-best ones);
 * the heaviest fragment among the "has color" ones is returned as candidate. */
783 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp) {
784 bitset_t *visited = bitset_irg_malloc(env->co->irg);
786 aff_chunk_t *best = NULL;
788 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
791 aff_chunk_t *tmp_chunk;
792 decide_func_t *decider;
/* each node seeds at most one fragment */
796 if (bitset_is_set(visited, get_irn_idx(irn)))
799 node = get_co_mst_irn(env, irn);
801 if (get_mst_irn_col(node) == col) {
802 decider = decider_has_color;
804 DBG((dbg, LEVEL_4, "\tcolor %d wanted", col));
807 decider = decider_hasnot_color;
809 DBG((dbg, LEVEL_4, "\tcolor %d forbidden", col));
812 /* create a new chunk starting at current node */
813 tmp_chunk = new_aff_chunk(env);
814 waitq_put(tmp, tmp_chunk);
815 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
816 assert(bitset_popcnt(tmp_chunk->nodes) > 0 && "No nodes added to chunk");
818 /* remember the local best */
819 aff_chunk_assure_weight(env, tmp_chunk);
820 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
824 assert(best && "No chunk found?");
825 bitset_free(visited);
830 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
831 * ATTENTION: the queue is empty after calling this function!
833 static INLINE void reject_coloring(struct list_head *nodes) {
834 co_mst_irn_t *n, *temp;
835 DB((dbg, LEVEL_4, "\treject coloring for"));
836 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
837 DB((dbg, LEVEL_4, " %+F", n->irn));
838 assert(n->tmp_col >= 0);
840 list_del_init(&n->list);
842 DB((dbg, LEVEL_4, "\n"));
/* Commits the temporary colors of all nodes in @p nodes (list emptied as a side effect). */
845 static INLINE void materialize_coloring(struct list_head *nodes) {
846 co_mst_irn_t *n, *temp;
847 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
848 assert(n->tmp_col >= 0);
851 list_del_init(&n->list);
/* Records @p col as tentative color for @p node and links it into @p changed for later
 * commit (materialize_coloring) or rollback (reject_coloring). Node must be loose,
 * unlinked, and the color admissible. */
855 static INLINE void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
858 assert(!node->fixed);
859 assert(node->tmp_col < 0);
860 assert(node->list.next == &node->list && node->list.prev == &node->list);
861 assert(bitset_is_set(node->adm_colors, col));
863 list_add_tail(&node->list, changed);
/* A node is "loose" if its color is neither permanently nor temporarily fixed. */
867 static INLINE int is_loose(co_mst_irn_t *node)
869 return !node->fixed && node->tmp_col < 0;
873 * Determines the costs for each color if it would be assigned to node @p node.
/* Here a HIGHER cost value means MORE desirable: admissible colors start at the node's
 * constr_factor and are then damped by the fraction of loose neighbours already using
 * that color; colors of non-loose neighbours are zeroed (unusable). */
875 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs) {
876 int *neigh_cols = alloca(env->n_regs * sizeof(*neigh_cols));
881 for (i = 0; i < env->n_regs; ++i) {
884 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : 0.0;
887 for (i = 0; i < node->n_neighs; ++i) {
888 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
889 int col = get_mst_irn_col(n);
/* a fixed/temporarily-fixed neighbour makes its color infeasible for this node */
896 costs[col].cost = 0.0;
900 coeff = 1.0 / n_loose;
901 for (i = 0; i < env->n_regs; ++i)
902 costs[i].cost *= 1.0 - coeff * neigh_cols[i];
906 /* need forward declaration due to recursive call */
907 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
910 * Tries to change node to a color but @p exclude_col.
911 * @return 1 if succeeded, 0 otherwise.
913 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip) {
914 int col = get_mst_irn_col(node);
917 /* neighbours has already a different color -> good, temporary fix it */
918 if (col != exclude_col) {
920 set_temp_color(node, col, changed);
924 /* The node has the color it should not have _and_ has not been visited yet. */
925 if (is_loose(node)) {
926 col_cost_t *costs = alloca(env->n_regs * sizeof(costs[0]));
928 /* Get the costs for giving the node a specific color. */
929 determine_color_costs(env, node, costs);
931 /* The node must not keep exclude_col: zero its cost, which marks it infeasible
 * (recolor_nodes bails out on cost 0.0; higher cost = more desirable here). */
932 costs[exclude_col].cost = 0.0;
934 /* sort the colors by desirability, best (highest cost value) first */
935 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
937 /* Try recoloring the node using the color list. */
938 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
945 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
946 * ATTENTION: Expects @p costs already sorted (most desirable color first, see cmp_col_cost_gt).
947 * @return 1 if coloring could be applied, 0 otherwise.
/* Recursion is bounded by recolor_limit; depth statistics are tracked via max_depth/trip.
 * All tentative assignments go through local_changed so a failed color can be rolled back. */
949 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip) {
951 struct list_head local_changed;
954 if (depth > *max_depth)
957 if (depth >= recolor_limit)
960 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
961 DBG_COL_COST(env, LEVEL_4, costs);
962 DB((dbg, LEVEL_4, "\n"));
964 for (i = 0; i < env->n_regs; ++i) {
965 int tgt_col = costs[i].col;
969 /* Cost 0.0 marks an infeasible color; since the list is sorted best-first, all
 * remaining entries are infeasible too — bail out. */
970 if (costs[i].cost == 0.0)
973 /* Set the new color of the node and mark the node as temporarily fixed. */
974 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
975 INIT_LIST_HEAD(&local_changed);
976 set_temp_color(node, tgt_col, &local_changed);
977 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
979 /* try to color all interfering neighbours with current color forbidden */
980 for (j = 0; j < node->n_neighs; ++j) {
984 neigh = node->int_neighs[j];
986 /* skip ignore nodes */
987 if (arch_irn_is(env->aenv, neigh, ignore))
990 nn = get_co_mst_irn(env, neigh);
991 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
992 neigh, j, nn->fixed, nn->tmp_col, nn->col));
995 Try to change the color of the neighbor and record all nodes which
996 get changed in the tmp list. Add this list to the "changed" list for
997 that color. If we did not succeed to change the color of the neighbor,
998 we bail out and try the next color.
1000 if (get_mst_irn_col(nn) == tgt_col) {
1001 /* try to color neighbour with tgt_col forbidden */
1002 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1010 We managed to assign the target color to all neighbors, so from the perspective
1011 of the current node, every thing was ok and we can return safely.
1014 /* append the local_changed ones to global ones */
1015 list_splice(&local_changed, changed);
1019 /* coloring of neighbours failed, so we try next color */
1020 reject_coloring(&local_changed);
1028 * Tries to bring node @p node and all it's neighbours to color @p tgt_col.
1029 * @return 1 if color @p col could be applied, 0 otherwise
/* Fast path: node already has tgt_col -> just pin it temporarily. Otherwise attempt a
 * bounded recoloring using the precomputed single-color cost table env->single_cols. */
1031 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed) {
1032 int col = get_mst_irn_col(node);
1034 /* if node already has the target color -> good, temporary fix it */
1035 if (col == tgt_col) {
1036 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1038 set_temp_color(node, tgt_col, changed);
1043 Node has not yet a fixed color and target color is admissible
1044 -> try to recolor node and it's affinity neighbours
1046 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
1047 col_cost_t *costs = env->single_cols[tgt_col];
1048 int res, max_depth, trip;
1053 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1054 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1055 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1056 stat_ev_int("heur4_recolor_depth_max", max_depth);
1057 stat_ev_int("heur4_recolor_trip", trip);
/* debug-only diagnostics explaining why the color change was impossible */
1063 #ifdef DEBUG_libfirm
1064 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1065 if (!is_loose(node))
1066 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1068 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1069 dbg_admissible_colors(env, node);
1070 DB((dbg, LEVEL_4, ")\n"));
1079 * Tries to color an affinity chunk (or at least a part of it).
1080 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
/* Overall flow: (1) rank colors by the chunk's own affinity blended with a "dislike"
 * term from neighbouring chunks; (2) for each candidate color, tentatively recolor all
 * chunk nodes and keep the heaviest successfully-colored fragment; (3) manifest the
 * best fragment's color permanently; (4) requeue the uncolored remainder as new chunks. */
1082 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
1083 aff_chunk_t *best_chunk = NULL;
1084 int n_nodes = ARR_LEN(c->n);
1085 int best_color = -1;
1086 int n_int_chunks = 0;
1087 waitq *tmp_chunks = new_waitq();
1088 waitq *best_starts = NULL;
1089 col_cost_t *order = alloca(env->n_regs * sizeof(order[0]));
1092 struct list_head changed;
1095 DB((dbg, LEVEL_2, "fragmentizing chunk #%d", c->id));
1096 DBG_AFF_CHUNK(env, LEVEL_2, c);
1097 DB((dbg, LEVEL_2, "\n"));
1099 stat_ev_ctx_push_fmt("heur4_color_chunk", "%d", c->id);
1101 ++env->chunk_visited;
1103 /* compute color preference: sum color affinities of distinct neighbouring chunks
 * (chunk->visited guards against counting the same chunk twice) */
1104 memset(order, 0, env->n_regs * sizeof(order[0]));
1106 bitset_foreach (c->interfere, pos) {
1107 ir_node *n = get_idx_irn(env->co->irg, pos);
1108 co_mst_irn_t *node = get_co_mst_irn(env, n);
1109 aff_chunk_t *chunk = node->chunk;
1111 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1112 assert(!chunk->deleted);
1113 chunk->visited = env->chunk_visited;
1116 aff_chunk_assure_weight(env, chunk);
1117 for (i = 0; i < env->n_regs; ++i)
1118 order[i].cost += chunk->color_affinity[i].cost;
/* blend own affinity with the neighbours' dislike, weighted by dislike_influence */
1122 for (i = 0; i < env->n_regs; ++i) {
1123 double dislike = n_int_chunks > 0 ? 1.0 - order[i].cost / n_int_chunks : 0.0;
1125 order[i].cost = (1.0 - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1128 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1130 DBG_COL_COST(env, LEVEL_2, order);
1131 DB((dbg, LEVEL_2, "\n"));
1133 /* check which color is the "best" for the given chunk.
1134 * if we found a color which was ok for all nodes, we take it
1135 * and do not look further. (see did_all flag usage below.)
1136 * If we have many colors which fit all nodes it is hard to decide
1137 * which one to take anyway.
1138 * TODO Sebastian: Perhaps we should at all nodes and figure out
1139 * a suitable color using costs as done above (determine_color_costs).
1141 for (i = 0; i < env->k; ++i) {
1142 int col = order[i].col;
1143 waitq *good_starts = new_waitq();
1144 aff_chunk_t *local_best;
1147 /* skip ignore colors */
1148 if (bitset_is_set(env->ignore_regs, col))
1151 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1155 /* try to bring all nodes of given chunk to the current color. */
1156 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1157 const ir_node *irn = c->n[idx];
1158 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1161 assert(! node->fixed && "Node must not have a fixed color.");
1162 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1165 The order of the colored nodes is important, so we record the successfully
1166 colored ones in the order they appeared.
1168 INIT_LIST_HEAD(&changed);
1170 good = change_node_color(env, node, col, &changed);
1171 stat_ev_tim_pop("heur4_recolor");
1173 waitq_put(good_starts, node);
1174 materialize_coloring(&changed);
1179 reject_coloring(&changed);
1181 n_succeeded += good;
1182 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1185 /* unfix all nodes */
1186 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1187 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1191 /* try next color when failed */
1192 if (n_succeeded == 0)
1195 /* fragment the chunk according to the coloring */
1196 local_best = fragment_chunk(env, col, c, tmp_chunks);
1198 /* search the best of the good list
1199 and make it the new best if it is better than the current */
1201 aff_chunk_assure_weight(env, local_best);
1203 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %d) for color %d: ", local_best->id, col));
1204 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1206 if (! best_chunk || best_chunk->weight < local_best->weight) {
1207 best_chunk = local_best;
/* hand over good_starts to best_starts; the previous best's queue is freed */
1210 del_waitq(best_starts);
1211 best_starts = good_starts;
1212 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %d), color %d\n", best_chunk->id, best_color));
1214 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1215 del_waitq(good_starts);
1219 del_waitq(good_starts);
1222 /* if all nodes were recolored, bail out */
1223 if (n_succeeded == n_nodes)
1227 stat_ev_int("heur4_colors_tried", i);
1229 /* free all intermediate created chunks except best one */
1230 while (! waitq_empty(tmp_chunks)) {
1231 aff_chunk_t *tmp = waitq_get(tmp_chunks);
1232 if (tmp != best_chunk)
1233 delete_aff_chunk(env, tmp);
1235 del_waitq(tmp_chunks);
1237 /* return if coloring failed */
1240 del_waitq(best_starts);
1244 DB((dbg, LEVEL_2, "\tbest chunk #%d ", best_chunk->id));
1245 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1246 DB((dbg, LEVEL_2, "using color %d\n", best_color));
/* manifest: permanently assign best_color to every node of the winning fragment */
1248 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1249 const ir_node *irn = best_chunk->n[idx];
1250 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1253 /* bring the node to the color. */
1254 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%d\n", best_color, node->irn, best_chunk->id));
1255 INIT_LIST_HEAD(&changed);
1257 res = change_node_color(env, node, best_color, &changed);
1258 stat_ev_tim_pop("heur4_recolor");
1260 materialize_coloring(&changed);
1263 assert(list_empty(&changed));
1266 /* remove the nodes in best chunk from original chunk */
1267 bitset_andnot(c->nodes, best_chunk->nodes);
1268 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1269 const ir_node *irn = c->n[idx];
1271 if (bitset_is_set(best_chunk->nodes, get_irn_idx(irn))) {
/* swap-with-last removal keeps the array compact without preserving order */
1272 int last = ARR_LEN(c->n) - 1;
1274 c->n[idx] = c->n[last];
1275 ARR_SHRINKLEN(c->n, last);
1280 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1281 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1282 const ir_node *n = c->n[idx];
1283 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1287 /* fragment the remaining chunk */
1288 visited = bitset_irg_malloc(env->co->irg);
1289 bitset_or(visited, best_chunk->nodes);
1290 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1291 const ir_node *irn = c->n[idx];
1292 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1293 aff_chunk_t *new_chunk = new_aff_chunk(env);
1294 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1296 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1297 aff_chunk_assure_weight(env, new_chunk);
1298 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1302 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1303 const ir_node *n = best_chunk->n[idx];
1304 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1308 /* clear obsolete chunks and free some memory */
1309 delete_aff_chunk(env, best_chunk);
1310 bitset_free(visited);
1312 del_waitq(best_starts);
1314 stat_ev_ctx_pop("heur4_color_chunk");
1318 * Main driver for mst safe coalescing algorithm.
/**
 * Main driver of the MST-based copy-minimization heuristic ("heur4").
 *
 * Visible structure: set up a co_mst_env_t (phase, chunk priority queue,
 * ignore-register set, per-color cost vectors), build affinity chunks,
 * greedily color chunks popped from the priority queue, then write the
 * resulting colors back as register assignments and release all resources.
 *
 * @param co  The copy-optimization problem (graph, register class, envs).
 * @return    Presumably a success indicator — the return statement is not
 *            visible in this view; TODO confirm against the full file.
 *
 * NOTE(review): several lines of this function (local declarations such as
 * i/j/k/irn, loop/branch closers, the return) are missing from this view;
 * the comments below only describe what the visible code demonstrates.
 */
1320 int co_solve_heuristic_mst(copy_opt_t *co) {
1321 unsigned n_regs = co->cls->n_regs;
/* stack-allocated bitset of registers the allocator must not use */
1322 bitset_t *ignore_regs = bitset_alloca(n_regs);
1325 co_mst_env_t mst_env;
/* init phase object; co_mst_irn_init creates the per-node co_mst_irn_t data */
1330 phase_init(&mst_env.ph, "co_mst", co->irg, PHASE_DEFAULT_GROWTH, co_mst_irn_init, &mst_env);
/* collect the ignore registers of this class; k presumably holds their count
   (declaration not visible here — TODO confirm) */
1332 k = be_put_ignore_regs(co->cenv->birg, co->cls, ignore_regs);
/* populate the environment shared by all heuristic phases below */
1335 mst_env.n_regs = n_regs;
1337 mst_env.chunks = new_pqueue();
1339 mst_env.ignore_regs = ignore_regs;
1340 mst_env.ifg = co->cenv->ifg;
1341 mst_env.aenv = co->aenv;
1342 mst_env.chunkset = pset_new_ptr(512);
1343 mst_env.chunk_visited = 0;
/* one col_cost_t vector per register, phase-allocated so phase_free reclaims them */
1344 mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
1346 for (i = 0; i < n_regs; ++i) {
1347 col_cost_t *vec = phase_alloc(&mst_env.ph, sizeof(*vec) * n_regs);
1349 mst_env.single_cols[i] = vec;
/* inner initialization of vec[j] is not visible in this view */
1350 for (j = 0; j < n_regs; ++j) {
1359 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1361 /* build affinity chunks */
1363 build_affinity_chunks(&mst_env);
1364 stat_ev_tim_pop("heur4_initial_chunk");
1366 /* color chunks as long as there are some */
1367 while (! pqueue_empty(mst_env.chunks)) {
/* highest-weight chunk first (priority queue ordered by chunk weight) */
1368 aff_chunk_t *chunk = pqueue_get(mst_env.chunks);
1370 color_aff_chunk(&mst_env, chunk);
1371 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%d) done\n", chunk->id));
/* chunk is consumed; color_aff_chunk may have re-enqueued fragments of it */
1372 delete_aff_chunk(&mst_env, chunk);
1375 /* apply coloring */
1376 foreach_phase_irn(&mst_env.ph, irn) {
1377 co_mst_irn_t *mirn = get_co_mst_irn(&mst_env, irn);
1378 const arch_register_t *reg;
/* ignored nodes keep whatever register they already have
   (the continue/skip statement itself is not visible here) */
1380 if (arch_irn_is(mst_env.aenv, irn, ignore))
1383 // assert(mirn->fixed && "Node should have fixed color");
1385 /* skip nodes where color hasn't changed */
1386 if (mirn->init_col == mirn->col)
/* materialize the chosen color as the node's architecture register */
1389 reg = arch_register_for_index(co->cls, mirn->col);
1390 arch_set_irn_register(co->aenv, irn, reg);
1391 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1394 /* free allocated memory */
1395 del_pqueue(mst_env.chunks);
1396 phase_free(&mst_env.ph);
1397 del_pset(mst_env.chunkset);
1399 stat_ev_tim_pop("heur4_total");
/**
 * Command-line options of the heur4 coalescer (registered under
 * be.ra.chordal.co.heur4 by be_init_copyheur4 below).
 * NOTE(review): the table terminator (LC_OPT_LAST) and closing brace are
 * not visible in this view of the file.
 */
1404 static const lc_opt_table_entry_t options[] = {
/* max number of recoloring attempts, backs the file-scope recolor_limit */
1405 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
/* weight of the "dislike" term, backs the file-scope dislike_influence */
1406 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
/**
 * Module constructor: registers the heur4 option table under the option
 * group be.ra.chordal.co.heur4 and sets up the debug module.
 * Invoked via BE_REGISTER_MODULE_CONSTRUCTOR (see end of file).
 * NOTE(review): the function's closing brace is not visible in this view.
 */
1411 void be_init_copyheur4(void) {
/* walk down the option-group hierarchy to be.ra.chordal.co.heur4 */
1412 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1413 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1414 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1415 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1416 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1418 lc_opt_add_table(heur4_grp, options);
/* bind the file-scope dbg handle used by the DBG/DB macros above */
1419 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");
1423 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4);