2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
27 * This is the C implementation of the mst algorithm
28 * originally written in Java by Sebastian Hack.
29 * (also known as "heur3" :)
30 * Performs simple copy minimization.
34 #define DISABLE_STATEV
41 #include "raw_bitset.h"
42 #include "irphase_t.h"
58 #include "becopyopt_t.h"
/* Tunable cost constants of the heuristic. */
62 #define COL_COST_INFEASIBLE DBL_MAX
63 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
64 #define NEIGHBOUR_CONSTR_COSTS 64.0
/* Debug helpers: only dump when the requested debug level is enabled.
 * NOTE(review): excerpt — the #ifdef DEBUG_libfirm / #else guards around the
 * two macro variants below are not visible here. */
69 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while (0)
70 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while (0)
72 static firm_dbg_module_t *dbg = NULL;
/* No-op variants for non-debug builds. */
76 #define DBG_AFF_CHUNK(env, level, chunk)
77 #define DBG_COL_COST(env, level, cost)
/* Append an 'f' suffix: turn a double literal into a float (real_t) literal. */
82 #define REAL(C) (C ## f)
/* Running counter used to hand out unique chunk ids (ids start at 1). */
84 static unsigned last_chunk_id = 0;
/* Maximum recursion depth for recolor_nodes(). */
85 static int recolor_limit = 7;
/* Weight of the "neighbour chunks dislike this color" term in color_aff_chunk(). */
86 static real_t dislike_influence = REAL(0.1);
/* A single (color, cost) entry; arrays of these are sorted to rank colors. */
88 typedef struct _col_cost_t {
/* A set of affinity-related nodes; the heuristic tries to assign all of them
 * the same register. (NOTE(review): excerpt — some members and the closing
 * brace of this struct are not visible here.) */
96 typedef struct _aff_chunk_t {
97 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
98 const ir_node **interfere; /**< An ARR_F containing all nodes interfering with this chunk's nodes. */
99 int weight; /**< Weight of this chunk */
100 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
101 unsigned deleted : 1; /**< For debugging: Set if the chunk was deleted. */
102 unsigned id; /**< An id of this chunk. */
104 col_cost_t color_affinity[1]; /**< Per-color affinity costs; allocated with env->n_regs entries (see new_aff_chunk). */
/* A weighted affinity edge between two nodes of the interference graph. */
110 typedef struct _aff_edge_t {
111 const ir_node *src; /**< Source node. */
112 const ir_node *tgt; /**< Target node. */
113 int weight; /**< The weight (affinity costs) of this edge. */
116 /* main coalescing environment */
117 typedef struct _co_mst_env_t {
118 int n_regs; /**< number of regs in class */
119 int k; /**< number of non-ignore registers in class */
120 bitset_t *ignore_regs; /**< set containing all global ignore registers */
121 ir_phase ph; /**< phase object holding data for nodes */
122 pqueue_t *chunks; /**< priority queue for chunks */
123 pset *chunkset; /**< set holding all chunks */
124 be_ifg_t *ifg; /**< the interference graph */
125 copy_opt_t *co; /**< the copy opt object */
126 unsigned chunk_visited; /**< visitation stamp, compared against chunk->visited (see color_aff_chunk) */
127 col_cost_t **single_cols; /**< per-color cost arrays used by change_node_color — presumably precomputed at init; TODO confirm at the (not visible) init site */
130 /* stores coalescing related information for a node */
131 typedef struct _co_mst_irn_t {
132 const ir_node *irn; /**< the irn this information belongs to */
133 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
134 bitset_t *adm_colors; /**< set of admissible colors for this irn */
135 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
136 int n_neighs; /**< length of the interfering neighbours array. */
137 int int_aff_neigh; /**< number of interfering affinity neighbours (-1 until computed lazily) */
138 int col; /**< color currently assigned */
139 int init_col; /**< the initial color */
140 int tmp_col; /**< a temporarily assigned color (< 0 if none) */
141 unsigned fixed : 1; /**< the color is fixed */
142 struct list_head list; /**< Queue for coloring undo. */
143 real_t constr_factor; /**< (1 + n_regs - #admissible colors) / n_regs; larger means more constrained */
/* Fetch (creating lazily via co_mst_irn_init) the per-node data for irn. */
146 #define get_co_mst_irn(mst_env, irn) (phase_get_or_set_irn_data(&(mst_env)->ph, (irn)))
/* Predicate type: does @p node "accept" color @p col? (see the decider_* functions) */
148 typedef int decide_func_t(const co_mst_irn_t *node, int col);
153 * Write a chunk to stderr for debugging.
155 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
/* only print the weight when the cached value is up to date */
159 if (c->weight_consistent)
160 ir_fprintf(stderr, " $%d ", c->weight);
161 ir_fprintf(stderr, "{");
162 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
163 const ir_node *n = c->n[i];
164 ir_fprintf(stderr, " %+F,", n);
166 ir_fprintf(stderr, "}");
170 * Dump all admissible colors to stderr.
172 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
/* a node with no admissible color at all would be uncolorable */
177 if (bitset_popcnt(node->adm_colors) < 1)
178 fprintf(stderr, "no admissible colors?!?");
180 bitset_foreach(node->adm_colors, idx) {
181 fprintf(stderr, " %d", idx);
187 * Dump color-cost pairs to stderr.
189 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
/* cost points to an array of env->n_regs (color, cost) entries */
192 for (i = 0; i < env->n_regs; ++i)
193 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
196 #endif /* DEBUG_libfirm */
/* Effective color of a node: the temporary color if one is set, else the real one. */
198 static inline int get_mst_irn_col(const co_mst_irn_t *node)
200 return node->tmp_col >= 0 ? node->tmp_col : node->col;
204 * @return 1 if node @p node has color @p col, 0 otherwise.
/* decide_func_t instance used when fragmenting chunks by color */
206 static int decider_has_color(const co_mst_irn_t *node, int col)
208 return get_mst_irn_col(node) == col;
212 * @return 1 if node @p node does not have color @p col, 0 otherwise.
/* decide_func_t instance used when fragmenting chunks by color */
214 static int decider_hasnot_color(const co_mst_irn_t *node, int col)
216 return get_mst_irn_col(node) != col;
220 * Always returns true (decide_func_t that accepts every node/color pair).
222 static int decider_always_yes(const co_mst_irn_t *node, int col)
229 /** compares two affinity edges by their weight (qsort callback; heaviest first) */
230 static int cmp_aff_edge(const void *a, const void *b)
232 const aff_edge_t *e1 = a;
233 const aff_edge_t *e2 = b;
235 if (e2->weight == e1->weight) {
/* equal weights: tie-break on node indices to keep the sort order deterministic */
236 if (e2->src->node_idx == e1->src->node_idx)
237 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
239 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
241 /* sort in descending order */
242 return QSORT_CMP(e2->weight, e1->weight);
245 /** compares two color-cost pairs (ascending by cost) */
246 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
248 const col_cost_t *c1 = a;
249 const col_cost_t *c2 = b;
250 real_t diff = c1->cost - c2->cost;
/* sign of diff as -1/0/+1 without an explicit branch */
251 return (diff > 0) - (diff < 0);
/* compares two color-cost pairs (descending by cost; highest-valued color first) */
254 static int cmp_col_cost_gt(const void *a, const void *b)
256 const col_cost_t *c1 = a;
257 const col_cost_t *c2 = b;
258 real_t diff = c2->cost - c1->cost;
259 return (diff > 0) - (diff < 0);
263 * Creates a new affinity chunk and registers it in env->chunkset.
/* allocate with a trailing color_affinity[] array of env->n_regs entries */
265 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
267 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
268 c->n = NEW_ARR_F(const ir_node *, 0);
269 c->interfere = NEW_ARR_F(const ir_node *, 0);
271 c->weight_consistent = 0;
/* ids start at 1; 0 is used as "no chunk" in debug output */
273 c->id = ++last_chunk_id;
275 pset_insert(env->chunkset, c, c->id);
280 * Frees all memory allocated by an affinity chunk.
282 static inline void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
284 pset_remove(env->chunkset, c, c->id);
285 DEL_ARR_F(c->interfere);
/* NOTE(review): excerpt — freeing of c->n and of c itself is not visible here */
292 * binary search of sorted nodes.
294 * @return the position where n is found in the array arr or ~pos
295 * if the node is not there.
297 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
299 int hi = ARR_LEN(arr);
/* overflow-safe midpoint computation */
303 int md = lo + ((hi - lo) >> 1);
316 /** Check if a node n can be found inside arr (arr must be kept sorted). */
317 static int node_contains(const ir_node **arr, const ir_node *n)
/* nodes_bsearch returns >= 0 iff found (negative values encode ~insert_pos) */
319 int i = nodes_bsearch(arr, n);
324 * Insert a node into the sorted nodes list.
326 * @return 1 if the node was inserted, 0 else
328 static int nodes_insert(const ir_node ***arr, const ir_node *irn)
330 int idx = nodes_bsearch(*arr, irn);
333 int i, n = ARR_LEN(*arr);
/* grow the flexible array by one, then shift elements right to open slot idx */
336 ARR_APP1(const ir_node *, *arr, irn);
341 for (i = n - 1; i >= idx; --i)
350 * Adds a node to an affinity chunk
352 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
356 if (! nodes_insert(&c->n, node->irn))
/* chunk contents changed -> the cached weight is stale */
359 c->weight_consistent = 0;
/* record all interference neighbours of the new member in the chunk */
362 for (i = node->n_neighs - 1; i >= 0; --i) {
363 ir_node *neigh = node->int_neighs[i];
364 nodes_insert(&c->interfere, neigh);
369 * In case there is no phase information for irn, initialize it.
371 static void *co_mst_irn_init(ir_phase *ph, const ir_node *irn, void *old)
373 co_mst_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
374 co_mst_env_t *env = ph->priv;
377 const arch_register_req_t *req;
378 void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
386 res->int_neighs = NULL;
387 res->int_aff_neigh = 0;
/* start from the register the node currently carries */
388 res->col = arch_register_get_index(arch_get_irn_register(irn));
389 res->init_col = res->col;
390 INIT_LIST_HEAD(&res->list);
392 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
394 /* set admissible registers */
395 res->adm_colors = bitset_obstack_alloc(phase_obst(ph), env->n_regs);
397 /* Exclude colors not assignable to the irn */
398 req = arch_get_register_req_out(irn);
399 if (arch_register_req_is(req, limited))
400 rbitset_copy_to_bitset(req->limited, res->adm_colors);
/* (else branch, line not visible here) unconstrained: every color is admissible */
402 bitset_set_all(res->adm_colors);
404 /* exclude global ignore registers as well */
405 bitset_andnot(res->adm_colors, env->ignore_regs);
407 /* compute the constraint factor: larger values = fewer admissible colors */
408 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcnt(res->adm_colors)) / env->n_regs;
410 /* set the number of interfering affinity neighbours to -1, they are calculated later */
411 res->int_aff_neigh = -1;
413 /* build list of interfering neighbours */
415 be_ifg_foreach_neighbour(env->ifg, nodes_it, irn, neigh) {
416 if (!arch_irn_is_ignore(neigh)) {
/* collect neighbours on the phase obstack, finished into int_neighs below */
417 obstack_ptr_grow(phase_obst(ph), neigh);
421 res->int_neighs = obstack_finish(phase_obst(ph));
428 * Check if affinity chunk @p chunk interferes with node @p irn.
429 * @return 1 if irn is in the chunk's (sorted) interference array, 0 otherwise.
430 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
432 return node_contains(chunk->interfere, irn);
436 * Check if there are interference edges from c1 to c2.
438 * @param c2 Another chunk
439 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
441 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
448 /* check if there is a node in c2 having an interfering neighbor in c1 */
449 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
450 const ir_node *irn = c2->n[i];
/* c1->interfere holds all neighbours of c1's members, so membership means interference */
452 if (node_contains(c1->interfere, irn))
459 * Returns the affinity chunk of @p irn or creates a new
460 * one with @p irn as element if there is none assigned.
462 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
464 co_mst_irn_t *node = get_co_mst_irn(env, irn);
/* NOTE(review): excerpt — the return of node->chunk is not visible here */
469 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
470 * are no interference edges from chunk(src) to chunk(tgt)).
471 * @return 1 if successful, 0 if not possible
473 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
475 aff_chunk_t *c1 = get_aff_chunk(env, src);
476 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
479 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
481 DBG_AFF_CHUNK(env, LEVEL_4, c1);
483 DB((dbg, LEVEL_4, "{%+F}", src));
485 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
487 DBG_AFF_CHUNK(env, LEVEL_4, c2);
489 DB((dbg, LEVEL_4, "{%+F}", tgt));
491 DB((dbg, LEVEL_4, "\n"));
496 /* no chunk exists */
497 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
/* if tgt directly interferes with src they can never share a chunk */
500 for (i = mirn->n_neighs - 1; i >= 0; --i) {
501 if (mirn->int_neighs[i] == tgt)
505 /* create one containing both nodes */
506 c1 = new_aff_chunk(env);
507 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
508 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
512 /* c2 already exists */
513 if (! aff_chunk_interferes(c2, src)) {
514 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
518 } else if (c2 == NULL) {
519 /* c1 already exists */
520 if (! aff_chunk_interferes(c1, tgt)) {
521 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
/* both chunks exist and do not interfere: merge c2 into c1 */
524 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
527 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
528 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
/* carry over c2's interference information as well */
530 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
531 const ir_node *irn = c2->interfere[idx];
532 nodes_insert(&c1->interfere, irn);
535 c1->weight_consistent = 0;
537 delete_aff_chunk(env, c2);
540 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
544 DB((dbg, LEVEL_4, " ... absorbed\n"));
549 * Assures that the weight of the given chunk is consistent
550 * (recomputes weight and per-color affinity if stale).
551 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
553 if (! c->weight_consistent) {
/* reset the per-color affinity accumulators */
557 for (i = 0; i < env->n_regs; ++i) {
558 c->color_affinity[i].col = i;
559 c->color_affinity[i].cost = REAL(0.0);
562 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
563 const ir_node *n = c->n[idx];
564 const affinity_node_t *an = get_affinity_info(env->co, n);
565 co_mst_irn_t *node = get_co_mst_irn(env, n);
/* constrained nodes vote for their admissible colors */
568 if (node->constr_factor > REAL(0.0)) {
570 bitset_foreach (node->adm_colors, col)
571 c->color_affinity[col].cost += node->constr_factor;
576 co_gs_foreach_neighb(an, neigh) {
577 const ir_node *m = neigh->irn;
579 if (arch_irn_is_ignore(m))
/* only affinity edges internal to the chunk contribute to the weight */
582 w += node_contains(c->n, m) ? neigh->costs : 0;
/* normalize the color affinity: average over the chunk's nodes */
587 for (i = 0; i < env->n_regs; ++i)
588 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
591 // c->weight = bitset_popcnt(c->nodes);
592 c->weight_consistent = 1;
597 * Count the number of interfering affinity neighbours of @p an.
599 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
601 const neighb_t *neigh;
602 const ir_node *irn = an->irn;
603 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
606 co_gs_foreach_neighb(an, neigh) {
607 const ir_node *n = neigh->irn;
610 if (arch_irn_is_ignore(n))
613 /* check if the affinity neighbour interferes (linear scan of cached neighbours) */
614 for (i = 0; i < node->n_neighs; ++i) {
615 if (node->int_neighs[i] == n) {
626 * Build chunks of nodes connected by affinity edges.
627 * We start at the heaviest affinity edge.
628 * The chunks of the two edge-defining nodes will be
629 * merged if there are no interference edges from one
630 * chunk to the other.
632 static void build_affinity_chunks(co_mst_env_t *env)
634 void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
635 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
638 aff_chunk_t *curr_chunk;
640 /* at first we create the affinity edge objects */
641 be_ifg_foreach_node(env->ifg, nodes_it, n) {
642 int n_idx = get_irn_idx(n);
646 if (arch_irn_is_ignore(n))
649 n1 = get_co_mst_irn(env, n);
650 an = get_affinity_info(env->co, n);
/* lazily compute the interfering-affinity-neighbour count */
655 if (n1->int_aff_neigh < 0)
656 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
658 /* build the affinity edges */
659 co_gs_foreach_neighb(an, neigh) {
660 const ir_node *m = neigh->irn;
661 int m_idx = get_irn_idx(m);
663 /* record the edge in only one direction */
668 /* skip ignore nodes */
669 if (arch_irn_is_ignore(m))
675 n2 = get_co_mst_irn(env, m);
676 if (n2->int_aff_neigh < 0) {
677 affinity_node_t *am = get_affinity_info(env->co, m);
678 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
681 * these weights are pure hackery ;-).
682 * It's not chriswue's fault but mine.
684 edge.weight = neigh->costs;
685 ARR_APP1(aff_edge_t, edges, edge);
691 /* now: sort edges and build the affinity chunks */
692 len = ARR_LEN(edges);
/* cmp_aff_edge sorts heaviest first, so absorption starts at the heaviest edge */
693 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
694 for (i = 0; i < len; ++i) {
695 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
697 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
700 /* now insert all chunks into a priority queue */
701 foreach_pset(env->chunkset, curr_chunk) {
702 aff_chunk_assure_weight(env, curr_chunk);
704 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
705 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
706 DBG((dbg, LEVEL_1, "\n"));
708 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* give every remaining node its own singleton chunk */
711 foreach_phase_irn(&env->ph, n) {
712 co_mst_irn_t *mirn = get_co_mst_irn(env, n);
714 if (mirn->chunk == NULL) {
715 /* no chunk is allocated so far, do it now */
716 aff_chunk_t *curr_chunk = new_aff_chunk(env);
717 aff_chunk_add_node(curr_chunk, mirn);
719 aff_chunk_assure_weight(env, curr_chunk);
721 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
722 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
723 DBG((dbg, LEVEL_1, "\n"));
725 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* Reorder the chunk's nodes, growing outward from the node with the largest
 * affinity weight (priority-queue driven; currently unused). */
732 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
734 pqueue_t *grow = new_pqueue();
735 const ir_node *max_node = NULL;
/* find the chunk node with the maximum summed affinity costs */
739 for (i = ARR_LEN(chunk->n) - 1; i >= 0; i--) {
740 const ir_node *irn = chunk->n[i];
741 affinity_node_t *an = get_affinity_info(env->co, irn);
745 if (arch_irn_is_ignore(irn))
749 co_gs_foreach_neighb(an, neigh)
752 if (w > max_weight) {
/* mark all chunk nodes as not-yet-ordered */
760 bitset_t *visited = bitset_irg_malloc(env->co->irg);
762 for (i = ARR_LEN(chunk->n) - 1; i >= 0; --i)
763 bitset_add_irn(visited, chunk->n[i]);
765 pqueue_put(grow, (void *) max_node, max_weight);
766 bitset_remv_irn(visited, max_node);
/* repeatedly take the heaviest frontier node and enqueue its affinity neighbours */
768 while (!pqueue_empty(grow)) {
769 ir_node *irn = pqueue_pop_front(grow);
770 affinity_node_t *an = get_affinity_info(env->co, irn);
773 if (arch_irn_is_ignore(irn))
776 assert(i <= ARR_LEN(chunk->n));
781 /* build the affinity edges */
782 co_gs_foreach_neighb(an, neigh) {
783 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
785 if (bitset_contains_irn(visited, node->irn)) {
786 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
787 bitset_remv_irn(visited, node->irn);
793 bitset_free(visited);
798 * Greedily collect affinity neighbours into the new chunk @p chunk starting at node @p node.
800 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
801 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
803 waitq *nodes = new_waitq();
805 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
807 /* init queue and chunk */
808 waitq_put(nodes, node);
809 bitset_set(visited, get_irn_idx(node->irn));
810 aff_chunk_add_node(chunk, node);
811 DB((dbg, LEVEL_1, " %+F", node->irn));
813 /* as long as there are nodes in the queue (BFS over affinity edges) */
814 while (! waitq_empty(nodes)) {
815 co_mst_irn_t *n = waitq_get(nodes);
816 affinity_node_t *an = get_affinity_info(env->co, n->irn);
818 /* check all affinity neighbors */
821 co_gs_foreach_neighb(an, neigh) {
822 const ir_node *m = neigh->irn;
823 int m_idx = get_irn_idx(m);
826 if (arch_irn_is_ignore(m))
829 n2 = get_co_mst_irn(env, m);
831 if (! bitset_is_set(visited, m_idx) &&
834 ! aff_chunk_interferes(chunk, m) &&
835 node_contains(orig_chunk->n, m))
/* (the decider/fixed-color conditions are on lines not visible in this excerpt) */
838 following conditions are met:
839 - neighbour is not visited
840 - neighbour likes the color
841 - neighbour has not yet a fixed color
842 - the new chunk doesn't interfere with the neighbour
843 - neighbour belongs or belonged once to the original chunk
845 bitset_set(visited, m_idx);
846 aff_chunk_add_node(chunk, n2);
847 DB((dbg, LEVEL_1, " %+F", n2->irn));
848 /* enqueue for further search */
849 waitq_put(nodes, n2);
855 DB((dbg, LEVEL_1, "\n"));
861 * Fragment the given chunk into chunks having given color and not having given color.
862 * @return the best (heaviest) such fragment
863 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
865 bitset_t *visited = bitset_irg_malloc(env->co->irg)
867 aff_chunk_t *best = NULL;
869 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
872 aff_chunk_t *tmp_chunk;
873 decide_func_t *decider;
/* each node seeds at most one fragment */
877 if (bitset_is_set(visited, get_irn_idx(irn)))
880 node = get_co_mst_irn(env, irn);
/* choose the expansion predicate depending on the node's current color */
882 if (get_mst_irn_col(node) == col) {
883 decider = decider_has_color;
885 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
888 decider = decider_hasnot_color;
890 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
893 /* create a new chunk starting at current node */
894 tmp_chunk = new_aff_chunk(env);
895 waitq_put(tmp, tmp_chunk);
896 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
897 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
899 /* remember the local best */
900 aff_chunk_assure_weight(env, tmp_chunk);
901 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
905 assert(best && "No chunk found?");
906 bitset_free(visited);
911 * Resets the temporary fixed color of all nodes on the changed list @p nodes.
912 * ATTENTION: the list is empty after calling this function!
914 static inline void reject_coloring(struct list_head *nodes)
916 co_mst_irn_t *n, *temp;
917 DB((dbg, LEVEL_4, "\treject coloring for"));
918 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
919 DB((dbg, LEVEL_4, " %+F", n->irn));
920 assert(n->tmp_col >= 0);
/* unlink from the undo list (reset of tmp_col is on a line not visible here) */
922 list_del_init(&n->list);
924 DB((dbg, LEVEL_4, "\n"));
/* Commit the temporary colors of all nodes on @p nodes and empty the list.
 * (NOTE(review): excerpt — the col = tmp_col assignment is presumably on a
 * line not visible here; confirm against the full source.) */
927 static inline void materialize_coloring(struct list_head *nodes)
929 co_mst_irn_t *n, *temp;
930 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
931 assert(n->tmp_col >= 0);
934 list_del_init(&n->list);
/* Temporarily assign color @p col to @p node and record the node on the
 * @p changed list so the assignment can later be materialized or rejected. */
938 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
941 assert(!node->fixed);
942 assert(node->tmp_col < 0);
/* the node must not already be on an undo list */
943 assert(node->list.next == &node->list && node->list.prev == &node->list);
944 assert(bitset_is_set(node->adm_colors, col));
946 list_add_tail(&node->list, changed);
/* A node is "loose" if its color is neither permanently nor temporarily fixed. */
950 static inline int is_loose(co_mst_irn_t *node)
952 return !node->fixed && node->tmp_col < 0;
956 * Determines the costs for each color if it would be assigned to node @p node.
957 * NOTE: "cost" acts as a desirability value here: REAL(0.0) marks an
957 * infeasible color and larger values are better (cf. recolor_nodes).
958 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
960 int *neigh_cols = ALLOCAN(int, env->n_regs);
/* admissible colors start with the node's constraint factor, others with 0 */
965 for (i = 0; i < env->n_regs; ++i) {
968 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
/* inspect the colors of all interfering neighbours */
971 for (i = 0; i < node->n_neighs; ++i) {
972 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
973 int col = get_mst_irn_col(n);
/* a neighbour's (fixed) color is infeasible for this node */
978 costs[col].cost = REAL(0.0);
/* damp colors preferred by loose neighbours proportionally to their share */
982 coeff = REAL(1.0) / n_loose;
983 for (i = 0; i < env->n_regs; ++i)
984 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
988 /* need forward declaration due to recursive call */
989 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
992 * Tries to change node to a color but @p exclude_col.
993 * @return 1 if succeeded, 0 otherwise.
995 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
997 int col = get_mst_irn_col(node);
1000 /* neighbour already has a different color -> good, temporarily fix it */
1001 if (col != exclude_col) {
1003 set_temp_color(node, col, changed);
1007 /* The node has the color it should not have _and_ has not been visited yet. */
1008 if (is_loose(node)) {
1009 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
1011 /* Get the costs for giving the node a specific color. */
1012 determine_color_costs(env, node, costs);
1014 /* Since the node must not have the not_col, set the costs for that color to "infinity" (== REAL(0.0), the infeasible marker) */
1015 costs[exclude_col].cost = REAL(0.0);
1017 /* sort the colors according costs, cheapest first. */
1018 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
1020 /* Try recoloring the node using the color list. */
1021 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1028 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1029 * ATTENTION: Expect @p costs already sorted by increasing costs.
1030 * @return 1 if coloring could be applied, 0 otherwise.
1032 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
1035 struct list_head local_changed;
/* track the deepest recursion reached (update statement not visible here) */
1038 if (depth > *max_depth)
1041 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1042 DBG_COL_COST(env, LEVEL_4, costs);
1043 DB((dbg, LEVEL_4, "\n"));
/* bound the recursion depth to keep the search tractable */
1045 if (depth >= recolor_limit) {
1046 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
/* try the colors in the (pre-sorted) order given by costs */
1050 for (i = 0; i < env->n_regs; ++i) {
1051 int tgt_col = costs[i].col;
1055 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
1056 if (costs[i].cost == REAL(0.0)) {
1057 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1061 /* Set the new color of the node and mark the node as temporarily fixed. */
1062 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1063 INIT_LIST_HEAD(&local_changed);
1064 set_temp_color(node, tgt_col, &local_changed);
1065 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1067 /* try to color all interfering neighbours with current color forbidden */
1068 for (j = 0; j < node->n_neighs; ++j) {
1072 neigh = node->int_neighs[j];
1074 if (arch_irn_is_ignore(neigh))
1077 nn = get_co_mst_irn(env, neigh);
1078 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1079 neigh, j, nn->fixed, nn->tmp_col, nn->col));
1082 Try to change the color of the neighbor and record all nodes which
1083 get changed in the tmp list. Add this list to the "changed" list for
1084 that color. If we did not succeed to change the color of the neighbor,
1085 we bail out and try the next color.
/* only neighbours currently holding tgt_col conflict and must be recolored */
1087 if (get_mst_irn_col(nn) == tgt_col) {
1088 /* try to color neighbour with tgt_col forbidden */
1089 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1097 We managed to assign the target color to all neighbors, so from the perspective
1098 of the current node, every thing was ok and we can return safely.
1101 /* append the local_changed ones to global ones */
1102 list_splice(&local_changed, changed);
1106 /* coloring of neighbours failed, so we try next color */
1107 reject_coloring(&local_changed);
1111 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1116 * Tries to bring node @p node and all it's neighbours to color @p tgt_col.
1117 * @return 1 if color @p col could be applied, 0 otherwise
1119 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
1121 int col = get_mst_irn_col(node);
1123 /* if node already has the target color -> good, temporary fix it */
1124 if (col == tgt_col) {
1125 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1127 set_temp_color(node, tgt_col, changed);
1132 Node has not yet a fixed color and target color is admissible
1133 -> try to recolor node and it's affinity neighbours
1135 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
/* cost array favouring tgt_col, taken from the environment's precomputed tables */
1136 col_cost_t *costs = env->single_cols[tgt_col];
1137 int res, max_depth, trip;
1142 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1143 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1144 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1145 stat_ev_int("heur4_recolor_depth_max", max_depth);
1146 stat_ev_int("heur4_recolor_trip", trip);
1152 #ifdef DEBUG_libfirm
/* explain the failure at high debug levels */
1153 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1154 if (!is_loose(node))
1155 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1157 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1158 dbg_admissible_colors(env, node);
1159 DB((dbg, LEVEL_4, ")\n"));
1168 * Tries to color an affinity chunk (or at least a part of it).
1169 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1171 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
1173 aff_chunk_t *best_chunk = NULL;
1174 int n_nodes = ARR_LEN(c->n);
1175 int best_color = -1;
1176 int n_int_chunks = 0;
1177 waitq *tmp_chunks = new_waitq();
1178 waitq *best_starts = NULL;
1179 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1181 int idx, len, i, nidx, pos;
1182 struct list_head changed;
1184 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1185 DBG_AFF_CHUNK(env, LEVEL_2, c);
1186 DB((dbg, LEVEL_2, "\n"));
1188 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1190 ++env->chunk_visited;
1192 /* compute color preference */
1193 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1194 const ir_node *n = c->interfere[pos];
1195 co_mst_irn_t *node = get_co_mst_irn(env, n);
1196 aff_chunk_t *chunk = node->chunk;
1198 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1199 assert(!chunk->deleted);
1200 chunk->visited = env->chunk_visited;
1203 aff_chunk_assure_weight(env, chunk);
1204 for (i = 0; i < env->n_regs; ++i)
1205 order[i].cost += chunk->color_affinity[i].cost;
1209 for (i = 0; i < env->n_regs; ++i) {
1210 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1212 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1215 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1217 DBG_COL_COST(env, LEVEL_2, order);
1218 DB((dbg, LEVEL_2, "\n"));
1220 /* check which color is the "best" for the given chunk.
1221 * if we found a color which was ok for all nodes, we take it
1222 * and do not look further. (see did_all flag usage below.)
1223 * If we have many colors which fit all nodes it is hard to decide
1224 * which one to take anyway.
1225 * TODO Sebastian: Perhaps we should at all nodes and figure out
1226 * a suitable color using costs as done above (determine_color_costs).
1228 for (i = 0; i < env->k; ++i) {
1229 int col = order[i].col;
1230 waitq *good_starts = new_waitq();
1231 aff_chunk_t *local_best;
1234 /* skip ignore colors */
1235 if (bitset_is_set(env->ignore_regs, col))
1238 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1242 /* try to bring all nodes of given chunk to the current color. */
1243 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1244 const ir_node *irn = c->n[idx];
1245 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1248 assert(! node->fixed && "Node must not have a fixed color.");
1249 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1252 The order of the colored nodes is important, so we record the successfully
1253 colored ones in the order they appeared.
1255 INIT_LIST_HEAD(&changed);
1257 good = change_node_color(env, node, col, &changed);
1258 stat_ev_tim_pop("heur4_recolor");
1260 waitq_put(good_starts, node);
1261 materialize_coloring(&changed);
1266 reject_coloring(&changed);
1268 n_succeeded += good;
1269 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1272 /* unfix all nodes */
1273 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1274 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1278 /* try next color when failed */
1279 if (n_succeeded == 0)
1282 /* fragment the chunk according to the coloring */
1283 local_best = fragment_chunk(env, col, c, tmp_chunks);
1285 /* search the best of the good list
1286 and make it the new best if it is better than the current */
1288 aff_chunk_assure_weight(env, local_best);
1290 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1291 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1293 if (! best_chunk || best_chunk->weight < local_best->weight) {
1294 best_chunk = local_best;
1297 del_waitq(best_starts);
1298 best_starts = good_starts;
1299 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1301 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1302 del_waitq(good_starts);
1306 del_waitq(good_starts);
1309 /* if all nodes were recolored, bail out */
1310 if (n_succeeded == n_nodes)
1314 stat_ev_int("heur4_colors_tried", i);
1316 /* free all intermediate created chunks except best one */
1317 while (! waitq_empty(tmp_chunks)) {
1318 aff_chunk_t *tmp = waitq_get(tmp_chunks);
1319 if (tmp != best_chunk)
1320 delete_aff_chunk(env, tmp);
1322 del_waitq(tmp_chunks);
1324 /* return if coloring failed */
1327 del_waitq(best_starts);
1331 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1332 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1333 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1335 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1336 const ir_node *irn = best_chunk->n[idx];
1337 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1340 /* bring the node to the color. */
1341 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1342 INIT_LIST_HEAD(&changed);
1344 res = change_node_color(env, node, best_color, &changed);
1345 stat_ev_tim_pop("heur4_recolor");
1347 materialize_coloring(&changed);
1350 assert(list_empty(&changed));
1353 /* remove the nodes in best chunk from original chunk */
1354 len = ARR_LEN(best_chunk->n);
1355 for (idx = 0; idx < len; ++idx) {
1356 const ir_node *irn = best_chunk->n[idx];
1357 int pos = nodes_bsearch(c->n, irn);
1362 len = ARR_LEN(c->n);
1363 for (idx = nidx = 0; idx < len; ++idx) {
1364 const ir_node *irn = c->n[idx];
1370 ARR_SHRINKLEN(c->n, nidx);
1373 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1374 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1375 const ir_node *n = c->n[idx];
1376 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1380 /* fragment the remaining chunk */
1381 visited = bitset_irg_malloc(env->co->irg);
1382 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1383 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1385 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1386 const ir_node *irn = c->n[idx];
1387 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1388 aff_chunk_t *new_chunk = new_aff_chunk(env);
1389 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1391 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1392 aff_chunk_assure_weight(env, new_chunk);
1393 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1397 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1398 const ir_node *n = best_chunk->n[idx];
1399 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1403 /* clear obsolete chunks and free some memory */
1404 delete_aff_chunk(env, best_chunk);
1405 bitset_free(visited);
1407 del_waitq(best_starts);
1409 stat_ev_ctx_pop("heur4_color_chunk");
1413 * Main driver for mst safe coalescing algorithm.
 *
 * Sets up a co_mst_env_t for the given copy-optimization problem,
 * builds the affinity chunks, colors them in order of decreasing
 * weight (heaviest first, via a priority queue) and finally writes
 * the computed colors back as register assignments.
 *
 * NOTE(review): this excerpt is line-sampled; blank lines, closing
 * braces, `continue` statements and the final return are not visible
 * here. Comments below only state what the visible lines establish.
1415 static int co_solve_heuristic_mst(copy_opt_t *co)
1417 unsigned n_regs = co->cls->n_regs;
/* registers this class must not use; filled by be_put_ignore_regs below */
1418 bitset_t *ignore_regs = bitset_alloca(n_regs);
1421 co_mst_env_t mst_env;
/* per-node co_mst_irn_t data is attached to IR nodes via an ir phase */
1428 phase_init(&mst_env.ph, "co_mst", co->irg, PHASE_DEFAULT_GROWTH, co_mst_irn_init, &mst_env);
1430 k = be_put_ignore_regs(co->cenv->birg, co->cls, ignore_regs);
1433 mst_env.n_regs = n_regs;
1435 mst_env.chunks = new_pqueue();
1437 mst_env.ignore_regs = ignore_regs;
1438 mst_env.ifg = co->cenv->ifg;
1439 mst_env.chunkset = pset_new_ptr(512);
1440 mst_env.chunk_visited = 0;
1441 mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
/* Pre-build one col_cost_t vector per color: every entry gets cost 0.0,
 * then the first entry of each vector is raised to 1.0.  The col fields
 * are presumably set on the lines omitted from this excerpt -- confirm
 * against the full file. */
1443 for (i = 0; i < n_regs; ++i) {
1444 col_cost_t *vec = phase_alloc(&mst_env.ph, sizeof(*vec) * n_regs);
1446 mst_env.single_cols[i] = vec;
1447 for (j = 0; j < n_regs; ++j) {
1449 vec[j].cost = REAL(0.0);
1453 vec[0].cost = REAL(1.0);
1456 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1458 /* build affinity chunks */
1460 build_affinity_chunks(&mst_env);
1461 stat_ev_tim_pop("heur4_initial_chunk");
1463 /* color chunks as long as there are some */
1464 while (! pqueue_empty(mst_env.chunks)) {
/* chunks were queued keyed by weight, so the best chunk comes first */
1465 aff_chunk_t *chunk = pqueue_pop_front(mst_env.chunks);
1467 color_aff_chunk(&mst_env, chunk);
1468 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1469 delete_aff_chunk(&mst_env, chunk);
1472 /* apply coloring */
1473 foreach_phase_irn(&mst_env.ph, irn) {
1475 const arch_register_t *reg;
/* ignore-nodes get no register from us (loop continue is on an omitted line) */
1477 if (arch_irn_is_ignore(irn))
1480 mirn = get_co_mst_irn(&mst_env, irn);
1481 // assert(mirn->fixed && "Node should have fixed color");
1483 /* skip nodes where color hasn't changed */
1484 if (mirn->init_col == mirn->col)
1487 reg = arch_register_for_index(co->cls, mirn->col);
1488 arch_set_irn_register(irn, reg);
1489 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1492 /* free allocated memory */
1493 del_pqueue(mst_env.chunks);
1494 phase_free(&mst_env.ph);
1495 del_pset(mst_env.chunkset);
1497 stat_ev_tim_pop("heur4_total");
/* Run-time options of the heur4 coalescer, attached to the
 * be.ra.chordal.co.heur4 option group in be_init_copyheur4().
 * (Table terminator is on a line omitted from this excerpt.) */
1502 static const lc_opt_table_entry_t options[] = {
/* "limit": recoloring depth limit, backs recolor_limit (default 7) */
1503 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
/* "di": dislike influence factor, backs dislike_influence (default 0.1) */
1504 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
1508 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4);
1509 void be_init_copyheur4(void)
1511 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1512 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1513 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1514 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1515 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1517 static co_algo_info copyheur = {
1518 co_solve_heuristic_mst, 0
1521 lc_opt_add_table(heur4_grp, options);
1522 be_register_copyopt("heur4", ©heur);
1524 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");