2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
26 * This is the C implementation of the mst algorithm
27 * originally written in Java by Sebastian Hack.
28 * (also known as "heur3" :)
29 * Performs simple copy minimization.
33 #define DISABLE_STATEV
40 #include "raw_bitset.h"
41 #include "irnodemap.h"
55 #include "becopyopt_t.h"
59 #define COL_COST_INFEASIBLE DBL_MAX
60 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
61 #define NEIGHBOUR_CONSTR_COSTS 64.0
66 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while (0)
67 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while (0)
/* Debug module handle; NULL until registered. */
69 static firm_dbg_module_t *dbg = NULL;
/* Fallback no-op variants of the chunk/cost dump macros (non-debug build). */
73 #define DBG_AFF_CHUNK(env, level, chunk)
74 #define DBG_COL_COST(env, level, cost)
/* Appends the float suffix so real_t literals stay single precision. */
79 #define REAL(C) (C ## f)
/* Running counter used to hand out unique chunk ids (see new_aff_chunk). */
81 static unsigned last_chunk_id = 0;
/* Maximum recursion depth for recolor_nodes before giving up. */
82 static int recolor_limit = 7;
/* Weight of the "dislike" term when ordering colors in color_aff_chunk. */
83 static double dislike_influence = REAL(0.1);
/* A (color, cost) pair; arrays of these are sorted by the cmp_col_cost_* comparators. */
85 typedef struct col_cost_t {
/* An affinity chunk: a set of nodes connected by affinity edges which we try
 * to assign one common color. */
93 typedef struct aff_chunk_t {
94 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
95 const ir_node **interfere; /**< An ARR_F containing all interfering nodes. */
96 int weight; /**< Weight of this chunk */
97 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
98 unsigned deleted : 1; /**< For debugging: Set if the chunk was deleted. */
99 unsigned id; /**< An id of this chunk. */
/* Per-color affinity costs; allocated with env->n_regs entries via XMALLOCF
 * (declared [1] as a pre-C99 flexible-array idiom). */
102 col_cost_t color_affinity[1];
/* An affinity edge between two nodes, used while building chunks. */
108 typedef struct aff_edge_t {
109 const ir_node *src; /**< Source node. */
110 const ir_node *tgt; /**< Target node. */
111 int weight; /**< The weight of this edge. */
114 /* main coalescing environment */
115 typedef struct co_mst_env_t {
116 int n_regs; /**< number of regs in class */
117 int k; /**< number of non-ignore registers in class */
118 bitset_t *allocatable_regs; /**< set of allocatable registers (global ignore registers excluded) */
119 ir_nodemap map; /**< phase object holding data for nodes */
121 pqueue_t *chunks; /**< priority queue for chunks */
122 list_head chunklist; /**< list holding all chunks */
123 be_ifg_t *ifg; /**< the interference graph */
124 copy_opt_t *co; /**< the copy opt object */
/* Monotonically increasing visit stamp; compared against chunk->visited in
 * color_aff_chunk to avoid counting a neighbouring chunk twice. */
125 unsigned chunk_visited;
/* Per-color cost templates indexed by color (see change_node_color);
 * presumably precomputed at env setup — initialization site not visible here. */
126 col_cost_t **single_cols;
129 /* stores coalescing related information for a node */
130 typedef struct co_mst_irn_t {
131 const ir_node *irn; /**< the irn this information belongs to */
132 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
133 bitset_t *adm_colors; /**< set of admissible colors for this irn */
134 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
135 int n_neighs; /**< length of the interfering neighbours array. */
136 int int_aff_neigh; /**< number of interfering affinity neighbours */
137 int col; /**< color currently assigned */
138 int init_col; /**< the initial color */
139 int tmp_col; /**< a temporary assigned color */
140 unsigned fixed : 1; /**< the color is fixed */
141 struct list_head list; /**< Queue for coloring undo. */
/* Constraint factor in (0, 1]: (1 + n_regs - #admissible) / n_regs, so a more
 * constrained node (fewer admissible colors) gets a larger factor. */
142 real_t constr_factor;
146 * In case there is no phase information for irn, initialize it.
148 static co_mst_irn_t *co_mst_irn_init(co_mst_env_t *env, const ir_node *irn)
/* All per-node data lives on the environment's obstack; it is freed wholesale
 * when the environment is torn down. */
150 co_mst_irn_t *res = OALLOC(&env->obst, co_mst_irn_t);
152 const arch_register_req_t *req;
153 neighbours_iter_t nodes_it;
161 res->int_neighs = NULL;
162 res->int_aff_neigh = 0;
/* Start from the color the register allocator already assigned. */
163 res->col = arch_get_irn_register(irn)->index;
164 res->init_col = res->col;
165 INIT_LIST_HEAD(&res->list);
167 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
169 /* set admissible registers */
170 res->adm_colors = bitset_obstack_alloc(&env->obst, env->n_regs);
172 /* Exclude colors not assignable to the irn */
173 req = arch_get_irn_register_req(irn);
174 if (arch_register_req_is(req, limited)) {
175 rbitset_copy_to_bitset(req->limited, res->adm_colors);
176 /* exclude global ignore registers as well */
177 bitset_and(res->adm_colors, env->allocatable_regs);
/* (else branch) unconstrained node: every allocatable register is admissible */
179 bitset_copy(res->adm_colors, env->allocatable_regs);
182 /* compute the constraint factor */
183 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
185 /* set the number of interfering affinity neighbours to -1, they are calculated later */
186 res->int_aff_neigh = -1;
188 /* build list of interfering neighbours */
/* Neighbours are collected on the obstack and finished into one contiguous
 * pointer array; ignore nodes are filtered out here once so later loops
 * need not re-check most of them. */
190 be_ifg_foreach_neighbour(env->ifg, &nodes_it, irn, neigh) {
191 if (!arch_irn_is_ignore(neigh)) {
192 obstack_ptr_grow(&env->obst, neigh);
196 res->int_neighs = (ir_node**)obstack_finish(&env->obst);
/* Returns the coalescing info for @p node, lazily creating and caching it in
 * the node map on first access. */
201 static co_mst_irn_t *get_co_mst_irn(co_mst_env_t *env, const ir_node *node)
203 co_mst_irn_t *res = ir_nodemap_get(co_mst_irn_t, &env->map, node);
205 res = co_mst_irn_init(env, node);
206 ir_nodemap_insert(&env->map, node, res);
/* Decision callback: non-zero iff @p node "accepts" color @p col
 * (see decider_has_color / decider_hasnot_color / decider_always_yes). */
211 typedef int decide_func_t(const co_mst_irn_t *node, int col);
216 * Write a chunk to stderr for debugging.
218 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
/* weight is only meaningful while weight_consistent is set */
222 if (c->weight_consistent)
223 ir_fprintf(stderr, " $%d ", c->weight);
224 ir_fprintf(stderr, "{");
225 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
226 const ir_node *n = c->n[i];
227 ir_fprintf(stderr, " %+F,", n);
229 ir_fprintf(stderr, "}");
233 * Dump all admissible colors to stderr.
235 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
239 if (bitset_popcount(node->adm_colors) < 1)
240 fprintf(stderr, "no admissible colors?!?");
242 bitset_foreach(node->adm_colors, idx) {
243 ir_fprintf(stderr, " %zu", idx);
249 * Dump color-cost pairs to stderr.
251 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
254 for (i = 0; i < env->n_regs; ++i)
255 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
258 #endif /* DEBUG_libfirm */
/* Effective color of a node: a temporary (trial) color overrides the
 * currently assigned one; tmp_col < 0 means "no temporary color set". */
260 static inline int get_mst_irn_col(const co_mst_irn_t *node)
262 return node->tmp_col >= 0 ? node->tmp_col : node->col;
266 * @return 1 if node @p node has color @p col, 0 otherwise.
268 static int decider_has_color(const co_mst_irn_t *node, int col)
270 return get_mst_irn_col(node) == col;
274 * @return 1 if node @p node has not color @p col, 0 otherwise.
276 static int decider_hasnot_color(const co_mst_irn_t *node, int col)
278 return get_mst_irn_col(node) != col;
282 * Always returns true.
284 static int decider_always_yes(const co_mst_irn_t *node, int col)
291 /** compares two affinity edges by its weight */
292 static int cmp_aff_edge(const void *a, const void *b)
294 const aff_edge_t *e1 = (const aff_edge_t*)a;
295 const aff_edge_t *e2 = (const aff_edge_t*)b;
/* tie-break on node indices so qsort produces a deterministic order */
297 if (e2->weight == e1->weight) {
298 if (e2->src->node_idx == e1->src->node_idx)
299 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
301 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
303 /* sort in descending order */
304 return QSORT_CMP(e2->weight, e1->weight);
307 /** compares two color-cost pairs */
/* ascending by cost, ties broken by color number */
308 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
310 const col_cost_t *c1 = (const col_cost_t*)a;
311 const col_cost_t *c2 = (const col_cost_t*)b;
312 real_t diff = c1->cost - c2->cost;
319 return QSORT_CMP(c1->col, c2->col);
/* descending by cost (note the swapped operands), ties broken by color number */
322 static int cmp_col_cost_gt(const void *a, const void *b)
324 const col_cost_t *c1 = (const col_cost_t*)a;
325 const col_cost_t *c2 = (const col_cost_t*)b;
326 real_t diff = c2->cost - c1->cost;
333 return QSORT_CMP(c1->col, c2->col);
337 * Creates a new affinity chunk
339 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
/* XMALLOCF sizes the trailing color_affinity[] array to env->n_regs entries */
341 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
342 c->n = NEW_ARR_F(const ir_node *, 0);
343 c->interfere = NEW_ARR_F(const ir_node *, 0);
345 c->weight_consistent = 0;
/* ids start at 1; 0 is used in debug output for "no chunk" */
347 c->id = ++last_chunk_id;
/* register the chunk in the environment's global chunk list */
349 list_add(&c->list, &env->chunklist);
354 * Frees all memory allocated by an affinity chunk.
356 static inline void delete_aff_chunk(aff_chunk_t *c)
359 DEL_ARR_F(c->interfere);
366 * binary search of sorted nodes.
368 * @return the position where n is found in the array arr or ~pos
369 * if the nodes is not here.
/* ~pos (one's complement) encodes the insertion point for a missing node,
 * so callers can distinguish "found at pos" (>= 0) from "insert at ~result". */
371 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
373 int hi = ARR_LEN(arr);
377 int md = lo + ((hi - lo) >> 1);
390 /** Check if a node n can be found inside arr. */
391 static int node_contains(const ir_node **arr, const ir_node *n)
393 int i = nodes_bsearch(arr, n);
398 * Insert a node into the sorted nodes list.
400 * @return 1 if the node was inserted, 0 else
402 static int nodes_insert(const ir_node ***arr, const ir_node *irn)
404 int idx = nodes_bsearch(*arr, irn);
407 int i, n = ARR_LEN(*arr);
/* grow by one, then shift the tail right to make room at the insert point */
410 ARR_APP1(const ir_node *, *arr, irn);
415 for (i = n - 1; i >= idx; --i)
424 * Adds a node to an affinity chunk
426 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
/* nothing to do if the node is already a member of the chunk */
430 if (! nodes_insert(&c->n, node->irn))
/* chunk contents changed -> cached weight is stale */
433 c->weight_consistent = 0;
/* merge the node's interference neighbours into the chunk's interfere set */
436 for (i = node->n_neighs - 1; i >= 0; --i) {
437 ir_node *neigh = node->int_neighs[i];
438 nodes_insert(&c->interfere, neigh);
443 * Check if affinity chunk @p chunk interferes with node @p irn.
445 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
447 return node_contains(chunk->interfere, irn);
451 * Check if there are interference edges from c1 to c2.
453 * @param c2 Another chunk
454 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
456 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
463 /* check if there is a node in c2 having an interfering neighbor in c1 */
464 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
465 const ir_node *irn = c2->n[i];
467 if (node_contains(c1->interfere, irn))
474 * Returns the affinity chunk of @p irn or creates a new
475 * one with @p irn as element if there is none assigned.
477 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
479 co_mst_irn_t *node = get_co_mst_irn(env, irn);
484 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
485 * are no interference edges from chunk(src) to chunk(tgt)).
486 * @return 1 if successful, 0 if not possible
488 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
490 aff_chunk_t *c1 = get_aff_chunk(env, src);
491 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
/* debug dump of both chunks (or the bare nodes when no chunk exists yet) */
494 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
496 DBG_AFF_CHUNK(env, LEVEL_4, c1);
498 DB((dbg, LEVEL_4, "{%+F}", src));
500 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
502 DBG_AFF_CHUNK(env, LEVEL_4, c2);
504 DB((dbg, LEVEL_4, "{%+F}", tgt));
506 DB((dbg, LEVEL_4, "\n"));
511 /* no chunk exists */
512 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
/* bail out if src directly interferes with tgt */
515 for (i = mirn->n_neighs - 1; i >= 0; --i) {
516 if (mirn->int_neighs[i] == tgt)
520 /* create one containing both nodes */
521 c1 = new_aff_chunk(env);
522 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
523 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
527 /* c2 already exists */
528 if (! aff_chunk_interferes(c2, src)) {
529 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
533 } else if (c2 == NULL) {
534 /* c1 already exists */
535 if (! aff_chunk_interferes(c1, tgt)) {
536 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
/* both chunks exist and are distinct: merge c2 into c1 when interference-free */
539 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
542 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
543 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
/* also take over c2's interference set */
545 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
546 const ir_node *irn = c2->interfere[idx];
547 nodes_insert(&c1->interfere, irn);
550 c1->weight_consistent = 0;
552 delete_aff_chunk(c2);
555 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
559 DB((dbg, LEVEL_4, " ... absorbed\n"));
564 * Assures that the weight of the given chunk is consistent.
/* Recomputes chunk weight and per-color affinity lazily; a no-op when the
 * weight_consistent flag is still set. */
566 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
568 if (! c->weight_consistent) {
/* reset per-color affinity accumulators */
572 for (i = 0; i < env->n_regs; ++i) {
573 c->color_affinity[i].col = i;
574 c->color_affinity[i].cost = REAL(0.0);
577 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
578 const ir_node *n = c->n[idx];
579 const affinity_node_t *an = get_affinity_info(env->co, n);
580 co_mst_irn_t *node = get_co_mst_irn(env, n);
/* constrained nodes vote for their admissible colors, weighted by how
 * constrained they are */
583 if (node->constr_factor > REAL(0.0)) {
584 bitset_foreach (node->adm_colors, col)
585 c->color_affinity[col].cost += node->constr_factor;
/* sum affinity costs of edges staying inside the chunk */
589 co_gs_foreach_neighb(an, neigh) {
590 const ir_node *m = neigh->irn;
592 if (arch_irn_is_ignore(m))
595 w += node_contains(c->n, m) ? neigh->costs : 0;
/* normalize the per-color affinity by the chunk size */
600 for (i = 0; i < env->n_regs; ++i)
601 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
604 // c->weight = bitset_popcount(c->nodes);
605 c->weight_consistent = 1;
610 * Count the number of interfering affinity neighbours
612 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
614 const ir_node *irn = an->irn;
615 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
618 co_gs_foreach_neighb(an, neigh) {
619 const ir_node *n = neigh->irn;
622 if (arch_irn_is_ignore(n))
625 /* check if the affinity neighbour interfere */
/* linear scan of the cached interference neighbour array */
626 for (i = 0; i < node->n_neighs; ++i) {
627 if (node->int_neighs[i] == n) {
638 * Build chunks of nodes connected by affinity edges.
639 * We start at the heaviest affinity edge.
640 * The chunks of the two edge-defining nodes will be
641 * merged if there are no interference edges from one
642 * chunk to the other.
644 static void build_affinity_chunks(co_mst_env_t *env)
646 nodes_iter_t nodes_it;
647 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
652 /* at first we create the affinity edge objects */
653 be_ifg_foreach_node(env->ifg, &nodes_it, n) {
654 int n_idx = get_irn_idx(n);
658 if (arch_irn_is_ignore(n))
661 n1 = get_co_mst_irn(env, n);
662 an = get_affinity_info(env->co, n);
/* compute the interfering-affinity-neighbour count lazily (once per node) */
665 if (n1->int_aff_neigh < 0)
666 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
668 /* build the affinity edges */
669 co_gs_foreach_neighb(an, neigh) {
670 const ir_node *m = neigh->irn;
671 int m_idx = get_irn_idx(m);
673 /* record the edge in only one direction */
678 /* skip ignore nodes */
679 if (arch_irn_is_ignore(m))
685 n2 = get_co_mst_irn(env, m);
686 if (n2->int_aff_neigh < 0) {
687 affinity_node_t *am = get_affinity_info(env->co, m);
688 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
691 * these weights are pure hackery ;-).
692 * It's not chriswue's fault but mine.
694 edge.weight = neigh->costs;
695 ARR_APP1(aff_edge_t, edges, edge);
701 /* now: sort edges and build the affinity chunks */
/* heaviest edge first (cmp_aff_edge sorts descending), greedy absorption */
702 len = ARR_LEN(edges);
703 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
704 for (i = 0; i < len; ++i) {
705 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
707 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
710 /* now insert all chunks into a priority queue */
711 list_for_each_entry(aff_chunk_t, curr_chunk, &env->chunklist, list) {
712 aff_chunk_assure_weight(env, curr_chunk);
714 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
715 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
716 DBG((dbg, LEVEL_1, "\n"));
718 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* nodes never reached by an affinity edge get singleton chunks */
721 for (pn = 0; pn < ARR_LEN(env->map.data); ++pn) {
722 co_mst_irn_t *mirn = (co_mst_irn_t*)env->map.data[pn];
725 if (mirn->chunk != NULL)
728 /* no chunk is allocated so far, do it now */
729 aff_chunk_t *curr_chunk = new_aff_chunk(env);
730 aff_chunk_add_node(curr_chunk, mirn);
732 aff_chunk_assure_weight(env, curr_chunk);
734 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
735 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
736 DBG((dbg, LEVEL_1, "\n"));
738 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* Orders the nodes of a chunk by growing a region from the node with the
 * heaviest total affinity, expanding along affinity edges (priority-first
 * traversal). Currently unused (see the attribute). */
744 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
746 pqueue_t *grow = new_pqueue();
747 ir_node const *max_node = NULL;
/* find the node with the maximum summed affinity-edge weight */
751 for (i = ARR_LEN(chunk->n); i != 0;) {
752 const ir_node *irn = chunk->n[--i];
753 affinity_node_t *an = get_affinity_info(env->co, irn);
756 if (arch_irn_is_ignore(irn))
760 co_gs_foreach_neighb(an, neigh)
763 if (w > max_weight) {
/* mark all chunk members as "not yet placed" */
771 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
773 for (i = ARR_LEN(chunk->n); i != 0;)
774 bitset_set(visited, get_irn_idx(chunk->n[--i]));
776 pqueue_put(grow, (void *) max_node, max_weight);
777 bitset_clear(visited, get_irn_idx(max_node));
779 while (!pqueue_empty(grow)) {
780 ir_node *irn = (ir_node*)pqueue_pop_front(grow);
781 affinity_node_t *an = get_affinity_info(env->co, irn);
783 if (arch_irn_is_ignore(irn))
786 assert(i <= ARR_LEN(chunk->n));
791 /* build the affinity edges */
792 co_gs_foreach_neighb(an, neigh) {
793 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
/* enqueue chunk members not yet placed, keyed by affinity cost */
795 if (bitset_is_set(visited, get_irn_idx(node->irn))) {
796 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
797 bitset_clear(visited, get_irn_idx(node->irn));
803 bitset_free(visited);
808 * Greedy collect affinity neighbours into the new chunk @p chunk starting at node @p node.
810 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
811 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
/* breadth-first expansion over affinity edges, gated by @p decider */
813 waitq *nodes = new_waitq();
815 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
817 /* init queue and chunk */
818 waitq_put(nodes, node);
819 bitset_set(visited, get_irn_idx(node->irn));
820 aff_chunk_add_node(chunk, node);
821 DB((dbg, LEVEL_1, " %+F", node->irn));
823 /* as long as there are nodes in the queue */
824 while (! waitq_empty(nodes)) {
825 co_mst_irn_t *n = (co_mst_irn_t*)waitq_get(nodes);
826 affinity_node_t *an = get_affinity_info(env->co, n->irn);
828 /* check all affinity neighbors */
830 co_gs_foreach_neighb(an, neigh) {
831 const ir_node *m = neigh->irn;
832 int m_idx = get_irn_idx(m);
835 if (arch_irn_is_ignore(m))
838 n2 = get_co_mst_irn(env, m);
840 if (! bitset_is_set(visited, m_idx) &&
843 ! aff_chunk_interferes(chunk, m) &&
844 node_contains(orig_chunk->n, m))
847 following conditions are met:
848 - neighbour is not visited
849 - neighbour likes the color
850 - neighbour has not yet a fixed color
851 - the new chunk doesn't interfere with the neighbour
852 - neighbour belongs or belonged once to the original chunk
854 bitset_set(visited, m_idx);
855 aff_chunk_add_node(chunk, n2);
856 DB((dbg, LEVEL_1, " %+F", n2->irn));
857 /* enqueue for further search */
858 waitq_put(nodes, n2);
864 DB((dbg, LEVEL_1, "\n"));
870 * Fragment the given chunk into chunks having given color and not having given color.
/* @return the heaviest fragment (the "local best"); all fragments are also
 * appended to @p tmp so the caller can free the losers later. */
872 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
874 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
876 aff_chunk_t *best = NULL;
878 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
881 aff_chunk_t *tmp_chunk;
882 decide_func_t *decider;
/* nodes already assigned to a fragment are skipped */
886 if (bitset_is_set(visited, get_irn_idx(irn)))
889 node = get_co_mst_irn(env, irn);
/* choose the expansion predicate based on the node's current color */
891 if (get_mst_irn_col(node) == col) {
892 decider = decider_has_color;
894 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
897 decider = decider_hasnot_color;
899 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
902 /* create a new chunk starting at current node */
903 tmp_chunk = new_aff_chunk(env);
904 waitq_put(tmp, tmp_chunk);
905 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
906 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
908 /* remember the local best */
909 aff_chunk_assure_weight(env, tmp_chunk);
910 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
914 assert(best && "No chunk found?");
915 bitset_free(visited);
920 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
921 * ATTENTION: the queue is empty after calling this function!
923 static inline void reject_coloring(struct list_head *nodes)
925 DB((dbg, LEVEL_4, "\treject coloring for"));
926 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
927 DB((dbg, LEVEL_4, " %+F", n->irn));
928 assert(n->tmp_col >= 0);
930 list_del_init(&n->list);
932 DB((dbg, LEVEL_4, "\n"));
/* Commits the temporary colors of all queued nodes (makes them permanent)
 * and empties the queue. */
935 static inline void materialize_coloring(struct list_head *nodes)
937 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
938 assert(n->tmp_col >= 0);
941 list_del_init(&n->list);
/* Assigns a temporary color to @p node and records it on @p changed so the
 * assignment can later be materialized or rejected as a group. */
945 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
948 assert(!node->fixed);
949 assert(node->tmp_col < 0);
/* node must not already be on some undo list */
950 assert(node->list.next == &node->list && node->list.prev == &node->list);
951 assert(bitset_is_set(node->adm_colors, col));
953 list_add_tail(&node->list, changed);
/* A node is "loose" when its color is neither fixed nor temporarily set,
 * i.e. it may still be recolored freely. */
957 static inline int is_loose(co_mst_irn_t *node)
959 return !node->fixed && node->tmp_col < 0;
963 * Determines the costs for each color if it would be assigned to node @p node.
965 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
967 int *neigh_cols = ALLOCAN(int, env->n_regs);
/* base cost: the node's constraint factor for admissible colors, 0 otherwise */
972 for (i = 0; i < env->n_regs; ++i) {
975 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
/* walk interfering neighbours: colors already taken by fixed neighbours are
 * ruled out, loose neighbours are counted per color */
978 for (i = 0; i < node->n_neighs; ++i) {
979 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
980 int col = get_mst_irn_col(n);
985 costs[col].cost = REAL(0.0);
/* scale down colors popular among loose neighbours */
989 coeff = REAL(1.0) / n_loose;
990 for (i = 0; i < env->n_regs; ++i)
991 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
995 /* need forward declaration due to recursive call */
996 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
999 * Tries to change node to a color but @p exclude_col.
1000 * @return 1 if succeeded, 0 otherwise.
1002 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
1004 int col = get_mst_irn_col(node);
1007 /* neighbours has already a different color -> good, temporary fix it */
1008 if (col != exclude_col) {
1010 set_temp_color(node, col, changed);
1014 /* The node has the color it should not have _and_ has not been visited yet. */
1015 if (is_loose(node)) {
1016 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
1018 /* Get the costs for giving the node a specific color. */
1019 determine_color_costs(env, node, costs);
1021 /* Since the node must not have the not_col, set the costs for that color to "infinity" */
/* NOTE: cost 0.0 acts as "forbidden" here — recolor_nodes stops at the
 * first zero-cost entry after sorting (see costs[i].cost == REAL(0.0)). */
1022 costs[exclude_col].cost = REAL(0.0);
1024 /* sort the colors according costs, cheapest first. */
1025 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
1027 /* Try recoloring the node using the color list. */
1028 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1035 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1036 * ATTENTION: Expect @p costs already sorted by increasing costs.
1037 * @return 1 if coloring could be applied, 0 otherwise.
/* Mutually recursive with change_node_color_excluded; @p depth / @p max_depth /
 * @p trip track and bound the recursion (see recolor_limit). */
1039 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
1042 struct list_head local_changed;
1045 if (depth > *max_depth)
1048 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1049 DBG_COL_COST(env, LEVEL_4, costs);
1050 DB((dbg, LEVEL_4, "\n"));
/* hard cap on the recursion depth to keep the search tractable */
1052 if (depth >= recolor_limit) {
1053 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
/* try the colors in the (pre-sorted) given order */
1057 for (i = 0; i < env->n_regs; ++i) {
1058 int tgt_col = costs[i].col;
1062 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
1063 if (costs[i].cost == REAL(0.0)) {
1064 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1068 /* Set the new color of the node and mark the node as temporarily fixed. */
1069 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1070 INIT_LIST_HEAD(&local_changed);
1071 set_temp_color(node, tgt_col, &local_changed);
1072 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1074 /* try to color all interfering neighbours with current color forbidden */
1075 for (j = 0; j < node->n_neighs; ++j) {
1079 neigh = node->int_neighs[j];
1081 if (arch_irn_is_ignore(neigh))
1084 nn = get_co_mst_irn(env, neigh);
1085 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1086 neigh, j, nn->fixed, nn->tmp_col, nn->col));
1089 Try to change the color of the neighbor and record all nodes which
1090 get changed in the tmp list. Add this list to the "changed" list for
1091 that color. If we did not succeed to change the color of the neighbor,
1092 we bail out and try the next color.
1094 if (get_mst_irn_col(nn) == tgt_col) {
1095 /* try to color neighbour with tgt_col forbidden */
1096 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1104 We managed to assign the target color to all neighbors, so from the perspective
1105 of the current node, every thing was ok and we can return safely.
1108 /* append the local_changed ones to global ones */
1109 list_splice(&local_changed, changed);
1113 /* coloring of neighbours failed, so we try next color */
/* undo all temporary colors set while trying this color */
1114 reject_coloring(&local_changed);
1118 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1123 * Tries to bring node @p node and all its neighbours to color @p tgt_col.
1124 * @return 1 if color @p col could be applied, 0 otherwise
1126 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
1128 int col = get_mst_irn_col(node);
1130 /* if node already has the target color -> good, temporary fix it */
1131 if (col == tgt_col) {
1132 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1134 set_temp_color(node, tgt_col, changed);
1139 Node has not yet a fixed color and target color is admissible
1140 -> try to recolor node and its affinity neighbours
1142 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
/* use the precomputed cost template that prefers tgt_col */
1143 col_cost_t *costs = env->single_cols[tgt_col];
1144 int res, max_depth, trip;
1149 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1150 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1151 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1152 stat_ev_int("heur4_recolor_depth_max", max_depth);
1153 stat_ev_int("heur4_recolor_trip", trip);
/* failure diagnostics: distinguish "color fixed" from "color inadmissible" */
1159 #ifdef DEBUG_libfirm
1160 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1161 if (!is_loose(node))
1162 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1164 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1165 dbg_admissible_colors(env, node);
1166 DB((dbg, LEVEL_4, ")\n"));
1175 * Tries to color an affinity chunk (or at least a part of it).
1176 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
/* NOTE(review): this function continues beyond the end of the visible source;
 * only the visible portion is annotated here. */
1178 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
1180 aff_chunk_t *best_chunk = NULL;
1181 int n_nodes = ARR_LEN(c->n);
1182 int best_color = -1;
1183 int n_int_chunks = 0;
1184 waitq *tmp_chunks = new_waitq();
1185 waitq *best_starts = NULL;
1186 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1193 struct list_head changed;
1195 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1196 DBG_AFF_CHUNK(env, LEVEL_2, c);
1197 DB((dbg, LEVEL_2, "\n"));
1199 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
/* advance the visit stamp so each neighbouring chunk is counted only once */
1201 ++env->chunk_visited;
1203 /* compute color preference */
1204 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1205 const ir_node *n = c->interfere[pos];
1206 co_mst_irn_t *node = get_co_mst_irn(env, n);
1207 aff_chunk_t *chunk = node->chunk;
1209 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1210 assert(!chunk->deleted);
1211 chunk->visited = env->chunk_visited;
/* accumulate the neighbouring chunk's color preferences */
1214 aff_chunk_assure_weight(env, chunk);
1215 for (i = 0; i < env->n_regs; ++i)
1216 order[i].cost += chunk->color_affinity[i].cost;
/* blend own affinity with the neighbours' dislike (see dislike_influence) */
1220 for (i = 0; i < env->n_regs; ++i) {
1221 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1223 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1226 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1228 DBG_COL_COST(env, LEVEL_2, order);
1229 DB((dbg, LEVEL_2, "\n"));
1231 /* check which color is the "best" for the given chunk.
1232 * if we found a color which was ok for all nodes, we take it
1233 * and do not look further. (see did_all flag usage below.)
1234 * If we have many colors which fit all nodes it is hard to decide
1235 * which one to take anyway.
1236 * TODO Sebastian: Perhaps we should at all nodes and figure out
1237 * a suitable color using costs as done above (determine_color_costs).
1239 for (i = 0; i < env->k; ++i) {
1240 int col = order[i].col;
1242 aff_chunk_t *local_best;
1245 /* skip ignore colors */
1246 if (!bitset_is_set(env->allocatable_regs, col))
1249 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1252 good_starts = new_waitq();
1254 /* try to bring all nodes of given chunk to the current color. */
1255 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1256 const ir_node *irn = c->n[idx];
1257 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1260 assert(! node->fixed && "Node must not have a fixed color.");
1261 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1264 The order of the colored nodes is important, so we record the successfully
1265 colored ones in the order they appeared.
1267 INIT_LIST_HEAD(&changed);
1269 good = change_node_color(env, node, col, &changed);
1270 stat_ev_tim_pop("heur4_recolor");
1272 waitq_put(good_starts, node);
1273 materialize_coloring(&changed);
/* (failure path) roll back the temporary colors of this attempt */
1278 reject_coloring(&changed);
1280 n_succeeded += good;
1281 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1284 /* unfix all nodes */
1285 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1286 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1290 /* try next color when failed */
1291 if (n_succeeded == 0) {
1292 del_waitq(good_starts);
1296 /* fragment the chunk according to the coloring */
1297 local_best = fragment_chunk(env, col, c, tmp_chunks);
1299 /* search the best of the good list
1300 and make it the new best if it is better than the current */
1302 aff_chunk_assure_weight(env, local_best);
1304 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1305 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1307 if (! best_chunk || best_chunk->weight < local_best->weight) {
1308 best_chunk = local_best;
/* replace the previous best start set with this color's start set */
1311 del_waitq(best_starts);
1312 best_starts = good_starts;
1313 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1315 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1316 del_waitq(good_starts);
1320 del_waitq(good_starts);
1323 /* if all nodes were recolored, bail out */
1324 if (n_succeeded == n_nodes)
1328 stat_ev_int("heur4_colors_tried", i);
1330 /* free all intermediate created chunks except best one */
1331 while (! waitq_empty(tmp_chunks)) {
1332 aff_chunk_t *tmp = (aff_chunk_t*)waitq_get(tmp_chunks);
1333 if (tmp != best_chunk)
1334 delete_aff_chunk(tmp);
1336 del_waitq(tmp_chunks);
1338 /* return if coloring failed */
1341 del_waitq(best_starts);
1345 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1346 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1347 DB((dbg, LEVEL_2, "using color %d\n", best_color));
/* commit the winning color to every node of the best chunk */
1349 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1350 const ir_node *irn = best_chunk->n[idx];
1351 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1354 /* bring the node to the color. */
1355 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1356 INIT_LIST_HEAD(&changed);
1358 res = change_node_color(env, node, best_color, &changed);
1359 stat_ev_tim_pop("heur4_recolor");
1361 materialize_coloring(&changed);
1364 assert(list_empty(&changed));
1367 /* remove the nodes in best chunk from original chunk */
1368 len = ARR_LEN(best_chunk->n);
1369 for (idx = 0; idx < len; ++idx) {
1370 const ir_node *irn = best_chunk->n[idx];
1371 int pos = nodes_bsearch(c->n, irn);
/* compact the remaining nodes of c (drop the entries taken by best_chunk) */
1376 len = ARR_LEN(c->n);
1377 for (idx = nidx = 0; idx < len; ++idx) {
1378 const ir_node *irn = c->n[idx];
1384 ARR_SHRINKLEN(c->n, nidx);
1387 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1388 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1389 const ir_node *n = c->n[idx];
1390 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1394 /* fragment the remaining chunk */
1395 visited = bitset_malloc(get_irg_last_idx(env->co->irg));
1396 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1397 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1399 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1400 const ir_node *irn = c->n[idx];
1401 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1402 aff_chunk_t *new_chunk = new_aff_chunk(env);
1403 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1405 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1406 aff_chunk_assure_weight(env, new_chunk);
1407 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1411 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1412 const ir_node *n = best_chunk->n[idx];
1413 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1417 /* clear obsolete chunks and free some memory */
1418 delete_aff_chunk(best_chunk);
1419 bitset_free(visited);
1421 del_waitq(best_starts);
1423 stat_ev_ctx_pop("heur4_color_chunk");
1427 * Main driver for mst safe coalescing algorithm.
1429 static int co_solve_heuristic_mst(copy_opt_t *co)
/* NOTE(review): this view of the function is elided (original line numbers
   jump); the comments below describe only the statements visible here. */
/* Number of registers in the class being coalesced. */
1431 unsigned n_regs = co->cls->n_regs;
1432 bitset_t *allocatable_regs = bitset_alloca(n_regs);
1437 co_mst_env_t mst_env;
/* Per-node coalescing data (co_mst_irn_t) is looked up via this map,
   and all auxiliary memory comes from the environment obstack. */
1444 ir_nodemap_init(&mst_env.map, co->irg);
1445 obstack_init(&mst_env.obst);
/* Determine which registers of the class may actually be allocated;
   k is their count. */
1447 be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
1448 k = bitset_popcount(allocatable_regs);
1450 mst_env.n_regs = n_regs;
/* Affinity chunks are processed from a priority queue, heaviest first. */
1452 mst_env.chunks = new_pqueue();
1454 mst_env.allocatable_regs = allocatable_regs;
1455 mst_env.ifg = co->cenv->ifg;
1456 INIT_LIST_HEAD(&mst_env.chunklist);
1457 mst_env.chunk_visited = 0;
/* Pre-build one cost vector per register; each vector is later used to
   express a preference for a single color. */
1458 mst_env.single_cols = OALLOCN(&mst_env.obst, col_cost_t*, n_regs);
1460 for (i = 0; i < n_regs; ++i) {
1461 col_cost_t *vec = OALLOCN(&mst_env.obst, col_cost_t, n_regs);
1463 mst_env.single_cols[i] = vec;
1464 for (j = 0; j < n_regs; ++j) {
/* All entries start with zero cost ... */
1466 vec[j].cost = REAL(0.0);
/* ... then slot 0 is given cost 1.0.
   NOTE(review): lines between the inner loop and this assignment are
   elided here — presumably color i is swapped into slot 0 first; confirm
   against the full source. */
1470 vec[0].cost = REAL(1.0);
1473 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1475 /* build affinity chunks */
1477 build_affinity_chunks(&mst_env);
1478 stat_ev_tim_pop("heur4_initial_chunk");
1480 /* color chunks as long as there are some */
1481 while (! pqueue_empty(mst_env.chunks)) {
1482 aff_chunk_t *chunk = (aff_chunk_t*)pqueue_pop_front(mst_env.chunks);
/* Coloring a chunk may fragment it and re-enqueue the fragments, so the
   queue can grow while being drained. */
1484 color_aff_chunk(&mst_env, chunk);
1485 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1486 delete_aff_chunk(chunk);
1489 /* apply coloring */
/* Walk every node index that has per-node data and write the chosen
   color back as the node's architecture register. */
1490 for (pn = 0; pn < ARR_LEN(mst_env.map.data); ++pn) {
1491 co_mst_irn_t *mirn = (co_mst_irn_t*)mst_env.map.data[pn];
1492 const arch_register_t *reg;
1495 irn = get_idx_irn(co->irg, pn);
1496 if (arch_irn_is_ignore(irn))
1499 /* skip nodes where color hasn't changed */
1500 if (mirn->init_col == mirn->col)
1503 reg = arch_register_for_index(co->cls, mirn->col);
1504 arch_set_irn_register(irn, reg);
1505 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1508 /* free allocated memory */
1509 del_pqueue(mst_env.chunks);
1510 obstack_free(&mst_env.obst, NULL);
1511 ir_nodemap_destroy(&mst_env.map);
1513 stat_ev_tim_pop("heur4_total");
/* Command-line options of the heur4 coalescer (registered under
   be.ra.chordal.co.heur4 in be_init_copyheur4 below):
   - "limit": recoloring depth limit (recolor_limit)
   - "di":    influence factor of "dislike" costs (dislike_influence) */
1518 static const lc_opt_table_entry_t options[] = {
1519 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1520 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
/* Module constructor: registers the heur4 copy-minimization algorithm,
   its option table and its debug module with the backend. */
1524 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4)
1525 void be_init_copyheur4(void)
/* Walk down the option-group hierarchy to be.ra.chordal.co.heur4. */
1527 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1528 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1529 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1530 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1531 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1533 static co_algo_info copyheur = {
1534 co_solve_heuristic_mst, 0
1537 lc_opt_add_table(heur4_grp, options);
/* FIX: "&copy" had been garbled into the character '©' by an HTML-entity
   mis-encoding; restore the address-of expression "&copyheur". */
1538 be_register_copyopt("heur4", &copyheur);
1540 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");