2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
26 * This is the C implementation of the mst algorithm
27 * originally written in Java by Sebastian Hack.
28 * (also known as "heur3" :)
29 * Performs simple copy minimization.
33 #define DISABLE_STATEV
40 #include "raw_bitset.h"
41 #include "irnodemap.h"
55 #include "becopyopt_t.h"
59 #define COL_COST_INFEASIBLE DBL_MAX
60 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
61 #define NEIGHBOUR_CONSTR_COSTS 64.0
66 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while (0)
67 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while (0)
69 static firm_dbg_module_t *dbg = NULL;
73 #define DBG_AFF_CHUNK(env, level, chunk)
74 #define DBG_COL_COST(env, level, cost)
/* REAL(C) turns a floating literal into a float ("f"-suffixed) constant. */
79 #define REAL(C) (C ## f)
/* Monotonically increasing id source for affinity chunks. */
81 static unsigned last_chunk_id = 0;
/* Maximum recursion depth accepted by recolor_nodes() before giving up. */
82 static int recolor_limit = 7;
/* Weight of the "dislike" term when ordering candidate colors for a chunk. */
83 static double dislike_influence = REAL(0.1);
/* A (color, cost) pair used when ranking register colors.
 * NOTE(review): member lines are missing from this extract. */
85 typedef struct col_cost_t {
/* An affinity chunk: a set of nodes connected by affinity edges that the
 * heuristic tries to assign one common color. */
93 typedef struct aff_chunk_t {
94 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
95 const ir_node **interfere; /**< An ARR_F containing all inference. */
96 int weight; /**< Weight of this chunk */
97 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
98 unsigned deleted : 1; /**< For debugging: Set if the was deleted. */
99 unsigned id; /**< An id of this chunk. */
/* Trailing array sized to env->n_regs at allocation time (see new_aff_chunk). */
102 col_cost_t color_affinity[1];
/* A weighted affinity edge between two nodes; built in build_affinity_chunks
 * and sorted by cmp_aff_edge. */
108 typedef struct aff_edge_t {
109 const ir_node *src; /**< Source node. */
110 const ir_node *tgt; /**< Target node. */
111 int weight; /**< The weight of this edge. */
114 /* main coalescing environment */
115 typedef struct co_mst_env_t {
116 int n_regs; /**< number of regs in class */
117 int k; /**< number of non-ignore registers in class */
118 bitset_t *allocatable_regs; /**< set containing all global ignore registers */
119 ir_nodemap map; /**< phase object holding data for nodes */
121 pqueue_t *chunks; /**< priority queue for chunks */
122 list_head chunklist; /**< list holding all chunks */
123 be_ifg_t *ifg; /**< the interference graph */
124 copy_opt_t *co; /**< the copy opt object */
/* Generation counter used to mark chunks already seen in color_aff_chunk. */
125 unsigned chunk_visited;
/* Per-color precomputed cost vectors (used by change_node_color). */
126 col_cost_t **single_cols;
129 /* stores coalescing related information for a node */
130 typedef struct co_mst_irn_t {
131 const ir_node *irn; /**< the irn this information belongs to */
132 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
133 bitset_t *adm_colors; /**< set of admissible colors for this irn */
134 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
135 int n_neighs; /**< length of the interfering neighbours array. */
136 int int_aff_neigh; /**< number of interfering affinity neighbours */
137 int col; /**< color currently assigned */
138 int init_col; /**< the initial color */
139 int tmp_col; /**< a temporary assigned color */
140 unsigned fixed : 1; /**< the color is fixed */
141 struct list_head list; /**< Queue for coloring undo. */
/* Constraint factor in (0, 1]: closer to 1 means fewer admissible colors. */
142 real_t constr_factor;
146 * In case there is no phase information for irn, initialize it.
/* Allocate and initialize the per-node coalescing info for @p irn on the
 * environment's obstack: admissible color set, constraint factor and the
 * cached list of interfering neighbours.
 * NOTE(review): several lines (braces, else branch) are missing from this
 * extract; comments describe only the visible code. */
148 static co_mst_irn_t *co_mst_irn_init(co_mst_env_t *env, const ir_node *irn)
150 co_mst_irn_t *res = OALLOC(&env->obst, co_mst_irn_t);
152 const arch_register_req_t *req;
153 neighbours_iter_t nodes_it;
161 res->int_neighs = NULL;
162 res->int_aff_neigh = 0;
/* start from the register currently assigned by the allocator */
163 res->col = arch_register_get_index(arch_get_irn_register(irn));
164 res->init_col = res->col;
165 INIT_LIST_HEAD(&res->list);
167 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
169 /* set admissible registers */
170 res->adm_colors = bitset_obstack_alloc(&env->obst, env->n_regs);
172 /* Exclude colors not assignable to the irn */
173 req = arch_get_irn_register_req(irn);
174 if (arch_register_req_is(req, limited)) {
175 rbitset_copy_to_bitset(req->limited, res->adm_colors);
/* unconstrained node: every register of the class is admissible */
177 bitset_set_all(res->adm_colors);
180 /* exclude global ignore registers as well */
181 bitset_and(res->adm_colors, env->allocatable_regs);
183 /* compute the constraint factor */
/* in (0, 1]: the fewer admissible colors remain, the larger the factor */
184 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
186 /* set the number of interfering affinity neighbours to -1, they are calculated later */
187 res->int_aff_neigh = -1;
189 /* build list of interfering neighbours */
/* collect non-ignore interference neighbours into a contiguous obstack array */
191 be_ifg_foreach_neighbour(env->ifg, &nodes_it, irn, neigh) {
192 if (!arch_irn_is_ignore(neigh)) {
193 obstack_ptr_grow(&env->obst, neigh);
197 res->int_neighs = (ir_node**)obstack_finish(&env->obst);
/* Lazily fetch the coalescing info for @p node from the nodemap, creating
 * and caching it on first access. */
202 static co_mst_irn_t *get_co_mst_irn(co_mst_env_t *env, const ir_node *node)
204 co_mst_irn_t *res = (co_mst_irn_t*)ir_nodemap_get(&env->map, node);
206 res = co_mst_irn_init(env, node);
207 ir_nodemap_insert(&env->map, node, res);
212 typedef int decide_func_t(const co_mst_irn_t *node, int col);
217 * Write a chunk to stderr for debugging.
/* The weight is printed only when it is known to be consistent. */
219 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
223 if (c->weight_consistent)
224 ir_fprintf(stderr, " $%d ", c->weight);
225 ir_fprintf(stderr, "{");
226 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
227 const ir_node *n = c->n[i];
228 ir_fprintf(stderr, " %+F,", n);
230 ir_fprintf(stderr, "}");
234 * Dump all admissible colors to stderr.
236 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
/* An empty admissible set would make the node uncolorable — flag it loudly. */
241 if (bitset_popcount(node->adm_colors) < 1)
242 fprintf(stderr, "no admissible colors?!?");
244 bitset_foreach(node->adm_colors, idx) {
245 ir_fprintf(stderr, " %zu", idx);
251 * Dump color-cost pairs to stderr.
253 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
/* Prints one (color, cost) pair per register of the class. */
256 for (i = 0; i < env->n_regs; ++i)
257 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
260 #endif /* DEBUG_libfirm */
/* Effective color of a node: the temporary color if one is set (>= 0),
 * otherwise the currently assigned color. */
262 static inline int get_mst_irn_col(const co_mst_irn_t *node)
264 return node->tmp_col >= 0 ? node->tmp_col : node->col;
268 * @return 1 if node @p node has color @p col, 0 otherwise.
270 static int decider_has_color(const co_mst_irn_t *node, int col)
272 return get_mst_irn_col(node) == col;
276 * @return 1 if node @p node has not color @p col, 0 otherwise.
278 static int decider_hasnot_color(const co_mst_irn_t *node, int col)
280 return get_mst_irn_col(node) != col;
284 * Always returns true.
/* Trivial decide_func_t used when no color-based filtering is wanted. */
286 static int decider_always_yes(const co_mst_irn_t *node, int col)
293 /** compares two affinity edges by its weight */
/* qsort comparator: descending by weight; ties are broken on the source and
 * target node indices so the sort order is deterministic. */
294 static int cmp_aff_edge(const void *a, const void *b)
296 const aff_edge_t *e1 = (const aff_edge_t*)a;
297 const aff_edge_t *e2 = (const aff_edge_t*)b;
299 if (e2->weight == e1->weight) {
300 if (e2->src->node_idx == e1->src->node_idx)
301 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
303 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
305 /* sort in descending order */
306 return QSORT_CMP(e2->weight, e1->weight);
309 /** compares to color-cost pairs */
/* Ascending-cost comparator (cheapest color first). */
310 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
312 const col_cost_t *c1 = (const col_cost_t*)a;
313 const col_cost_t *c2 = (const col_cost_t*)b;
314 real_t diff = c1->cost - c2->cost;
315 return (diff > 0) - (diff < 0);
/* Descending-cost comparator; equal costs fall back to the color index for a
 * deterministic order.
 * NOTE(review): the equality guard before line 325 is missing from this
 * extract — presumably `if (diff == 0)`; verify against the full source. */
318 static int cmp_col_cost_gt(const void *a, const void *b)
320 const col_cost_t *c1 = (const col_cost_t*)a;
321 const col_cost_t *c2 = (const col_cost_t*)b;
322 real_t diff = c2->cost - c1->cost;
325 return QSORT_CMP(c1->col, c2->col);
327 return (diff > 0) - (diff < 0);
331 * Creates a new affinity chunk
/* Allocates the chunk with its trailing color_affinity array sized to
 * env->n_regs, gives it a fresh id and links it into env->chunklist. */
333 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
335 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
336 c->n = NEW_ARR_F(const ir_node *, 0);
337 c->interfere = NEW_ARR_F(const ir_node *, 0);
339 c->weight_consistent = 0;
341 c->id = ++last_chunk_id;
343 list_add(&c->list, &env->chunklist);
348 * Frees all memory allocated by an affinity chunk.
350 static inline void delete_aff_chunk(aff_chunk_t *c)
353 DEL_ARR_F(c->interfere);
360 * binary search of sorted nodes.
362 * @return the position where n is found in the array arr or ~pos
363 * if the nodes is not here.
/* Classic lo/hi binary search; ~pos encodes the insertion point when the
 * node is absent (so callers can distinguish hit from miss). */
365 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
367 int hi = ARR_LEN(arr);
/* overflow-safe midpoint computation */
371 int md = lo + ((hi - lo) >> 1);
384 /** Check if a node n can be found inside arr. */
/* Membership test on a sorted node array via nodes_bsearch. */
385 static int node_contains(const ir_node **arr, const ir_node *n)
387 int i = nodes_bsearch(arr, n);
392 * Insert a node into the sorted nodes list.
394 * @return 1 if the node was inserted, 0 else
396 static int nodes_insert(const ir_node ***arr, const ir_node *irn)
/* ~idx from nodes_bsearch is the insertion point for a missing node */
398 int idx = nodes_bsearch(*arr, irn);
401 int i, n = ARR_LEN(*arr);
/* grow by one, then shift the tail right to keep the array sorted */
404 ARR_APP1(const ir_node *, *arr, irn);
409 for (i = n - 1; i >= idx; --i)
418 * Adds a node to an affinity chunk
/* Inserting a node invalidates the cached weight and merges the node's
 * interference neighbours into the chunk's interfere set. */
420 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
424 if (! nodes_insert(&c->n, node->irn))
427 c->weight_consistent = 0;
430 for (i = node->n_neighs - 1; i >= 0; --i) {
431 ir_node *neigh = node->int_neighs[i];
432 nodes_insert(&c->interfere, neigh);
437 * Check if affinity chunk @p chunk interferes with node @p irn.
439 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
441 return node_contains(chunk->interfere, irn);
445 * Check if there are interference edges from c1 to c2.
447 * @param c2 Another chunk
448 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
450 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
457 /* check if there is a node in c2 having an interfering neighbor in c1 */
458 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
459 const ir_node *irn = c2->n[i];
461 if (node_contains(c1->interfere, irn))
468 * Returns the affinity chunk of @p irn or creates a new
469 * one with @p irn as element if there is none assigned.
471 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
473 co_mst_irn_t *node = get_co_mst_irn(env, irn);
478 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
479 * are no interference edges from chunk(src) to chunk(tgt)).
480 * @return 1 if successful, 0 if not possible
/* Four cases are handled below: neither node has a chunk yet, only one of
 * them has one, or both have distinct non-interfering chunks (merge).
 * NOTE(review): the case guards themselves are missing from this extract. */
482 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
484 aff_chunk_t *c1 = get_aff_chunk(env, src);
485 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
488 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
490 DBG_AFF_CHUNK(env, LEVEL_4, c1);
492 DB((dbg, LEVEL_4, "{%+F}", src));
494 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
496 DBG_AFF_CHUNK(env, LEVEL_4, c2);
498 DB((dbg, LEVEL_4, "{%+F}", tgt));
500 DB((dbg, LEVEL_4, "\n"));
505 /* no chunk exists */
/* bail out if src directly interferes with tgt */
506 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
509 for (i = mirn->n_neighs - 1; i >= 0; --i) {
510 if (mirn->int_neighs[i] == tgt)
514 /* create one containing both nodes */
515 c1 = new_aff_chunk(env);
516 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
517 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
521 /* c2 already exists */
522 if (! aff_chunk_interferes(c2, src)) {
523 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
527 } else if (c2 == NULL) {
528 /* c1 already exists */
529 if (! aff_chunk_interferes(c1, tgt)) {
530 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
533 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
/* merge: move all nodes and interference entries of c2 into c1 */
536 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
537 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
539 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
540 const ir_node *irn = c2->interfere[idx];
541 nodes_insert(&c1->interfere, irn);
544 c1->weight_consistent = 0;
546 delete_aff_chunk(c2);
549 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
553 DB((dbg, LEVEL_4, " ... absorbed\n"));
558 * Assures that the weight of the given chunk is consistent.
/* Recomputes the chunk's color affinity vector and total weight from the
 * constraint factors and intra-chunk affinity edge costs; no-op when the
 * cached weight is still valid. */
560 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
562 if (! c->weight_consistent) {
/* reset the per-color affinity accumulator */
566 for (i = 0; i < env->n_regs; ++i) {
567 c->color_affinity[i].col = i;
568 c->color_affinity[i].cost = REAL(0.0);
571 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
572 const ir_node *n = c->n[idx];
573 const affinity_node_t *an = get_affinity_info(env->co, n);
574 co_mst_irn_t *node = get_co_mst_irn(env, n);
/* constrained nodes bias the chunk towards their admissible colors */
577 if (node->constr_factor > REAL(0.0)) {
579 bitset_foreach (node->adm_colors, col)
580 c->color_affinity[col].cost += node->constr_factor;
585 co_gs_foreach_neighb(an, neigh) {
586 const ir_node *m = neigh->irn;
588 if (arch_irn_is_ignore(m))
/* only edges with both endpoints inside the chunk count */
591 w += node_contains(c->n, m) ? neigh->costs : 0;
/* normalize the accumulated affinities by the chunk size */
596 for (i = 0; i < env->n_regs; ++i)
597 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
600 // c->weight = bitset_popcount(c->nodes);
601 c->weight_consistent = 1;
606 * Count the number of interfering affinity neighbours
608 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
610 const neighb_t *neigh;
611 const ir_node *irn = an->irn;
612 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
615 co_gs_foreach_neighb(an, neigh) {
616 const ir_node *n = neigh->irn;
619 if (arch_irn_is_ignore(n))
622 /* check if the affinity neighbour interfere */
/* linear scan of the cached interference neighbour array */
623 for (i = 0; i < node->n_neighs; ++i) {
624 if (node->int_neighs[i] == n) {
635 * Build chunks of nodes connected by affinity edges.
636 * We start at the heaviest affinity edge.
637 * The chunks of the two edge-defining nodes will be
638 * merged if there are no interference edges from one
639 * chunk to the other.
641 static void build_affinity_chunks(co_mst_env_t *env)
643 nodes_iter_t nodes_it;
644 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
647 aff_chunk_t *curr_chunk;
650 /* at first we create the affinity edge objects */
651 be_ifg_foreach_node(env->ifg, &nodes_it, n) {
652 int n_idx = get_irn_idx(n);
656 if (arch_irn_is_ignore(n))
659 n1 = get_co_mst_irn(env, n);
660 an = get_affinity_info(env->co, n);
/* lazily compute the interfering-affinity-neighbour count */
665 if (n1->int_aff_neigh < 0)
666 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
668 /* build the affinity edges */
669 co_gs_foreach_neighb(an, neigh) {
670 const ir_node *m = neigh->irn;
671 int m_idx = get_irn_idx(m);
673 /* record the edge in only one direction */
678 /* skip ignore nodes */
679 if (arch_irn_is_ignore(m))
685 n2 = get_co_mst_irn(env, m);
686 if (n2->int_aff_neigh < 0) {
687 affinity_node_t *am = get_affinity_info(env->co, m);
688 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
691 * these weights are pure hackery ;-).
692 * It's not chriswue's fault but mine.
694 edge.weight = neigh->costs;
695 ARR_APP1(aff_edge_t, edges, edge);
701 /* now: sort edges and build the affinity chunks */
/* heaviest edges first (cmp_aff_edge sorts descending) */
702 len = ARR_LEN(edges);
703 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
704 for (i = 0; i < len; ++i) {
705 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
707 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
710 /* now insert all chunks into a priority queue */
711 list_for_each_entry(aff_chunk_t, curr_chunk, &env->chunklist, list) {
712 aff_chunk_assure_weight(env, curr_chunk);
714 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
715 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
716 DBG((dbg, LEVEL_1, "\n"));
718 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* nodes not reached via any affinity edge get singleton chunks */
721 for (pn = 0; pn < ARR_LEN(env->map.data); ++pn) {
722 co_mst_irn_t *mirn = env->map.data[pn];
725 if (mirn->chunk != NULL)
728 /* no chunk is allocated so far, do it now */
729 aff_chunk_t *curr_chunk = new_aff_chunk(env);
730 aff_chunk_add_node(curr_chunk, mirn);
732 aff_chunk_assure_weight(env, curr_chunk);
734 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
735 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
736 DBG((dbg, LEVEL_1, "\n"));
738 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* Reorder the nodes of @p chunk by growing a region from the node with the
 * highest total affinity cost, always expanding along the most expensive
 * affinity edge next (priority-queue driven BFS). */
744 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
746 pqueue_t *grow = new_pqueue();
747 ir_node const *max_node = NULL;
/* pass 1: find the node with the maximum summed affinity weight */
751 for (i = ARR_LEN(chunk->n); i != 0;) {
752 const ir_node *irn = chunk->n[--i];
753 affinity_node_t *an = get_affinity_info(env->co, irn);
757 if (arch_irn_is_ignore(irn))
761 co_gs_foreach_neighb(an, neigh)
764 if (w > max_weight) {
/* pass 2: grow from max_node; visited tracks nodes not yet pulled out */
772 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
774 for (i = ARR_LEN(chunk->n); i != 0;)
775 bitset_set(visited, get_irn_idx(chunk->n[--i]));
777 pqueue_put(grow, (void *) max_node, max_weight);
778 bitset_clear(visited, get_irn_idx(max_node));
780 while (!pqueue_empty(grow)) {
781 ir_node *irn = (ir_node*)pqueue_pop_front(grow);
782 affinity_node_t *an = get_affinity_info(env->co, irn);
785 if (arch_irn_is_ignore(irn))
788 assert(i <= ARR_LEN(chunk->n));
793 /* build the affinity edges */
794 co_gs_foreach_neighb(an, neigh) {
795 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
797 if (bitset_is_set(visited, get_irn_idx(node->irn))) {
798 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
799 bitset_clear(visited, get_irn_idx(node->irn));
805 bitset_free(visited);
810 * Greedy collect affinity neighbours into thew new chunk @p chunk starting at node @p node.
/* BFS over affinity edges: a neighbour joins @p chunk when the @p decider
 * accepts it for @p col, it is unvisited/unfixed, does not interfere with
 * the chunk, and belonged to @p orig_chunk. */
812 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
813 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
815 waitq *nodes = new_waitq();
817 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
819 /* init queue and chunk */
820 waitq_put(nodes, node);
821 bitset_set(visited, get_irn_idx(node->irn));
822 aff_chunk_add_node(chunk, node);
823 DB((dbg, LEVEL_1, " %+F", node->irn));
825 /* as long as there are nodes in the queue */
826 while (! waitq_empty(nodes)) {
827 co_mst_irn_t *n = (co_mst_irn_t*)waitq_get(nodes);
828 affinity_node_t *an = get_affinity_info(env->co, n->irn);
830 /* check all affinity neighbors */
833 co_gs_foreach_neighb(an, neigh) {
834 const ir_node *m = neigh->irn;
835 int m_idx = get_irn_idx(m);
838 if (arch_irn_is_ignore(m))
841 n2 = get_co_mst_irn(env, m);
843 if (! bitset_is_set(visited, m_idx) &&
846 ! aff_chunk_interferes(chunk, m) &&
847 node_contains(orig_chunk->n, m))
850 following conditions are met:
851 - neighbour is not visited
852 - neighbour likes the color
853 - neighbour has not yet a fixed color
854 - the new chunk doesn't interfere with the neighbour
855 - neighbour belongs or belonged once to the original chunk
857 bitset_set(visited, m_idx);
858 aff_chunk_add_node(chunk, n2);
859 DB((dbg, LEVEL_1, " %+F", n2->irn));
860 /* enqueue for further search */
861 waitq_put(nodes, n2);
867 DB((dbg, LEVEL_1, "\n"));
873 * Fragment the given chunk into chunks having given color and not having given color.
/* Each fragment is registered in @p tmp (caller frees them); the heaviest
 * fragment is returned as the best candidate. */
875 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
877 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
879 aff_chunk_t *best = NULL;
881 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
884 aff_chunk_t *tmp_chunk;
885 decide_func_t *decider;
/* skip nodes already pulled into a previous fragment */
889 if (bitset_is_set(visited, get_irn_idx(irn)))
892 node = get_co_mst_irn(env, irn);
/* choose the decider according to the seed node's current color */
894 if (get_mst_irn_col(node) == col) {
895 decider = decider_has_color;
897 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
900 decider = decider_hasnot_color;
902 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
905 /* create a new chunk starting at current node */
906 tmp_chunk = new_aff_chunk(env);
907 waitq_put(tmp, tmp_chunk);
908 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
909 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
911 /* remember the local best */
912 aff_chunk_assure_weight(env, tmp_chunk);
913 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
917 assert(best && "No chunk found?");
918 bitset_free(visited);
923 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
924 * ATTENTION: the queue is empty after calling this function!
926 static inline void reject_coloring(struct list_head *nodes)
928 co_mst_irn_t *n, *temp;
929 DB((dbg, LEVEL_4, "\treject coloring for"));
/* safe iteration: each entry is unlinked from the undo list as we go */
930 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
931 DB((dbg, LEVEL_4, " %+F", n->irn));
932 assert(n->tmp_col >= 0);
934 list_del_init(&n->list);
936 DB((dbg, LEVEL_4, "\n"));
/* Commit the temporary colors of all nodes on @p nodes and empty the list.
 * Counterpart of reject_coloring(). */
939 static inline void materialize_coloring(struct list_head *nodes)
941 co_mst_irn_t *n, *temp;
942 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
943 assert(n->tmp_col >= 0);
946 list_del_init(&n->list);
/* Give @p node the temporary color @p col and record it on @p changed so the
 * assignment can later be materialized or rejected as a whole. */
950 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
953 assert(!node->fixed);
954 assert(node->tmp_col < 0);
/* node must not already be on some undo list */
955 assert(node->list.next == &node->list && node->list.prev == &node->list);
956 assert(bitset_is_set(node->adm_colors, col));
958 list_add_tail(&node->list, changed);
/* A node is "loose" when its color is neither fixed nor temporarily set. */
962 static inline int is_loose(co_mst_irn_t *node)
964 return !node->fixed && node->tmp_col < 0;
968 * Determines the costs for each color if it would be assigned to node @p node.
/* Cost model: start from the node's constraint factor for each admissible
 * color (0 for inadmissible ones), zero out colors taken by non-loose
 * neighbours, and scale down colors popular among loose neighbours. */
970 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
972 int *neigh_cols = ALLOCAN(int, env->n_regs);
977 for (i = 0; i < env->n_regs; ++i) {
980 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
983 for (i = 0; i < node->n_neighs; ++i) {
984 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
985 int col = get_mst_irn_col(n);
/* a cost of 0.0 marks the color as infeasible for this node */
990 costs[col].cost = REAL(0.0);
994 coeff = REAL(1.0) / n_loose;
995 for (i = 0; i < env->n_regs; ++i)
996 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
1000 /* need forward declaration due to recursive call */
1001 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
1004 * Tries to change node to a color but @p explude_col.
1005 * @return 1 if succeeded, 0 otherwise.
1007 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
1009 int col = get_mst_irn_col(node);
1012 /* neighbours has already a different color -> good, temporary fix it */
1013 if (col != exclude_col) {
1015 set_temp_color(node, col, changed);
1019 /* The node has the color it should not have _and_ has not been visited yet. */
1020 if (is_loose(node)) {
1021 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
1023 /* Get the costs for giving the node a specific color. */
1024 determine_color_costs(env, node, costs);
1026 /* Since the node must not have the not_col, set the costs for that color to "infinity" */
/* NOTE(review): REAL(0.0) is the infeasible sentinel in this cost model —
 * recolor_nodes() treats cost == 0.0 as "forbidden", so despite the wording
 * above this marks the excluded color unusable, it is not a literal infinity. */
1027 costs[exclude_col].cost = REAL(0.0);
1029 /* sort the colors according costs, cheapest first. */
1030 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
1032 /* Try recoloring the node using the color list. */
1033 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1040 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1041 * ATTENTION: Expect @p costs already sorted by increasing costs.
1042 * @return 1 if coloring could be applied, 0 otherwise.
/* Recursive backtracking: for each candidate color in cost order, tentatively
 * assign it and recursively push conflicting neighbours to other colors
 * (via change_node_color_excluded). On failure the tentative assignments of
 * that attempt are rolled back and the next color is tried. Recursion depth
 * is bounded by recolor_limit. */
1044 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
1047 struct list_head local_changed;
1050 if (depth > *max_depth)
1053 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1054 DBG_COL_COST(env, LEVEL_4, costs);
1055 DB((dbg, LEVEL_4, "\n"));
1057 if (depth >= recolor_limit) {
1058 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
1062 for (i = 0; i < env->n_regs; ++i) {
1063 int tgt_col = costs[i].col;
1067 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
/* cost == 0.0 is the infeasible sentinel; the list is sorted, so stop */
1068 if (costs[i].cost == REAL(0.0)) {
1069 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1073 /* Set the new color of the node and mark the node as temporarily fixed. */
1074 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1075 INIT_LIST_HEAD(&local_changed);
1076 set_temp_color(node, tgt_col, &local_changed);
1077 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1079 /* try to color all interfering neighbours with current color forbidden */
1080 for (j = 0; j < node->n_neighs; ++j) {
1084 neigh = node->int_neighs[j];
1086 if (arch_irn_is_ignore(neigh))
1089 nn = get_co_mst_irn(env, neigh);
1090 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1091 neigh, j, nn->fixed, nn->tmp_col, nn->col));
1094 Try to change the color of the neighbor and record all nodes which
1095 get changed in the tmp list. Add this list to the "changed" list for
1096 that color. If we did not succeed to change the color of the neighbor,
1097 we bail out and try the next color.
1099 if (get_mst_irn_col(nn) == tgt_col) {
1100 /* try to color neighbour with tgt_col forbidden */
1101 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1109 We managed to assign the target color to all neighbors, so from the perspective
1110 of the current node, every thing was ok and we can return safely.
1113 /* append the local_changed ones to global ones */
1114 list_splice(&local_changed, changed);
1118 /* coloring of neighbours failed, so we try next color */
1119 reject_coloring(&local_changed);
1123 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1128 * Tries to bring node @p node and all its neighbours to color @p tgt_col.
1129 * @return 1 if color @p col could be applied, 0 otherwise
1131 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
1133 int col = get_mst_irn_col(node);
1135 /* if node already has the target color -> good, temporary fix it */
1136 if (col == tgt_col) {
1137 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1139 set_temp_color(node, tgt_col, changed);
1144 Node has not yet a fixed color and target color is admissible
1145 -> try to recolor node and its affinity neighbours
1147 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
/* precomputed single-color cost vector for tgt_col */
1148 col_cost_t *costs = env->single_cols[tgt_col];
1149 int res, max_depth, trip;
1154 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1155 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1156 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1157 stat_ev_int("heur4_recolor_depth_max", max_depth);
1158 stat_ev_int("heur4_recolor_trip", trip);
/* failure diagnostics: distinguish "color fixed" from "color inadmissible" */
1164 #ifdef DEBUG_libfirm
1165 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1166 if (!is_loose(node))
1167 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1169 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1170 dbg_admissible_colors(env, node);
1171 DB((dbg, LEVEL_4, ")\n"));
1180 * Tries to color an affinity chunk (or at least a part of it).
1181 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1183 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
1185 aff_chunk_t *best_chunk = NULL;
1186 int n_nodes = ARR_LEN(c->n);
1187 int best_color = -1;
1188 int n_int_chunks = 0;
1189 waitq *tmp_chunks = new_waitq();
1190 waitq *best_starts = NULL;
1191 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1198 struct list_head changed;
1200 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1201 DBG_AFF_CHUNK(env, LEVEL_2, c);
1202 DB((dbg, LEVEL_2, "\n"));
1204 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1206 ++env->chunk_visited;
1208 /* compute color preference */
1209 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1210 const ir_node *n = c->interfere[pos];
1211 co_mst_irn_t *node = get_co_mst_irn(env, n);
1212 aff_chunk_t *chunk = node->chunk;
1214 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1215 assert(!chunk->deleted);
1216 chunk->visited = env->chunk_visited;
1219 aff_chunk_assure_weight(env, chunk);
1220 for (i = 0; i < env->n_regs; ++i)
1221 order[i].cost += chunk->color_affinity[i].cost;
1225 for (i = 0; i < env->n_regs; ++i) {
1226 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1228 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1231 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1233 DBG_COL_COST(env, LEVEL_2, order);
1234 DB((dbg, LEVEL_2, "\n"));
1236 /* check which color is the "best" for the given chunk.
1237 * if we found a color which was ok for all nodes, we take it
1238 * and do not look further. (see did_all flag usage below.)
1239 * If we have many colors which fit all nodes it is hard to decide
1240 * which one to take anyway.
1241 * TODO Sebastian: Perhaps we should at all nodes and figure out
1242 * a suitable color using costs as done above (determine_color_costs).
1244 for (i = 0; i < env->k; ++i) {
1245 int col = order[i].col;
1246 waitq *good_starts = new_waitq();
1247 aff_chunk_t *local_best;
1250 /* skip ignore colors */
1251 if (!bitset_is_set(env->allocatable_regs, col))
1254 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1258 /* try to bring all nodes of given chunk to the current color. */
1259 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1260 const ir_node *irn = c->n[idx];
1261 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1264 assert(! node->fixed && "Node must not have a fixed color.");
1265 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1268 The order of the colored nodes is important, so we record the successfully
1269 colored ones in the order they appeared.
1271 INIT_LIST_HEAD(&changed);
1273 good = change_node_color(env, node, col, &changed);
1274 stat_ev_tim_pop("heur4_recolor");
1276 waitq_put(good_starts, node);
1277 materialize_coloring(&changed);
1282 reject_coloring(&changed);
1284 n_succeeded += good;
1285 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1288 /* unfix all nodes */
1289 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1290 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1294 /* try next color when failed */
1295 if (n_succeeded == 0)
1298 /* fragment the chunk according to the coloring */
1299 local_best = fragment_chunk(env, col, c, tmp_chunks);
1301 /* search the best of the good list
1302 and make it the new best if it is better than the current */
1304 aff_chunk_assure_weight(env, local_best);
1306 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1307 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1309 if (! best_chunk || best_chunk->weight < local_best->weight) {
1310 best_chunk = local_best;
1313 del_waitq(best_starts);
1314 best_starts = good_starts;
1315 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1317 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1318 del_waitq(good_starts);
1322 del_waitq(good_starts);
1325 /* if all nodes were recolored, bail out */
1326 if (n_succeeded == n_nodes)
1330 stat_ev_int("heur4_colors_tried", i);
1332 /* free all intermediate created chunks except best one */
1333 while (! waitq_empty(tmp_chunks)) {
1334 aff_chunk_t *tmp = (aff_chunk_t*)waitq_get(tmp_chunks);
1335 if (tmp != best_chunk)
1336 delete_aff_chunk(tmp);
1338 del_waitq(tmp_chunks);
1340 /* return if coloring failed */
1343 del_waitq(best_starts);
1347 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1348 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1349 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1351 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1352 const ir_node *irn = best_chunk->n[idx];
1353 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1356 /* bring the node to the color. */
1357 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1358 INIT_LIST_HEAD(&changed);
1360 res = change_node_color(env, node, best_color, &changed);
1361 stat_ev_tim_pop("heur4_recolor");
1363 materialize_coloring(&changed);
1366 assert(list_empty(&changed));
1369 /* remove the nodes in best chunk from original chunk */
1370 len = ARR_LEN(best_chunk->n);
1371 for (idx = 0; idx < len; ++idx) {
1372 const ir_node *irn = best_chunk->n[idx];
1373 int pos = nodes_bsearch(c->n, irn);
1378 len = ARR_LEN(c->n);
1379 for (idx = nidx = 0; idx < len; ++idx) {
1380 const ir_node *irn = c->n[idx];
1386 ARR_SHRINKLEN(c->n, nidx);
1389 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1390 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1391 const ir_node *n = c->n[idx];
1392 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1396 /* fragment the remaining chunk */
1397 visited = bitset_malloc(get_irg_last_idx(env->co->irg));
1398 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1399 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1401 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1402 const ir_node *irn = c->n[idx];
1403 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1404 aff_chunk_t *new_chunk = new_aff_chunk(env);
1405 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1407 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1408 aff_chunk_assure_weight(env, new_chunk);
1409 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1413 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1414 const ir_node *n = best_chunk->n[idx];
1415 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1419 /* clear obsolete chunks and free some memory */
1420 delete_aff_chunk(best_chunk);
1421 bitset_free(visited);
1423 del_waitq(best_starts);
1425 stat_ev_ctx_pop("heur4_color_chunk");
1429 * Main driver for mst safe coalescing algorithm.
/**
 * Main driver for the mst-safe coalescing algorithm ("heur4").
 *
 * Sets up a co_mst_env_t for the given copy-opt problem, builds affinity
 * chunks, colors the chunks in priority-queue (weight) order, and finally
 * writes the computed colors back to the nodes as register assignments.
 *
 * @param co  the copy optimization problem to solve
 * @return    NOTE(review): the return statement is not visible in this
 *            excerpt (lines elided); consult the full source.
 *
 * NOTE(review): this excerpt elides several original source lines
 * (declarations of i/j/k/pn/irn, closing braces, the bodies of the two
 * skip-ifs in the apply loop, etc.); verify against the full file before
 * changing control flow here.
 */
1431 static int co_solve_heuristic_mst(copy_opt_t *co)
1433 	unsigned n_regs = co->cls->n_regs;
1434 	bitset_t *allocatable_regs = bitset_alloca(n_regs);
1439 	co_mst_env_t mst_env;
1446 	ir_nodemap_init(&mst_env.map, co->irg);
1447 	obstack_init(&mst_env.obst);
1449 	be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
1450 	k = bitset_popcount(allocatable_regs);
1452 	mst_env.n_regs           = n_regs;
1454 	mst_env.chunks           = new_pqueue();
1456 	mst_env.allocatable_regs = allocatable_regs;
1457 	mst_env.ifg              = co->cenv->ifg;
1458 	INIT_LIST_HEAD(&mst_env.chunklist);
1459 	mst_env.chunk_visited    = 0;
1460 	mst_env.single_cols      = OALLOCN(&mst_env.obst, col_cost_t*, n_regs);
	/* Precompute one cost vector per register: all entries zero-cost,
	 * then the vector's own color gets cost 1.0 (see line 1472).
	 * NOTE(review): the loop body lines assigning vec[j].col and
	 * rotating the vectors appear to be elided here. */
1462 	for (i = 0; i < n_regs; ++i) {
1463 		col_cost_t *vec = OALLOCN(&mst_env.obst, col_cost_t, n_regs);
1465 		mst_env.single_cols[i] = vec;
1466 		for (j = 0; j < n_regs; ++j) {
1468 			vec[j].cost = REAL(0.0);
1472 		vec[0].cost = REAL(1.0);
1475 	DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1477 	/* build affinity chunks */
1479 	build_affinity_chunks(&mst_env);
1480 	stat_ev_tim_pop("heur4_initial_chunk");
1482 	/* color chunks as long as there are some */
1483 	while (! pqueue_empty(mst_env.chunks)) {
1484 		aff_chunk_t *chunk = (aff_chunk_t*)pqueue_pop_front(mst_env.chunks);
1486 		color_aff_chunk(&mst_env, chunk);
1487 		DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
		/* color_aff_chunk() may re-enqueue fragments; the popped chunk
		 * itself is done and can be freed. */
1488 		delete_aff_chunk(chunk);
1491 	/* apply coloring */
1492 	for (pn = 0; pn < ARR_LEN(mst_env.map.data); ++pn) {
1493 		co_mst_irn_t *mirn = mst_env.map.data[pn];
1494 		const arch_register_t *reg;
1497 		irn = get_idx_irn(co->irg, pn);
		/* ignore-nodes take no part in register assignment
		 * (NOTE(review): the 'continue' body is elided in this excerpt) */
1498 		if (arch_irn_is_ignore(irn))
1501 		/* skip nodes where color hasn't changed */
1502 		if (mirn->init_col == mirn->col)
1505 		reg = arch_register_for_index(co->cls, mirn->col);
1506 		arch_set_irn_register(irn, reg);
1507 		DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1510 	/* free allocated memory */
1511 	del_pqueue(mst_env.chunks);
1512 	obstack_free(&mst_env.obst, NULL);
1513 	ir_nodemap_destroy(&mst_env.map);
1515 	stat_ev_tim_pop("heur4_total");
/* Command line options of the heur4 coalescer; the entries bind directly to
 * the file-scope knobs recolor_limit and dislike_influence declared above.
 * NOTE(review): the table terminator and closing brace are elided in this
 * excerpt. */
1520 static const lc_opt_table_entry_t options[] = {
1521 	LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1522 	LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
/**
 * Module constructor: registers the heur4 copy-minimization algorithm.
 *
 * Walks the option-group hierarchy down to be/ra/chordal/co/heur4, attaches
 * the option table there, registers co_solve_heuristic_mst under the name
 * "heur4" with the copy-opt framework, and hooks up the debug module.
 * NOTE(review): the closing brace of this function is beyond/elided from
 * this excerpt.
 */
1526 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4)
1527 void be_init_copyheur4(void)
1529 	lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1530 	lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1531 	lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1532 	lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1533 	lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
	/* static: the copy-opt registry keeps a pointer to this descriptor */
1535 	static co_algo_info copyheur = {
1536 		co_solve_heuristic_mst, 0
1539 	lc_opt_add_table(heur4_grp, options);
1540 	be_register_copyopt("heur4", &copyheur);
1542 	FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");