/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Simple copy minimization heuristics.
 * @author      Christian Wuerdig
 *
 * This is the C implementation of the mst algorithm
 * originally written in Java by Sebastian Hack.
 * (also known as "heur3" :)
 * Performs simple copy minimization.
 */
#define DISABLE_STATEV

#include <assert.h>
#include <stdlib.h>

#include "raw_bitset.h"
#include "irnodemap.h"
#include "becopyopt_t.h"
/* Debug-only dump helpers; they expand to nothing in release builds. */
#ifdef DEBUG_libfirm

#define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while (0)
#define DBG_COL_COST(env, level, cost)   do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while (0)

static firm_dbg_module_t *dbg = NULL;

#else

#define DBG_AFF_CHUNK(env, level, chunk)
#define DBG_COL_COST(env, level, cost)

#endif

/* Affinity costs are stored as single-precision floats. */
typedef float real_t;
#define REAL(C) (C ## f)

static unsigned last_chunk_id     = 0;         /* source of unique chunk ids */
static int      recolor_limit     = 7;         /* recursion depth limit for recoloring */
static real_t   dislike_influence = REAL(0.1); /* weight of the "dislike" term in color ordering */
80 typedef struct col_cost_t {
88 typedef struct aff_chunk_t {
89 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
90 const ir_node **interfere; /**< An ARR_F containing all inference. */
91 int weight; /**< Weight of this chunk */
92 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
93 unsigned deleted : 1; /**< For debugging: Set if the was deleted. */
94 unsigned id; /**< An id of this chunk. */
97 col_cost_t color_affinity[1];
103 typedef struct aff_edge_t {
104 const ir_node *src; /**< Source node. */
105 const ir_node *tgt; /**< Target node. */
106 int weight; /**< The weight of this edge. */
109 /* main coalescing environment */
110 typedef struct co_mst_env_t {
111 int n_regs; /**< number of regs in class */
112 bitset_t *allocatable_regs; /**< set containing all global ignore registers */
113 ir_nodemap map; /**< phase object holding data for nodes */
115 pqueue_t *chunks; /**< priority queue for chunks */
116 list_head chunklist; /**< list holding all chunks */
117 be_ifg_t *ifg; /**< the interference graph */
118 copy_opt_t *co; /**< the copy opt object */
119 unsigned chunk_visited;
120 col_cost_t **single_cols;
123 /* stores coalescing related information for a node */
124 typedef struct co_mst_irn_t {
125 const ir_node *irn; /**< the irn this information belongs to */
126 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
127 bitset_t *adm_colors; /**< set of admissible colors for this irn */
128 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
129 int n_neighs; /**< length of the interfering neighbours array. */
130 int int_aff_neigh; /**< number of interfering affinity neighbours */
131 int col; /**< color currently assigned */
132 int init_col; /**< the initial color */
133 int tmp_col; /**< a temporary assigned color */
134 unsigned fixed : 1; /**< the color is fixed */
135 struct list_head list; /**< Queue for coloring undo. */
136 real_t constr_factor;
140 * In case there is no phase information for irn, initialize it.
142 static co_mst_irn_t *co_mst_irn_init(co_mst_env_t *env, const ir_node *irn)
144 co_mst_irn_t *res = OALLOC(&env->obst, co_mst_irn_t);
146 const arch_register_req_t *req;
147 neighbours_iter_t nodes_it;
155 res->int_neighs = NULL;
156 res->int_aff_neigh = 0;
157 res->col = arch_get_irn_register(irn)->index;
158 res->init_col = res->col;
159 INIT_LIST_HEAD(&res->list);
161 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
163 /* set admissible registers */
164 res->adm_colors = bitset_obstack_alloc(&env->obst, env->n_regs);
166 /* Exclude colors not assignable to the irn */
167 req = arch_get_irn_register_req(irn);
168 if (arch_register_req_is(req, limited)) {
169 rbitset_copy_to_bitset(req->limited, res->adm_colors);
170 /* exclude global ignore registers as well */
171 bitset_and(res->adm_colors, env->allocatable_regs);
173 bitset_copy(res->adm_colors, env->allocatable_regs);
176 /* compute the constraint factor */
177 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
179 /* set the number of interfering affinity neighbours to -1, they are calculated later */
180 res->int_aff_neigh = -1;
182 /* build list of interfering neighbours */
184 be_ifg_foreach_neighbour(env->ifg, &nodes_it, irn, neigh) {
185 if (!arch_irn_is_ignore(neigh)) {
186 obstack_ptr_grow(&env->obst, neigh);
190 res->int_neighs = (ir_node**)obstack_finish(&env->obst);
195 static co_mst_irn_t *get_co_mst_irn(co_mst_env_t *env, const ir_node *node)
197 co_mst_irn_t *res = ir_nodemap_get(co_mst_irn_t, &env->map, node);
199 res = co_mst_irn_init(env, node);
200 ir_nodemap_insert(&env->map, node, res);
205 typedef int decide_func_t(const co_mst_irn_t *node, int col);
210 * Write a chunk to stderr for debugging.
212 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
216 if (c->weight_consistent)
217 ir_fprintf(stderr, " $%d ", c->weight);
218 ir_fprintf(stderr, "{");
219 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
220 const ir_node *n = c->n[i];
221 ir_fprintf(stderr, " %+F,", n);
223 ir_fprintf(stderr, "}");
227 * Dump all admissible colors to stderr.
229 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
233 if (bitset_popcount(node->adm_colors) < 1)
234 fprintf(stderr, "no admissible colors?!?");
236 bitset_foreach(node->adm_colors, idx) {
237 ir_fprintf(stderr, " %zu", idx);
243 * Dump color-cost pairs to stderr.
245 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
248 for (i = 0; i < env->n_regs; ++i)
249 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
252 #endif /* DEBUG_libfirm */
254 static inline int get_mst_irn_col(const co_mst_irn_t *node)
256 return node->tmp_col >= 0 ? node->tmp_col : node->col;
260 * @return 1 if node @p node has color @p col, 0 otherwise.
262 static int decider_has_color(const co_mst_irn_t *node, int col)
264 return get_mst_irn_col(node) == col;
268 * @return 1 if node @p node has not color @p col, 0 otherwise.
270 static int decider_hasnot_color(const co_mst_irn_t *node, int col)
272 return get_mst_irn_col(node) != col;
276 * Always returns true.
278 static int decider_always_yes(const co_mst_irn_t *node, int col)
285 /** compares two affinity edges by its weight */
286 static int cmp_aff_edge(const void *a, const void *b)
288 const aff_edge_t *e1 = (const aff_edge_t*)a;
289 const aff_edge_t *e2 = (const aff_edge_t*)b;
291 if (e2->weight == e1->weight) {
292 if (e2->src->node_idx == e1->src->node_idx)
293 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
295 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
297 /* sort in descending order */
298 return QSORT_CMP(e2->weight, e1->weight);
301 /** compares to color-cost pairs */
302 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
304 const col_cost_t *c1 = (const col_cost_t*)a;
305 const col_cost_t *c2 = (const col_cost_t*)b;
306 real_t diff = c1->cost - c2->cost;
313 return QSORT_CMP(c1->col, c2->col);
316 static int cmp_col_cost_gt(const void *a, const void *b)
318 const col_cost_t *c1 = (const col_cost_t*)a;
319 const col_cost_t *c2 = (const col_cost_t*)b;
320 real_t diff = c2->cost - c1->cost;
327 return QSORT_CMP(c1->col, c2->col);
331 * Creates a new affinity chunk
333 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
335 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
336 c->n = NEW_ARR_F(const ir_node *, 0);
337 c->interfere = NEW_ARR_F(const ir_node *, 0);
339 c->weight_consistent = 0;
341 c->id = ++last_chunk_id;
343 list_add(&c->list, &env->chunklist);
348 * Frees all memory allocated by an affinity chunk.
350 static inline void delete_aff_chunk(aff_chunk_t *c)
353 DEL_ARR_F(c->interfere);
360 * binary search of sorted nodes.
362 * @return the position where n is found in the array arr or ~pos
363 * if the nodes is not here.
365 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
367 int hi = ARR_LEN(arr);
371 int md = lo + ((hi - lo) >> 1);
384 /** Check if a node n can be found inside arr. */
385 static int node_contains(const ir_node **arr, const ir_node *n)
387 int i = nodes_bsearch(arr, n);
392 * Insert a node into the sorted nodes list.
394 * @return 1 if the node was inserted, 0 else
396 static int nodes_insert(const ir_node ***arr, const ir_node *irn)
398 int idx = nodes_bsearch(*arr, irn);
401 int i, n = ARR_LEN(*arr);
404 ARR_APP1(const ir_node *, *arr, irn);
409 for (i = n - 1; i >= idx; --i)
418 * Adds a node to an affinity chunk
420 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
424 if (! nodes_insert(&c->n, node->irn))
427 c->weight_consistent = 0;
430 for (i = node->n_neighs - 1; i >= 0; --i) {
431 ir_node *neigh = node->int_neighs[i];
432 nodes_insert(&c->interfere, neigh);
437 * Check if affinity chunk @p chunk interferes with node @p irn.
439 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
441 return node_contains(chunk->interfere, irn);
445 * Check if there are interference edges from c1 to c2.
447 * @param c2 Another chunk
448 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
450 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
457 /* check if there is a node in c2 having an interfering neighbor in c1 */
458 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
459 const ir_node *irn = c2->n[i];
461 if (node_contains(c1->interfere, irn))
468 * Returns the affinity chunk of @p irn or creates a new
469 * one with @p irn as element if there is none assigned.
471 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
473 co_mst_irn_t *node = get_co_mst_irn(env, irn);
478 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
479 * are no interference edges from chunk(src) to chunk(tgt)).
480 * @return 1 if successful, 0 if not possible
482 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
484 aff_chunk_t *c1 = get_aff_chunk(env, src);
485 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
488 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
490 DBG_AFF_CHUNK(env, LEVEL_4, c1);
492 DB((dbg, LEVEL_4, "{%+F}", src));
494 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
496 DBG_AFF_CHUNK(env, LEVEL_4, c2);
498 DB((dbg, LEVEL_4, "{%+F}", tgt));
500 DB((dbg, LEVEL_4, "\n"));
505 /* no chunk exists */
506 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
509 for (i = mirn->n_neighs - 1; i >= 0; --i) {
510 if (mirn->int_neighs[i] == tgt)
514 /* create one containing both nodes */
515 c1 = new_aff_chunk(env);
516 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
517 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
521 /* c2 already exists */
522 if (! aff_chunk_interferes(c2, src)) {
523 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
527 } else if (c2 == NULL) {
528 /* c1 already exists */
529 if (! aff_chunk_interferes(c1, tgt)) {
530 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
533 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
536 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
537 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
539 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
540 const ir_node *irn = c2->interfere[idx];
541 nodes_insert(&c1->interfere, irn);
544 c1->weight_consistent = 0;
546 delete_aff_chunk(c2);
549 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
553 DB((dbg, LEVEL_4, " ... absorbed\n"));
558 * Assures that the weight of the given chunk is consistent.
560 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
562 if (! c->weight_consistent) {
566 for (i = 0; i < env->n_regs; ++i) {
567 c->color_affinity[i].col = i;
568 c->color_affinity[i].cost = REAL(0.0);
571 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
572 const ir_node *n = c->n[idx];
573 const affinity_node_t *an = get_affinity_info(env->co, n);
574 co_mst_irn_t *node = get_co_mst_irn(env, n);
577 if (node->constr_factor > REAL(0.0)) {
578 bitset_foreach (node->adm_colors, col)
579 c->color_affinity[col].cost += node->constr_factor;
583 co_gs_foreach_neighb(an, neigh) {
584 const ir_node *m = neigh->irn;
586 if (arch_irn_is_ignore(m))
589 w += node_contains(c->n, m) ? neigh->costs : 0;
594 for (i = 0; i < env->n_regs; ++i)
595 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
598 // c->weight = bitset_popcount(c->nodes);
599 c->weight_consistent = 1;
604 * Count the number of interfering affinity neighbours
606 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
608 const ir_node *irn = an->irn;
609 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
612 co_gs_foreach_neighb(an, neigh) {
613 const ir_node *n = neigh->irn;
616 if (arch_irn_is_ignore(n))
619 /* check if the affinity neighbour interfere */
620 for (i = 0; i < node->n_neighs; ++i) {
621 if (node->int_neighs[i] == n) {
632 * Build chunks of nodes connected by affinity edges.
633 * We start at the heaviest affinity edge.
634 * The chunks of the two edge-defining nodes will be
635 * merged if there are no interference edges from one
636 * chunk to the other.
638 static void build_affinity_chunks(co_mst_env_t *env)
640 nodes_iter_t nodes_it;
641 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
646 /* at first we create the affinity edge objects */
647 be_ifg_foreach_node(env->ifg, &nodes_it, n) {
648 int n_idx = get_irn_idx(n);
652 if (arch_irn_is_ignore(n))
655 n1 = get_co_mst_irn(env, n);
656 an = get_affinity_info(env->co, n);
659 if (n1->int_aff_neigh < 0)
660 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
662 /* build the affinity edges */
663 co_gs_foreach_neighb(an, neigh) {
664 const ir_node *m = neigh->irn;
665 int m_idx = get_irn_idx(m);
667 /* record the edge in only one direction */
672 /* skip ignore nodes */
673 if (arch_irn_is_ignore(m))
679 n2 = get_co_mst_irn(env, m);
680 if (n2->int_aff_neigh < 0) {
681 affinity_node_t *am = get_affinity_info(env->co, m);
682 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
685 * these weights are pure hackery ;-).
686 * It's not chriswue's fault but mine.
688 edge.weight = neigh->costs;
689 ARR_APP1(aff_edge_t, edges, edge);
695 /* now: sort edges and build the affinity chunks */
696 len = ARR_LEN(edges);
697 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
698 for (i = 0; i < len; ++i) {
699 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
701 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
704 /* now insert all chunks into a priority queue */
705 list_for_each_entry(aff_chunk_t, curr_chunk, &env->chunklist, list) {
706 aff_chunk_assure_weight(env, curr_chunk);
708 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
709 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
710 DBG((dbg, LEVEL_1, "\n"));
712 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
715 for (pn = 0; pn < ARR_LEN(env->map.data); ++pn) {
716 co_mst_irn_t *mirn = (co_mst_irn_t*)env->map.data[pn];
719 if (mirn->chunk != NULL)
722 /* no chunk is allocated so far, do it now */
723 aff_chunk_t *curr_chunk = new_aff_chunk(env);
724 aff_chunk_add_node(curr_chunk, mirn);
726 aff_chunk_assure_weight(env, curr_chunk);
728 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
729 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
730 DBG((dbg, LEVEL_1, "\n"));
732 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
738 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
740 pqueue_t *grow = new_pqueue();
741 ir_node const *max_node = NULL;
745 for (i = ARR_LEN(chunk->n); i != 0;) {
746 const ir_node *irn = chunk->n[--i];
747 affinity_node_t *an = get_affinity_info(env->co, irn);
750 if (arch_irn_is_ignore(irn))
754 co_gs_foreach_neighb(an, neigh)
757 if (w > max_weight) {
765 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
767 for (i = ARR_LEN(chunk->n); i != 0;)
768 bitset_set(visited, get_irn_idx(chunk->n[--i]));
770 pqueue_put(grow, (void *) max_node, max_weight);
771 bitset_clear(visited, get_irn_idx(max_node));
773 while (!pqueue_empty(grow)) {
774 ir_node *irn = (ir_node*)pqueue_pop_front(grow);
775 affinity_node_t *an = get_affinity_info(env->co, irn);
777 if (arch_irn_is_ignore(irn))
780 assert(i <= ARR_LEN(chunk->n));
785 /* build the affinity edges */
786 co_gs_foreach_neighb(an, neigh) {
787 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
789 if (bitset_is_set(visited, get_irn_idx(node->irn))) {
790 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
791 bitset_clear(visited, get_irn_idx(node->irn));
797 bitset_free(visited);
802 * Greedy collect affinity neighbours into thew new chunk @p chunk starting at node @p node.
804 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
805 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
807 waitq *nodes = new_waitq();
809 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
811 /* init queue and chunk */
812 waitq_put(nodes, node);
813 bitset_set(visited, get_irn_idx(node->irn));
814 aff_chunk_add_node(chunk, node);
815 DB((dbg, LEVEL_1, " %+F", node->irn));
817 /* as long as there are nodes in the queue */
818 while (! waitq_empty(nodes)) {
819 co_mst_irn_t *n = (co_mst_irn_t*)waitq_get(nodes);
820 affinity_node_t *an = get_affinity_info(env->co, n->irn);
822 /* check all affinity neighbors */
824 co_gs_foreach_neighb(an, neigh) {
825 const ir_node *m = neigh->irn;
826 int m_idx = get_irn_idx(m);
829 if (arch_irn_is_ignore(m))
832 n2 = get_co_mst_irn(env, m);
834 if (! bitset_is_set(visited, m_idx) &&
837 ! aff_chunk_interferes(chunk, m) &&
838 node_contains(orig_chunk->n, m))
841 following conditions are met:
842 - neighbour is not visited
843 - neighbour likes the color
844 - neighbour has not yet a fixed color
845 - the new chunk doesn't interfere with the neighbour
846 - neighbour belongs or belonged once to the original chunk
848 bitset_set(visited, m_idx);
849 aff_chunk_add_node(chunk, n2);
850 DB((dbg, LEVEL_1, " %+F", n2->irn));
851 /* enqueue for further search */
852 waitq_put(nodes, n2);
858 DB((dbg, LEVEL_1, "\n"));
864 * Fragment the given chunk into chunks having given color and not having given color.
866 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
868 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
870 aff_chunk_t *best = NULL;
872 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
875 aff_chunk_t *tmp_chunk;
876 decide_func_t *decider;
880 if (bitset_is_set(visited, get_irn_idx(irn)))
883 node = get_co_mst_irn(env, irn);
885 if (get_mst_irn_col(node) == col) {
886 decider = decider_has_color;
888 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
891 decider = decider_hasnot_color;
893 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
896 /* create a new chunk starting at current node */
897 tmp_chunk = new_aff_chunk(env);
898 waitq_put(tmp, tmp_chunk);
899 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
900 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
902 /* remember the local best */
903 aff_chunk_assure_weight(env, tmp_chunk);
904 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
908 assert(best && "No chunk found?");
909 bitset_free(visited);
914 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
915 * ATTENTION: the queue is empty after calling this function!
917 static inline void reject_coloring(struct list_head *nodes)
919 DB((dbg, LEVEL_4, "\treject coloring for"));
920 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
921 DB((dbg, LEVEL_4, " %+F", n->irn));
922 assert(n->tmp_col >= 0);
924 list_del_init(&n->list);
926 DB((dbg, LEVEL_4, "\n"));
929 static inline void materialize_coloring(struct list_head *nodes)
931 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
932 assert(n->tmp_col >= 0);
935 list_del_init(&n->list);
939 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
942 assert(!node->fixed);
943 assert(node->tmp_col < 0);
944 assert(node->list.next == &node->list && node->list.prev == &node->list);
945 assert(bitset_is_set(node->adm_colors, col));
947 list_add_tail(&node->list, changed);
951 static inline int is_loose(co_mst_irn_t *node)
953 return !node->fixed && node->tmp_col < 0;
957 * Determines the costs for each color if it would be assigned to node @p node.
959 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
961 int *neigh_cols = ALLOCAN(int, env->n_regs);
966 for (i = 0; i < env->n_regs; ++i) {
969 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
972 for (i = 0; i < node->n_neighs; ++i) {
973 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
974 int col = get_mst_irn_col(n);
979 costs[col].cost = REAL(0.0);
983 coeff = REAL(1.0) / n_loose;
984 for (i = 0; i < env->n_regs; ++i)
985 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
989 /* need forward declaration due to recursive call */
990 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
993 * Tries to change node to a color but @p explude_col.
994 * @return 1 if succeeded, 0 otherwise.
996 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
998 int col = get_mst_irn_col(node);
1001 /* neighbours has already a different color -> good, temporary fix it */
1002 if (col != exclude_col) {
1004 set_temp_color(node, col, changed);
1008 /* The node has the color it should not have _and_ has not been visited yet. */
1009 if (is_loose(node)) {
1010 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
1012 /* Get the costs for giving the node a specific color. */
1013 determine_color_costs(env, node, costs);
1015 /* Since the node must not have the not_col, set the costs for that color to "infinity" */
1016 costs[exclude_col].cost = REAL(0.0);
1018 /* sort the colors according costs, cheapest first. */
1019 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
1021 /* Try recoloring the node using the color list. */
1022 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1029 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1030 * ATTENTION: Expect @p costs already sorted by increasing costs.
1031 * @return 1 if coloring could be applied, 0 otherwise.
1033 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
1036 struct list_head local_changed;
1039 if (depth > *max_depth)
1042 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1043 DBG_COL_COST(env, LEVEL_4, costs);
1044 DB((dbg, LEVEL_4, "\n"));
1046 if (depth >= recolor_limit) {
1047 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
1051 for (i = 0; i < env->n_regs; ++i) {
1052 int tgt_col = costs[i].col;
1056 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
1057 if (costs[i].cost == REAL(0.0)) {
1058 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1062 /* Set the new color of the node and mark the node as temporarily fixed. */
1063 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1064 INIT_LIST_HEAD(&local_changed);
1065 set_temp_color(node, tgt_col, &local_changed);
1066 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1068 /* try to color all interfering neighbours with current color forbidden */
1069 for (j = 0; j < node->n_neighs; ++j) {
1073 neigh = node->int_neighs[j];
1075 if (arch_irn_is_ignore(neigh))
1078 nn = get_co_mst_irn(env, neigh);
1079 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1080 neigh, j, nn->fixed, nn->tmp_col, nn->col));
1083 Try to change the color of the neighbor and record all nodes which
1084 get changed in the tmp list. Add this list to the "changed" list for
1085 that color. If we did not succeed to change the color of the neighbor,
1086 we bail out and try the next color.
1088 if (get_mst_irn_col(nn) == tgt_col) {
1089 /* try to color neighbour with tgt_col forbidden */
1090 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1098 We managed to assign the target color to all neighbors, so from the perspective
1099 of the current node, every thing was ok and we can return safely.
1102 /* append the local_changed ones to global ones */
1103 list_splice(&local_changed, changed);
1107 /* coloring of neighbours failed, so we try next color */
1108 reject_coloring(&local_changed);
1112 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1117 * Tries to bring node @p node and all its neighbours to color @p tgt_col.
1118 * @return 1 if color @p col could be applied, 0 otherwise
1120 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
1122 int col = get_mst_irn_col(node);
1124 /* if node already has the target color -> good, temporary fix it */
1125 if (col == tgt_col) {
1126 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1128 set_temp_color(node, tgt_col, changed);
1133 Node has not yet a fixed color and target color is admissible
1134 -> try to recolor node and its affinity neighbours
1136 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
1137 col_cost_t *costs = env->single_cols[tgt_col];
1138 int res, max_depth, trip;
1143 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1144 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1145 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1146 stat_ev_int("heur4_recolor_depth_max", max_depth);
1147 stat_ev_int("heur4_recolor_trip", trip);
1153 #ifdef DEBUG_libfirm
1154 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1155 if (!is_loose(node))
1156 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1158 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1159 dbg_admissible_colors(env, node);
1160 DB((dbg, LEVEL_4, ")\n"));
1169 * Tries to color an affinity chunk (or at least a part of it).
1170 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1172 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
1174 aff_chunk_t *best_chunk = NULL;
1175 int n_nodes = ARR_LEN(c->n);
1176 int best_color = -1;
1177 int n_int_chunks = 0;
1178 waitq *tmp_chunks = new_waitq();
1179 waitq *best_starts = NULL;
1180 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1187 struct list_head changed;
1189 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1190 DBG_AFF_CHUNK(env, LEVEL_2, c);
1191 DB((dbg, LEVEL_2, "\n"));
1193 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1195 ++env->chunk_visited;
1197 /* compute color preference */
1198 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1199 const ir_node *n = c->interfere[pos];
1200 co_mst_irn_t *node = get_co_mst_irn(env, n);
1201 aff_chunk_t *chunk = node->chunk;
1203 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1204 assert(!chunk->deleted);
1205 chunk->visited = env->chunk_visited;
1208 aff_chunk_assure_weight(env, chunk);
1209 for (i = 0; i < env->n_regs; ++i)
1210 order[i].cost += chunk->color_affinity[i].cost;
1214 for (i = 0; i < env->n_regs; ++i) {
1215 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1217 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1220 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1222 DBG_COL_COST(env, LEVEL_2, order);
1223 DB((dbg, LEVEL_2, "\n"));
1225 /* check which color is the "best" for the given chunk.
1226 * if we found a color which was ok for all nodes, we take it
1227 * and do not look further. (see did_all flag usage below.)
1228 * If we have many colors which fit all nodes it is hard to decide
1229 * which one to take anyway.
1230 * TODO Sebastian: Perhaps we should at all nodes and figure out
1231 * a suitable color using costs as done above (determine_color_costs).
1233 for (i = 0; i < env->n_regs; ++i) {
1234 int col = order[i].col;
1236 aff_chunk_t *local_best;
1239 /* skip ignore colors */
1240 if (!bitset_is_set(env->allocatable_regs, col))
1243 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1246 good_starts = new_waitq();
1248 /* try to bring all nodes of given chunk to the current color. */
1249 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1250 const ir_node *irn = c->n[idx];
1251 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1254 assert(! node->fixed && "Node must not have a fixed color.");
1255 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1258 The order of the colored nodes is important, so we record the successfully
1259 colored ones in the order they appeared.
1261 INIT_LIST_HEAD(&changed);
1263 good = change_node_color(env, node, col, &changed);
1264 stat_ev_tim_pop("heur4_recolor");
1266 waitq_put(good_starts, node);
1267 materialize_coloring(&changed);
1272 reject_coloring(&changed);
1274 n_succeeded += good;
1275 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1278 /* unfix all nodes */
1279 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1280 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1284 /* try next color when failed */
1285 if (n_succeeded == 0) {
1286 del_waitq(good_starts);
1290 /* fragment the chunk according to the coloring */
1291 local_best = fragment_chunk(env, col, c, tmp_chunks);
1293 /* search the best of the good list
1294 and make it the new best if it is better than the current */
1296 aff_chunk_assure_weight(env, local_best);
1298 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1299 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1301 if (! best_chunk || best_chunk->weight < local_best->weight) {
1302 best_chunk = local_best;
1305 del_waitq(best_starts);
1306 best_starts = good_starts;
1307 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1309 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1310 del_waitq(good_starts);
1314 del_waitq(good_starts);
1317 /* if all nodes were recolored, bail out */
1318 if (n_succeeded == n_nodes)
1322 stat_ev_int("heur4_colors_tried", i);
1324 /* free all intermediate created chunks except best one */
1325 while (! waitq_empty(tmp_chunks)) {
1326 aff_chunk_t *tmp = (aff_chunk_t*)waitq_get(tmp_chunks);
1327 if (tmp != best_chunk)
1328 delete_aff_chunk(tmp);
1330 del_waitq(tmp_chunks);
1332 /* return if coloring failed */
1335 del_waitq(best_starts);
1339 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1340 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1341 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1343 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1344 const ir_node *irn = best_chunk->n[idx];
1345 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1348 /* bring the node to the color. */
1349 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1350 INIT_LIST_HEAD(&changed);
1352 res = change_node_color(env, node, best_color, &changed);
1353 stat_ev_tim_pop("heur4_recolor");
1355 materialize_coloring(&changed);
1358 assert(list_empty(&changed));
1361 /* remove the nodes in best chunk from original chunk */
1362 len = ARR_LEN(best_chunk->n);
1363 for (idx = 0; idx < len; ++idx) {
1364 const ir_node *irn = best_chunk->n[idx];
1365 int pos = nodes_bsearch(c->n, irn);
1370 len = ARR_LEN(c->n);
1371 for (idx = nidx = 0; idx < len; ++idx) {
1372 const ir_node *irn = c->n[idx];
1378 ARR_SHRINKLEN(c->n, nidx);
1381 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1382 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1383 const ir_node *n = c->n[idx];
1384 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1388 /* fragment the remaining chunk */
1389 visited = bitset_malloc(get_irg_last_idx(env->co->irg));
1390 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1391 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1393 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1394 const ir_node *irn = c->n[idx];
1395 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1396 aff_chunk_t *new_chunk = new_aff_chunk(env);
1397 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1399 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1400 aff_chunk_assure_weight(env, new_chunk);
1401 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1405 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1406 const ir_node *n = best_chunk->n[idx];
1407 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1411 /* clear obsolete chunks and free some memory */
1412 delete_aff_chunk(best_chunk);
1413 bitset_free(visited);
1415 del_waitq(best_starts);
1417 stat_ev_ctx_pop("heur4_color_chunk");
1421 * Main driver for mst safe coalescing algorithm.
/* NOTE(review): this listing is missing several structural lines (opening
 * brace, some declarations, `if` conditions, `continue`s and the final
 * `return`).  The comments below describe only the statements that are
 * visible; do not treat them as a complete account of the function. */
1423 static int co_solve_heuristic_mst(copy_opt_t *co)
/* Number of registers in the register class we are coloring. */
1425 unsigned n_regs = co->cls->n_regs;
/* Stack-allocated bitset holding the allocatable registers of this class. */
1426 bitset_t *allocatable_regs = bitset_alloca(n_regs);
1430 co_mst_env_t mst_env;
/* Per-node data lives in the map; all auxiliary memory on one obstack. */
1437 ir_nodemap_init(&mst_env.map, co->irg);
1438 obstack_init(&mst_env.obst);
/* Ask the backend which registers of this class are actually allocatable. */
1440 be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
/* Populate the environment shared by all heuristic phases. */
1442 mst_env.n_regs = n_regs;
1443 mst_env.chunks = new_pqueue();
1445 mst_env.allocatable_regs = allocatable_regs;
1446 mst_env.ifg = co->cenv->ifg;
1447 INIT_LIST_HEAD(&mst_env.chunklist);
1448 mst_env.chunk_visited = 0;
/* One cost vector per register: single_cols[i] is used when forcing color i. */
1449 mst_env.single_cols = OALLOCN(&mst_env.obst, col_cost_t*, n_regs);
1451 for (i = 0; i < n_regs; ++i) {
1452 col_cost_t *vec = OALLOCN(&mst_env.obst, col_cost_t, n_regs);
1454 mst_env.single_cols[i] = vec;
/* All entries start with zero cost ... */
1455 for (j = 0; j < n_regs; ++j) {
1457 vec[j].cost = REAL(0.0);
/* ... and the first entry gets full weight.  (The lines assigning the
 * `col` fields are not visible here - presumably the vector is arranged
 * so that entry 0 corresponds to color i; TODO confirm.) */
1461 vec[0].cost = REAL(1.0);
1464 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
/* Phase 1: partition the affinity graph into chunks (timed for statistics). */
1466 /* build affinity chunks */
1468 build_affinity_chunks(&mst_env);
1469 stat_ev_tim_pop("heur4_initial_chunk");
/* Phase 2: color chunks in priority order (heaviest weight first). */
1471 /* color chunks as long as there are some */
1472 while (! pqueue_empty(mst_env.chunks)) {
1473 aff_chunk_t *chunk = (aff_chunk_t*)pqueue_pop_front(mst_env.chunks);
1475 color_aff_chunk(&mst_env, chunk);
1476 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1477 delete_aff_chunk(chunk);
/* Phase 3: write the computed colors back as register assignments. */
1480 /* apply coloring */
1481 for (pn = 0; pn < ARR_LEN(mst_env.map.data); ++pn) {
1482 co_mst_irn_t *mirn = (co_mst_irn_t*)mst_env.map.data[pn];
1483 const arch_register_t *reg;
/* Map slot index back to the node; ignore-nodes keep their register. */
1486 irn = get_idx_irn(co->irg, pn);
1487 if (arch_irn_is_ignore(irn))
1490 /* skip nodes where color hasn't changed */
1491 if (mirn->init_col == mirn->col)
1494 reg = arch_register_for_index(co->cls, mirn->col);
1495 arch_set_irn_register(irn, reg);
1496 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
/* Tear down all heuristic-local storage. */
1499 /* free allocated memory */
1500 del_pqueue(mst_env.chunks);
1501 obstack_free(&mst_env.obst, NULL);
1502 ir_nodemap_destroy(&mst_env.map);
1504 stat_ev_tim_pop("heur4_total");
1509 static const lc_opt_table_entry_t options[] = {
1510 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1511 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
1515 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4)
1516 void be_init_copyheur4(void)
1518 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1519 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1520 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1521 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1522 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1524 static co_algo_info copyheur = {
1525 co_solve_heuristic_mst, 0
1528 lc_opt_add_table(heur4_grp, options);
1529 be_register_copyopt("heur4", ©heur);
1531 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");