2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
26 * This is the C implementation of the mst algorithm
27 * originally written in Java by Sebastian Hack.
28 * (also known as "heur3" :)
29 * Performs simple copy minimization.
33 #define DISABLE_STATEV
40 #include "raw_bitset.h"
41 #include "irnodemap.h"
55 #include "becopyopt_t.h"
61 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while (0)
62 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while (0)
64 static firm_dbg_module_t *dbg = NULL;
68 #define DBG_AFF_CHUNK(env, level, chunk)
69 #define DBG_COL_COST(env, level, cost)
74 #define REAL(C) (C ## f)
76 static unsigned last_chunk_id = 0;
77 static int recolor_limit = 7;
78 static double dislike_influence = REAL(0.1);
80 typedef struct col_cost_t {
88 typedef struct aff_chunk_t {
89 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
90 const ir_node **interfere; /**< An ARR_F containing all inference. */
91 int weight; /**< Weight of this chunk */
92 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
93 unsigned deleted : 1; /**< For debugging: Set if the was deleted. */
94 unsigned id; /**< An id of this chunk. */
97 col_cost_t color_affinity[1];
103 typedef struct aff_edge_t {
104 const ir_node *src; /**< Source node. */
105 const ir_node *tgt; /**< Target node. */
106 int weight; /**< The weight of this edge. */
109 /* main coalescing environment */
110 typedef struct co_mst_env_t {
111 int n_regs; /**< number of regs in class */
112 bitset_t *allocatable_regs; /**< set containing all global ignore registers */
113 ir_nodemap map; /**< phase object holding data for nodes */
115 pqueue_t *chunks; /**< priority queue for chunks */
116 list_head chunklist; /**< list holding all chunks */
117 be_ifg_t *ifg; /**< the interference graph */
118 copy_opt_t *co; /**< the copy opt object */
119 unsigned chunk_visited;
120 col_cost_t **single_cols;
123 /* stores coalescing related information for a node */
124 typedef struct co_mst_irn_t {
125 const ir_node *irn; /**< the irn this information belongs to */
126 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
127 bitset_t *adm_colors; /**< set of admissible colors for this irn */
128 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
129 int n_neighs; /**< length of the interfering neighbours array. */
130 int int_aff_neigh; /**< number of interfering affinity neighbours */
131 int col; /**< color currently assigned */
132 int init_col; /**< the initial color */
133 int tmp_col; /**< a temporary assigned color */
134 unsigned fixed : 1; /**< the color is fixed */
135 struct list_head list; /**< Queue for coloring undo. */
136 real_t constr_factor;
140 * In case there is no phase information for irn, initialize it.
142 static co_mst_irn_t *co_mst_irn_init(co_mst_env_t *env, const ir_node *irn)
144 co_mst_irn_t *res = OALLOC(&env->obst, co_mst_irn_t);
146 const arch_register_req_t *req;
147 neighbours_iter_t nodes_it;
154 res->int_neighs = NULL;
155 res->int_aff_neigh = 0;
156 res->col = arch_get_irn_register(irn)->index;
157 res->init_col = res->col;
158 INIT_LIST_HEAD(&res->list);
160 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
162 /* set admissible registers */
163 res->adm_colors = bitset_obstack_alloc(&env->obst, env->n_regs);
165 /* Exclude colors not assignable to the irn */
166 req = arch_get_irn_register_req(irn);
167 if (arch_register_req_is(req, limited)) {
168 rbitset_copy_to_bitset(req->limited, res->adm_colors);
169 /* exclude global ignore registers as well */
170 bitset_and(res->adm_colors, env->allocatable_regs);
172 bitset_copy(res->adm_colors, env->allocatable_regs);
175 /* compute the constraint factor */
176 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
178 /* set the number of interfering affinity neighbours to -1, they are calculated later */
179 res->int_aff_neigh = -1;
181 /* build list of interfering neighbours */
183 be_ifg_foreach_neighbour(env->ifg, &nodes_it, irn, neigh) {
184 if (!arch_irn_is_ignore(neigh)) {
185 obstack_ptr_grow(&env->obst, neigh);
189 res->int_neighs = (ir_node**)obstack_finish(&env->obst);
194 static co_mst_irn_t *get_co_mst_irn(co_mst_env_t *env, const ir_node *node)
196 co_mst_irn_t *res = ir_nodemap_get(co_mst_irn_t, &env->map, node);
198 res = co_mst_irn_init(env, node);
199 ir_nodemap_insert(&env->map, node, res);
204 typedef int decide_func_t(const co_mst_irn_t *node, int col);
209 * Write a chunk to stderr for debugging.
211 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
215 if (c->weight_consistent)
216 ir_fprintf(stderr, " $%d ", c->weight);
217 ir_fprintf(stderr, "{");
218 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
219 const ir_node *n = c->n[i];
220 ir_fprintf(stderr, " %+F,", n);
222 ir_fprintf(stderr, "}");
226 * Dump all admissible colors to stderr.
228 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
232 if (bitset_popcount(node->adm_colors) < 1)
233 fprintf(stderr, "no admissible colors?!?");
235 bitset_foreach(node->adm_colors, idx) {
236 ir_fprintf(stderr, " %zu", idx);
242 * Dump color-cost pairs to stderr.
244 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
247 for (i = 0; i < env->n_regs; ++i)
248 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
251 #endif /* DEBUG_libfirm */
253 static inline int get_mst_irn_col(const co_mst_irn_t *node)
255 return node->tmp_col >= 0 ? node->tmp_col : node->col;
259 * @return 1 if node @p node has color @p col, 0 otherwise.
261 static int decider_has_color(const co_mst_irn_t *node, int col)
263 return get_mst_irn_col(node) == col;
267 * @return 1 if node @p node has not color @p col, 0 otherwise.
269 static int decider_hasnot_color(const co_mst_irn_t *node, int col)
271 return get_mst_irn_col(node) != col;
275 * Always returns true.
277 static int decider_always_yes(const co_mst_irn_t *node, int col)
284 /** compares two affinity edges by its weight */
285 static int cmp_aff_edge(const void *a, const void *b)
287 const aff_edge_t *e1 = (const aff_edge_t*)a;
288 const aff_edge_t *e2 = (const aff_edge_t*)b;
290 if (e2->weight == e1->weight) {
291 if (e2->src->node_idx == e1->src->node_idx)
292 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
294 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
296 /* sort in descending order */
297 return QSORT_CMP(e2->weight, e1->weight);
300 /** compares to color-cost pairs */
301 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
303 const col_cost_t *c1 = (const col_cost_t*)a;
304 const col_cost_t *c2 = (const col_cost_t*)b;
305 real_t diff = c1->cost - c2->cost;
312 return QSORT_CMP(c1->col, c2->col);
315 static int cmp_col_cost_gt(const void *a, const void *b)
317 const col_cost_t *c1 = (const col_cost_t*)a;
318 const col_cost_t *c2 = (const col_cost_t*)b;
319 real_t diff = c2->cost - c1->cost;
326 return QSORT_CMP(c1->col, c2->col);
330 * Creates a new affinity chunk
332 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
334 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
335 c->n = NEW_ARR_F(const ir_node *, 0);
336 c->interfere = NEW_ARR_F(const ir_node *, 0);
338 c->weight_consistent = 0;
340 c->id = ++last_chunk_id;
342 list_add(&c->list, &env->chunklist);
347 * Frees all memory allocated by an affinity chunk.
349 static inline void delete_aff_chunk(aff_chunk_t *c)
352 DEL_ARR_F(c->interfere);
359 * binary search of sorted nodes.
361 * @return the position where n is found in the array arr or ~pos
362 * if the nodes is not here.
364 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
366 int hi = ARR_LEN(arr);
370 int md = lo + ((hi - lo) >> 1);
383 /** Check if a node n can be found inside arr. */
384 static int node_contains(const ir_node **arr, const ir_node *n)
386 int i = nodes_bsearch(arr, n);
391 * Insert a node into the sorted nodes list.
393 * @return 1 if the node was inserted, 0 else
395 static int nodes_insert(const ir_node ***arr, const ir_node *irn)
397 int idx = nodes_bsearch(*arr, irn);
400 int i, n = ARR_LEN(*arr);
403 ARR_APP1(const ir_node *, *arr, irn);
408 for (i = n - 1; i >= idx; --i)
417 * Adds a node to an affinity chunk
419 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
423 if (! nodes_insert(&c->n, node->irn))
426 c->weight_consistent = 0;
429 for (i = node->n_neighs - 1; i >= 0; --i) {
430 ir_node *neigh = node->int_neighs[i];
431 nodes_insert(&c->interfere, neigh);
436 * Check if affinity chunk @p chunk interferes with node @p irn.
438 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
440 return node_contains(chunk->interfere, irn);
444 * Check if there are interference edges from c1 to c2.
446 * @param c2 Another chunk
447 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
449 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
456 /* check if there is a node in c2 having an interfering neighbor in c1 */
457 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
458 const ir_node *irn = c2->n[i];
460 if (node_contains(c1->interfere, irn))
467 * Returns the affinity chunk of @p irn or creates a new
468 * one with @p irn as element if there is none assigned.
470 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
472 co_mst_irn_t *node = get_co_mst_irn(env, irn);
477 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
478 * are no interference edges from chunk(src) to chunk(tgt)).
479 * @return 1 if successful, 0 if not possible
481 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
483 aff_chunk_t *c1 = get_aff_chunk(env, src);
484 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
487 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
489 DBG_AFF_CHUNK(env, LEVEL_4, c1);
491 DB((dbg, LEVEL_4, "{%+F}", src));
493 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
495 DBG_AFF_CHUNK(env, LEVEL_4, c2);
497 DB((dbg, LEVEL_4, "{%+F}", tgt));
499 DB((dbg, LEVEL_4, "\n"));
504 /* no chunk exists */
505 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
508 for (i = mirn->n_neighs - 1; i >= 0; --i) {
509 if (mirn->int_neighs[i] == tgt)
513 /* create one containing both nodes */
514 c1 = new_aff_chunk(env);
515 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
516 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
520 /* c2 already exists */
521 if (! aff_chunk_interferes(c2, src)) {
522 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
526 } else if (c2 == NULL) {
527 /* c1 already exists */
528 if (! aff_chunk_interferes(c1, tgt)) {
529 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
532 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
535 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
536 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
538 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
539 const ir_node *irn = c2->interfere[idx];
540 nodes_insert(&c1->interfere, irn);
543 c1->weight_consistent = 0;
545 delete_aff_chunk(c2);
548 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
552 DB((dbg, LEVEL_4, " ... absorbed\n"));
557 * Assures that the weight of the given chunk is consistent.
559 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
561 if (! c->weight_consistent) {
565 for (i = 0; i < env->n_regs; ++i) {
566 c->color_affinity[i].col = i;
567 c->color_affinity[i].cost = REAL(0.0);
570 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
571 const ir_node *n = c->n[idx];
572 const affinity_node_t *an = get_affinity_info(env->co, n);
573 co_mst_irn_t *node = get_co_mst_irn(env, n);
576 if (node->constr_factor > REAL(0.0)) {
577 bitset_foreach (node->adm_colors, col)
578 c->color_affinity[col].cost += node->constr_factor;
582 co_gs_foreach_neighb(an, neigh) {
583 const ir_node *m = neigh->irn;
585 if (arch_irn_is_ignore(m))
588 w += node_contains(c->n, m) ? neigh->costs : 0;
593 for (i = 0; i < env->n_regs; ++i)
594 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
597 // c->weight = bitset_popcount(c->nodes);
598 c->weight_consistent = 1;
603 * Count the number of interfering affinity neighbours
605 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
607 const ir_node *irn = an->irn;
608 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
611 co_gs_foreach_neighb(an, neigh) {
612 const ir_node *n = neigh->irn;
615 if (arch_irn_is_ignore(n))
618 /* check if the affinity neighbour interfere */
619 for (i = 0; i < node->n_neighs; ++i) {
620 if (node->int_neighs[i] == n) {
631 * Build chunks of nodes connected by affinity edges.
632 * We start at the heaviest affinity edge.
633 * The chunks of the two edge-defining nodes will be
634 * merged if there are no interference edges from one
635 * chunk to the other.
637 static void build_affinity_chunks(co_mst_env_t *env)
639 nodes_iter_t nodes_it;
640 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
645 /* at first we create the affinity edge objects */
646 be_ifg_foreach_node(env->ifg, &nodes_it, n) {
647 int n_idx = get_irn_idx(n);
651 if (arch_irn_is_ignore(n))
654 n1 = get_co_mst_irn(env, n);
655 an = get_affinity_info(env->co, n);
658 if (n1->int_aff_neigh < 0)
659 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
661 /* build the affinity edges */
662 co_gs_foreach_neighb(an, neigh) {
663 const ir_node *m = neigh->irn;
664 int m_idx = get_irn_idx(m);
666 /* record the edge in only one direction */
671 /* skip ignore nodes */
672 if (arch_irn_is_ignore(m))
678 n2 = get_co_mst_irn(env, m);
679 if (n2->int_aff_neigh < 0) {
680 affinity_node_t *am = get_affinity_info(env->co, m);
681 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
684 * these weights are pure hackery ;-).
685 * It's not chriswue's fault but mine.
687 edge.weight = neigh->costs;
688 ARR_APP1(aff_edge_t, edges, edge);
694 /* now: sort edges and build the affinity chunks */
695 len = ARR_LEN(edges);
696 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
697 for (i = 0; i < len; ++i) {
698 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
700 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
703 /* now insert all chunks into a priority queue */
704 list_for_each_entry(aff_chunk_t, curr_chunk, &env->chunklist, list) {
705 aff_chunk_assure_weight(env, curr_chunk);
707 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
708 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
709 DBG((dbg, LEVEL_1, "\n"));
711 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
714 for (pn = 0; pn < ARR_LEN(env->map.data); ++pn) {
715 co_mst_irn_t *mirn = (co_mst_irn_t*)env->map.data[pn];
718 if (mirn->chunk != NULL)
721 /* no chunk is allocated so far, do it now */
722 aff_chunk_t *curr_chunk = new_aff_chunk(env);
723 aff_chunk_add_node(curr_chunk, mirn);
725 aff_chunk_assure_weight(env, curr_chunk);
727 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
728 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
729 DBG((dbg, LEVEL_1, "\n"));
731 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
737 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
739 pqueue_t *grow = new_pqueue();
740 ir_node const *max_node = NULL;
744 for (i = ARR_LEN(chunk->n); i != 0;) {
745 const ir_node *irn = chunk->n[--i];
746 affinity_node_t *an = get_affinity_info(env->co, irn);
749 if (arch_irn_is_ignore(irn))
753 co_gs_foreach_neighb(an, neigh)
756 if (w > max_weight) {
764 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
766 for (i = ARR_LEN(chunk->n); i != 0;)
767 bitset_set(visited, get_irn_idx(chunk->n[--i]));
769 pqueue_put(grow, (void *) max_node, max_weight);
770 bitset_clear(visited, get_irn_idx(max_node));
772 while (!pqueue_empty(grow)) {
773 ir_node *irn = (ir_node*)pqueue_pop_front(grow);
774 affinity_node_t *an = get_affinity_info(env->co, irn);
776 if (arch_irn_is_ignore(irn))
779 assert(i <= ARR_LEN(chunk->n));
784 /* build the affinity edges */
785 co_gs_foreach_neighb(an, neigh) {
786 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
788 if (bitset_is_set(visited, get_irn_idx(node->irn))) {
789 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
790 bitset_clear(visited, get_irn_idx(node->irn));
796 bitset_free(visited);
801 * Greedy collect affinity neighbours into thew new chunk @p chunk starting at node @p node.
803 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
804 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
806 waitq *nodes = new_waitq();
808 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
810 /* init queue and chunk */
811 waitq_put(nodes, node);
812 bitset_set(visited, get_irn_idx(node->irn));
813 aff_chunk_add_node(chunk, node);
814 DB((dbg, LEVEL_1, " %+F", node->irn));
816 /* as long as there are nodes in the queue */
817 while (! waitq_empty(nodes)) {
818 co_mst_irn_t *n = (co_mst_irn_t*)waitq_get(nodes);
819 affinity_node_t *an = get_affinity_info(env->co, n->irn);
821 /* check all affinity neighbors */
823 co_gs_foreach_neighb(an, neigh) {
824 const ir_node *m = neigh->irn;
825 int m_idx = get_irn_idx(m);
828 if (arch_irn_is_ignore(m))
831 n2 = get_co_mst_irn(env, m);
833 if (! bitset_is_set(visited, m_idx) &&
836 ! aff_chunk_interferes(chunk, m) &&
837 node_contains(orig_chunk->n, m))
840 following conditions are met:
841 - neighbour is not visited
842 - neighbour likes the color
843 - neighbour has not yet a fixed color
844 - the new chunk doesn't interfere with the neighbour
845 - neighbour belongs or belonged once to the original chunk
847 bitset_set(visited, m_idx);
848 aff_chunk_add_node(chunk, n2);
849 DB((dbg, LEVEL_1, " %+F", n2->irn));
850 /* enqueue for further search */
851 waitq_put(nodes, n2);
857 DB((dbg, LEVEL_1, "\n"));
863 * Fragment the given chunk into chunks having given color and not having given color.
865 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
867 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
869 aff_chunk_t *best = NULL;
871 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
874 aff_chunk_t *tmp_chunk;
875 decide_func_t *decider;
879 if (bitset_is_set(visited, get_irn_idx(irn)))
882 node = get_co_mst_irn(env, irn);
884 if (get_mst_irn_col(node) == col) {
885 decider = decider_has_color;
887 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
890 decider = decider_hasnot_color;
892 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
895 /* create a new chunk starting at current node */
896 tmp_chunk = new_aff_chunk(env);
897 waitq_put(tmp, tmp_chunk);
898 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
899 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
901 /* remember the local best */
902 aff_chunk_assure_weight(env, tmp_chunk);
903 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
907 assert(best && "No chunk found?");
908 bitset_free(visited);
913 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
914 * ATTENTION: the queue is empty after calling this function!
916 static inline void reject_coloring(struct list_head *nodes)
918 DB((dbg, LEVEL_4, "\treject coloring for"));
919 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
920 DB((dbg, LEVEL_4, " %+F", n->irn));
921 assert(n->tmp_col >= 0);
923 list_del_init(&n->list);
925 DB((dbg, LEVEL_4, "\n"));
928 static inline void materialize_coloring(struct list_head *nodes)
930 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
931 assert(n->tmp_col >= 0);
934 list_del_init(&n->list);
938 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
941 assert(!node->fixed);
942 assert(node->tmp_col < 0);
943 assert(node->list.next == &node->list && node->list.prev == &node->list);
944 assert(bitset_is_set(node->adm_colors, col));
946 list_add_tail(&node->list, changed);
950 static inline int is_loose(co_mst_irn_t *node)
952 return !node->fixed && node->tmp_col < 0;
956 * Determines the costs for each color if it would be assigned to node @p node.
958 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
960 int *neigh_cols = ALLOCAN(int, env->n_regs);
965 for (i = 0; i < env->n_regs; ++i) {
968 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
971 for (i = 0; i < node->n_neighs; ++i) {
972 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
973 int col = get_mst_irn_col(n);
978 costs[col].cost = REAL(0.0);
982 coeff = REAL(1.0) / n_loose;
983 for (i = 0; i < env->n_regs; ++i)
984 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
988 /* need forward declaration due to recursive call */
989 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
992 * Tries to change node to a color but @p explude_col.
993 * @return 1 if succeeded, 0 otherwise.
995 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
997 int col = get_mst_irn_col(node);
1000 /* neighbours has already a different color -> good, temporary fix it */
1001 if (col != exclude_col) {
1003 set_temp_color(node, col, changed);
1007 /* The node has the color it should not have _and_ has not been visited yet. */
1008 if (is_loose(node)) {
1009 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
1011 /* Get the costs for giving the node a specific color. */
1012 determine_color_costs(env, node, costs);
1014 /* Since the node must not have the not_col, set the costs for that color to "infinity" */
1015 costs[exclude_col].cost = REAL(0.0);
1017 /* sort the colors according costs, cheapest first. */
1018 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
1020 /* Try recoloring the node using the color list. */
1021 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1028 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1029 * ATTENTION: Expect @p costs already sorted by increasing costs.
1030 * @return 1 if coloring could be applied, 0 otherwise.
1032 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
1035 struct list_head local_changed;
1038 if (depth > *max_depth)
1041 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1042 DBG_COL_COST(env, LEVEL_4, costs);
1043 DB((dbg, LEVEL_4, "\n"));
1045 if (depth >= recolor_limit) {
1046 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
1050 for (i = 0; i < env->n_regs; ++i) {
1051 int tgt_col = costs[i].col;
1055 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
1056 if (costs[i].cost == REAL(0.0)) {
1057 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1061 /* Set the new color of the node and mark the node as temporarily fixed. */
1062 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1063 INIT_LIST_HEAD(&local_changed);
1064 set_temp_color(node, tgt_col, &local_changed);
1065 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1067 /* try to color all interfering neighbours with current color forbidden */
1068 for (j = 0; j < node->n_neighs; ++j) {
1072 neigh = node->int_neighs[j];
1074 if (arch_irn_is_ignore(neigh))
1077 nn = get_co_mst_irn(env, neigh);
1078 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1079 neigh, j, nn->fixed, nn->tmp_col, nn->col));
1082 Try to change the color of the neighbor and record all nodes which
1083 get changed in the tmp list. Add this list to the "changed" list for
1084 that color. If we did not succeed to change the color of the neighbor,
1085 we bail out and try the next color.
1087 if (get_mst_irn_col(nn) == tgt_col) {
1088 /* try to color neighbour with tgt_col forbidden */
1089 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1097 We managed to assign the target color to all neighbors, so from the perspective
1098 of the current node, every thing was ok and we can return safely.
1101 /* append the local_changed ones to global ones */
1102 list_splice(&local_changed, changed);
1106 /* coloring of neighbours failed, so we try next color */
1107 reject_coloring(&local_changed);
1111 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1116 * Tries to bring node @p node and all its neighbours to color @p tgt_col.
1117 * @return 1 if color @p col could be applied, 0 otherwise
1119 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
1121 int col = get_mst_irn_col(node);
1123 /* if node already has the target color -> good, temporary fix it */
1124 if (col == tgt_col) {
1125 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1127 set_temp_color(node, tgt_col, changed);
1132 Node has not yet a fixed color and target color is admissible
1133 -> try to recolor node and its affinity neighbours
1135 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
1136 col_cost_t *costs = env->single_cols[tgt_col];
1137 int res, max_depth, trip;
1142 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1143 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1144 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1145 stat_ev_int("heur4_recolor_depth_max", max_depth);
1146 stat_ev_int("heur4_recolor_trip", trip);
1152 #ifdef DEBUG_libfirm
1153 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1154 if (!is_loose(node))
1155 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1157 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1158 dbg_admissible_colors(env, node);
1159 DB((dbg, LEVEL_4, ")\n"));
1168 * Tries to color an affinity chunk (or at least a part of it).
1169 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1171 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
1173 aff_chunk_t *best_chunk = NULL;
1174 int n_nodes = ARR_LEN(c->n);
1175 int best_color = -1;
1176 int n_int_chunks = 0;
1177 waitq *tmp_chunks = new_waitq();
1178 waitq *best_starts = NULL;
1179 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1186 struct list_head changed;
1188 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1189 DBG_AFF_CHUNK(env, LEVEL_2, c);
1190 DB((dbg, LEVEL_2, "\n"));
1192 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1194 ++env->chunk_visited;
1196 /* compute color preference */
1197 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1198 const ir_node *n = c->interfere[pos];
1199 co_mst_irn_t *node = get_co_mst_irn(env, n);
1200 aff_chunk_t *chunk = node->chunk;
1202 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1203 assert(!chunk->deleted);
1204 chunk->visited = env->chunk_visited;
1207 aff_chunk_assure_weight(env, chunk);
1208 for (i = 0; i < env->n_regs; ++i)
1209 order[i].cost += chunk->color_affinity[i].cost;
1213 for (i = 0; i < env->n_regs; ++i) {
1214 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1216 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1219 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1221 DBG_COL_COST(env, LEVEL_2, order);
1222 DB((dbg, LEVEL_2, "\n"));
1224 /* check which color is the "best" for the given chunk.
1225 * if we found a color which was ok for all nodes, we take it
1226 * and do not look further. (see did_all flag usage below.)
1227 * If we have many colors which fit all nodes it is hard to decide
1228 * which one to take anyway.
1229 * TODO Sebastian: Perhaps we should at all nodes and figure out
1230 * a suitable color using costs as done above (determine_color_costs).
1232 for (i = 0; i < env->n_regs; ++i) {
1233 int col = order[i].col;
1235 aff_chunk_t *local_best;
1238 /* skip ignore colors */
1239 if (!bitset_is_set(env->allocatable_regs, col))
1242 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1245 good_starts = new_waitq();
1247 /* try to bring all nodes of given chunk to the current color. */
1248 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1249 const ir_node *irn = c->n[idx];
1250 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1253 assert(! node->fixed && "Node must not have a fixed color.");
1254 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1257 The order of the colored nodes is important, so we record the successfully
1258 colored ones in the order they appeared.
1260 INIT_LIST_HEAD(&changed);
1262 good = change_node_color(env, node, col, &changed);
1263 stat_ev_tim_pop("heur4_recolor");
1265 waitq_put(good_starts, node);
1266 materialize_coloring(&changed);
1271 reject_coloring(&changed);
1273 n_succeeded += good;
1274 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1277 /* unfix all nodes */
1278 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1279 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1283 /* try next color when failed */
1284 if (n_succeeded == 0) {
1285 del_waitq(good_starts);
1289 /* fragment the chunk according to the coloring */
1290 local_best = fragment_chunk(env, col, c, tmp_chunks);
1292 /* search the best of the good list
1293 and make it the new best if it is better than the current */
1295 aff_chunk_assure_weight(env, local_best);
1297 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1298 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1300 if (! best_chunk || best_chunk->weight < local_best->weight) {
1301 best_chunk = local_best;
1304 del_waitq(best_starts);
1305 best_starts = good_starts;
1306 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1308 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1309 del_waitq(good_starts);
1313 del_waitq(good_starts);
1316 /* if all nodes were recolored, bail out */
1317 if (n_succeeded == n_nodes)
1321 stat_ev_int("heur4_colors_tried", i);
1323 /* free all intermediate created chunks except best one */
1324 while (! waitq_empty(tmp_chunks)) {
1325 aff_chunk_t *tmp = (aff_chunk_t*)waitq_get(tmp_chunks);
1326 if (tmp != best_chunk)
1327 delete_aff_chunk(tmp);
1329 del_waitq(tmp_chunks);
1331 /* return if coloring failed */
1334 del_waitq(best_starts);
1338 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1339 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1340 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1342 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1343 const ir_node *irn = best_chunk->n[idx];
1344 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1347 /* bring the node to the color. */
1348 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1349 INIT_LIST_HEAD(&changed);
1351 res = change_node_color(env, node, best_color, &changed);
1352 stat_ev_tim_pop("heur4_recolor");
1354 materialize_coloring(&changed);
1357 assert(list_empty(&changed));
1360 /* remove the nodes in best chunk from original chunk */
1361 len = ARR_LEN(best_chunk->n);
1362 for (idx = 0; idx < len; ++idx) {
1363 const ir_node *irn = best_chunk->n[idx];
1364 int pos = nodes_bsearch(c->n, irn);
1369 len = ARR_LEN(c->n);
1370 for (idx = nidx = 0; idx < len; ++idx) {
1371 const ir_node *irn = c->n[idx];
1377 ARR_SHRINKLEN(c->n, nidx);
1380 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1381 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1382 const ir_node *n = c->n[idx];
1383 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1387 /* fragment the remaining chunk */
1388 visited = bitset_malloc(get_irg_last_idx(env->co->irg));
1389 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1390 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1392 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1393 const ir_node *irn = c->n[idx];
1394 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1395 aff_chunk_t *new_chunk = new_aff_chunk(env);
1396 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1398 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1399 aff_chunk_assure_weight(env, new_chunk);
1400 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1404 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1405 const ir_node *n = best_chunk->n[idx];
1406 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1410 /* clear obsolete chunks and free some memory */
1411 delete_aff_chunk(best_chunk);
1412 bitset_free(visited);
1414 del_waitq(best_starts);
1416 stat_ev_ctx_pop("heur4_color_chunk");
1420 * Main driver for mst safe coalescing algorithm.
/**
 * Main driver for the MST-based (heur4) copy-coalescing heuristic.
 *
 * Pipeline: set up a per-run environment (node map, obstack, priority
 * queue), precompute per-color unit cost vectors, build affinity chunks,
 * color chunks in priority order, then write the resulting colors back
 * as register assignments.
 *
 * @param co  the copy-optimization problem (register class, IFG, irg)
 * @return    presumably a success/cost indicator — the return statement is
 *            not visible in this excerpt; TODO confirm against full source.
 *
 * NOTE(review): this excerpt is missing several lines (declarations of the
 * loop counters i/j/pn and irn, some braces and continue statements); the
 * comments below describe only what the visible code establishes.
 */
1422 static int co_solve_heuristic_mst(copy_opt_t *co)
1424 unsigned n_regs = co->cls->n_regs;
1425 bitset_t *allocatable_regs = bitset_alloca(n_regs);
1429 co_mst_env_t mst_env;
/* Per-run environment: map from ir_node to co_mst_irn_t plus a private
 * obstack that owns all per-run allocations (freed in one shot below). */
1436 ir_nodemap_init(&mst_env.map, co->irg);
1437 obstack_init(&mst_env.obst);
1439 be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
1441 mst_env.n_regs = n_regs;
1442 mst_env.chunks = new_pqueue();
1444 mst_env.allocatable_regs = allocatable_regs;
1445 mst_env.ifg = co->cenv->ifg;
1446 INIT_LIST_HEAD(&mst_env.chunklist);
1447 mst_env.chunk_visited = 0;
1448 mst_env.single_cols = OALLOCN(&mst_env.obst, col_cost_t*, n_regs);
/* Precompute one cost vector per color: vec[j].cost is zeroed for all
 * colors, then the vector's first entry gets cost 1.0.  (Column/col field
 * initialization lines are not visible in this excerpt.) */
1450 for (i = 0; i < n_regs; ++i) {
1451 col_cost_t *vec = OALLOCN(&mst_env.obst, col_cost_t, n_regs);
1453 mst_env.single_cols[i] = vec;
1454 for (j = 0; j < n_regs; ++j) {
1456 vec[j].cost = REAL(0.0);
1460 vec[0].cost = REAL(1.0);
1463 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1465 /* build affinity chunks */
1467 build_affinity_chunks(&mst_env);
1468 stat_ev_tim_pop("heur4_initial_chunk");
1470 /* color chunks as long as there are some; heaviest chunk first,
 * since the pqueue was filled with chunk weight as priority */
1471 while (! pqueue_empty(mst_env.chunks)) {
1472 aff_chunk_t *chunk = (aff_chunk_t*)pqueue_pop_front(mst_env.chunks);
1474 color_aff_chunk(&mst_env, chunk);
1475 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1476 delete_aff_chunk(chunk);
1479 /* apply coloring: walk every mapped node and commit its chosen color
 * as the concrete register */
1480 for (pn = 0; pn < ARR_LEN(mst_env.map.data); ++pn) {
1481 co_mst_irn_t *mirn = (co_mst_irn_t*)mst_env.map.data[pn];
1482 const arch_register_t *reg;
/* recover the node from its index; ignored nodes take no register here */
1485 irn = get_idx_irn(co->irg, pn);
1486 if (arch_irn_is_ignore(irn))
1489 /* skip nodes where color hasn't changed */
1490 if (mirn->init_col == mirn->col)
1493 reg = arch_register_for_index(co->cls, mirn->col);
1494 arch_set_irn_register(irn, reg);
1495 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1498 /* free allocated memory */
1499 del_pqueue(mst_env.chunks);
/* obstack_free(obst, NULL) releases everything allocated on the obstack,
 * including single_cols and all per-node data */
1500 obstack_free(&mst_env.obst, NULL);
1501 ir_nodemap_destroy(&mst_env.map);
1503 stat_ev_tim_pop("heur4_total");
/**
 * Command-line options for the heur4 coalescer, registered under
 * be.ra.chordal.co.heur4 (see be_init_copyheur4):
 *   - "limit": caps the recoloring depth (backs the recolor_limit global)
 *   - "di":    dislike influence factor (backs the dislike_influence global)
 * NOTE(review): the table's LC_OPT_LAST terminator is outside this excerpt.
 */
1508 static const lc_opt_table_entry_t options[] = {
1509 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1510 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
1514 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4)
1515 void be_init_copyheur4(void)
1517 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1518 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1519 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1520 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1521 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1523 static co_algo_info copyheur = {
1524 co_solve_heuristic_mst, 0
1527 lc_opt_add_table(heur4_grp, options);
1528 be_register_copyopt("heur4", ©heur);
1530 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");