/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All right reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License.
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief   Simple copy minimization heuristics.
 * @author  Christian Wuerdig
 *
 * This is the C implementation of the mst algorithm
 * originally written in Java by Sebastian Hack.
 * (also known as "heur3" :)
 * Performs simple copy minimization.
 */
33 #define DISABLE_STATEV
40 #include "raw_bitset.h"
41 #include "irnodemap.h"
55 #include "becopyopt_t.h"
61 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while (0)
62 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while (0)
64 static firm_dbg_module_t *dbg = NULL;
68 #define DBG_AFF_CHUNK(env, level, chunk)
69 #define DBG_COL_COST(env, level, cost)
74 #define REAL(C) (C ## f)
76 static unsigned last_chunk_id = 0;
77 static int recolor_limit = 7;
78 static double dislike_influence = REAL(0.1);
80 typedef struct col_cost_t {
88 typedef struct aff_chunk_t {
89 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
90 const ir_node **interfere; /**< An ARR_F containing all inference. */
91 int weight; /**< Weight of this chunk */
92 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
93 unsigned deleted : 1; /**< For debugging: Set if the was deleted. */
94 unsigned id; /**< An id of this chunk. */
97 col_cost_t color_affinity[1];
103 typedef struct aff_edge_t {
104 const ir_node *src; /**< Source node. */
105 const ir_node *tgt; /**< Target node. */
106 int weight; /**< The weight of this edge. */
109 /* main coalescing environment */
110 typedef struct co_mst_env_t {
111 int n_regs; /**< number of regs in class */
112 bitset_t *allocatable_regs; /**< set containing all global ignore registers */
113 ir_nodemap map; /**< phase object holding data for nodes */
115 pqueue_t *chunks; /**< priority queue for chunks */
116 list_head chunklist; /**< list holding all chunks */
117 be_ifg_t *ifg; /**< the interference graph */
118 copy_opt_t *co; /**< the copy opt object */
119 unsigned chunk_visited;
120 col_cost_t **single_cols;
123 /* stores coalescing related information for a node */
124 typedef struct co_mst_irn_t {
125 const ir_node *irn; /**< the irn this information belongs to */
126 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
127 bitset_t *adm_colors; /**< set of admissible colors for this irn */
128 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
129 int n_neighs; /**< length of the interfering neighbours array. */
130 int int_aff_neigh; /**< number of interfering affinity neighbours */
131 int col; /**< color currently assigned */
132 int init_col; /**< the initial color */
133 int tmp_col; /**< a temporary assigned color */
134 unsigned fixed : 1; /**< the color is fixed */
135 struct list_head list; /**< Queue for coloring undo. */
136 real_t constr_factor;
/**
 * Allocate and initialize the per-node coalescing info for @p irn:
 * initial color, admissible color set, constraint factor and the cached
 * list of interfering neighbours.
 * In case there is no phase information for irn, initialize it.
 * NOTE(review): this extract is missing several original lines (field
 * inits, the else branch, closing braces and the return); code lines
 * below are kept verbatim, only comments were added.
 */
142 static co_mst_irn_t *co_mst_irn_init(co_mst_env_t *env, const ir_node *irn)
144 co_mst_irn_t *res = OALLOC(&env->obst, co_mst_irn_t);
146 const arch_register_req_t *req;
147 neighbours_iter_t nodes_it;
154 res->int_neighs = NULL;
155 res->int_aff_neigh = 0;
/* start from the color the register allocator already assigned */
156 res->col = arch_get_irn_register(irn)->index;
157 res->init_col = res->col;
/* empty list head: node is not on any coloring-undo queue yet */
158 INIT_LIST_HEAD(&res->list);
160 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
162 /* set admissible registers */
163 res->adm_colors = bitset_obstack_alloc(&env->obst, env->n_regs);
165 /* Exclude colors not assignable to the irn */
166 req = arch_get_irn_register_req(irn);
167 if (arch_register_req_is(req, limited)) {
168 rbitset_copy_to_bitset(req->limited, res->adm_colors);
169 /* exclude global ignore registers as well */
170 bitset_and(res->adm_colors, env->allocatable_regs);
/* (unconstrained case — presumably the else branch, missing line) */
172 bitset_copy(res->adm_colors, env->allocatable_regs);
175 /* compute the constraint factor */
/* fewer admissible colors => larger factor => node is "more constrained" */
176 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
178 /* set the number of interfering affinity neighbours to -1, they are calculated later */
179 res->int_aff_neigh = -1;
181 /* build list of interfering neighbours */
183 be_ifg_foreach_neighbour(env->ifg, &nodes_it, irn, neigh) {
184 if (!arch_irn_is_ignore(neigh)) {
/* collect neighbours on the obstack; finished into an array below */
185 obstack_ptr_grow(&env->obst, neigh);
189 res->int_neighs = (ir_node**)obstack_finish(&env->obst);
194 static co_mst_irn_t *get_co_mst_irn(co_mst_env_t *env, const ir_node *node)
196 co_mst_irn_t *res = ir_nodemap_get(co_mst_irn_t, &env->map, node);
198 res = co_mst_irn_init(env, node);
199 ir_nodemap_insert(&env->map, node, res);
204 typedef int decide_func_t(const co_mst_irn_t *node, int col);
/**
 * Write a chunk to stderr for debugging: the weight (if consistent),
 * followed by the chunk's node list in braces.
 * NOTE(review): the extract is missing the function braces and the
 * declarations of i/l; code lines are kept verbatim.
 */
211 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
/* only print the weight when it is known to be up to date */
215 if (c->weight_consistent)
216 ir_fprintf(stderr, " $%d ", c->weight);
217 ir_fprintf(stderr, "{");
218 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
219 const ir_node *n = c->n[i];
220 ir_fprintf(stderr, " %+F,", n);
222 ir_fprintf(stderr, "}");
/**
 * Dump all admissible colors of @p node to stderr.
 * NOTE(review): function braces and the loop's closing lines are
 * missing from this extract; code lines are kept verbatim.
 */
228 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
/* an empty admissible set would make the node uncolorable */
232 if (bitset_popcount(node->adm_colors) < 1)
233 fprintf(stderr, "no admissible colors?!?");
235 bitset_foreach(node->adm_colors, idx) {
236 ir_fprintf(stderr, " %zu", idx);
/**
 * Dump the (color, cost) pairs of @p cost (one entry per register)
 * to stderr.
 * NOTE(review): function braces and the declaration of i are missing
 * from this extract; code lines are kept verbatim.
 */
244 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
247 for (i = 0; i < env->n_regs; ++i)
248 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
251 #endif /* DEBUG_libfirm */
253 static inline int get_mst_irn_col(const co_mst_irn_t *node)
255 return node->tmp_col >= 0 ? node->tmp_col : node->col;
259 * @return 1 if node @p node has color @p col, 0 otherwise.
261 static int decider_has_color(const co_mst_irn_t *node, int col)
263 return get_mst_irn_col(node) == col;
267 * @return 1 if node @p node has not color @p col, 0 otherwise.
269 static int decider_hasnot_color(const co_mst_irn_t *node, int col)
271 return get_mst_irn_col(node) != col;
275 * Always returns true.
277 static int decider_always_yes(const co_mst_irn_t *node, int col)
284 /** compares two affinity edges by its weight */
285 static int cmp_aff_edge(const void *a, const void *b)
287 const aff_edge_t *e1 = (const aff_edge_t*)a;
288 const aff_edge_t *e2 = (const aff_edge_t*)b;
290 if (e2->weight == e1->weight) {
291 if (e2->src->node_idx == e1->src->node_idx)
292 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
294 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
296 /* sort in descending order */
297 return QSORT_CMP(e2->weight, e1->weight);
300 /** compares to color-cost pairs */
301 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
303 const col_cost_t *c1 = (const col_cost_t*)a;
304 const col_cost_t *c2 = (const col_cost_t*)b;
305 real_t diff = c1->cost - c2->cost;
312 return QSORT_CMP(c1->col, c2->col);
315 static int cmp_col_cost_gt(const void *a, const void *b)
317 const col_cost_t *c1 = (const col_cost_t*)a;
318 const col_cost_t *c2 = (const col_cost_t*)b;
319 real_t diff = c2->cost - c1->cost;
326 return QSORT_CMP(c1->col, c2->col);
330 * Creates a new affinity chunk
332 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
334 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
335 c->n = NEW_ARR_F(const ir_node *, 0);
336 c->interfere = NEW_ARR_F(const ir_node *, 0);
338 c->weight_consistent = 0;
340 c->id = ++last_chunk_id;
342 list_add(&c->list, &env->chunklist);
347 * Frees all memory allocated by an affinity chunk.
349 static inline void delete_aff_chunk(aff_chunk_t *c)
352 DEL_ARR_F(c->interfere);
/**
 * binary search of sorted nodes.
 *
 * @return the position where n is found in the array arr or ~pos
 * if the nodes is not here.
 * NOTE(review): the loop body, comparisons and return statements are
 * missing from this extract; code lines are kept verbatim. The ~pos
 * return encodes the insertion point for a missing node.
 */
364 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
366 int hi = ARR_LEN(arr);
/* classic overflow-safe midpoint computation */
370 int md = lo + ((hi - lo) >> 1);
383 /** Check if a node n can be found inside arr. */
384 static int node_contains(const ir_node **arr, const ir_node *n)
386 int i = nodes_bsearch(arr, n);
/**
 * Insert a node into the sorted nodes list.
 *
 * @return 1 if the node was inserted, 0 else
 * NOTE(review): the early-return for an already-present node, the
 * shift-into-place body and the returns are missing from this extract;
 * code lines are kept verbatim.
 */
395 static int nodes_insert(const ir_node ***arr, const ir_node *irn)
/* negative idx (~pos) from bsearch means "not present, insert at ~idx" */
397 int idx = nodes_bsearch(*arr, irn);
400 int i, n = ARR_LEN(*arr);
/* grow by one, then shift the tail right to open the insertion slot */
403 ARR_APP1(const ir_node *, *arr, irn);
408 for (i = n - 1; i >= idx; --i)
/**
 * Adds a node to an affinity chunk and merges the node's interference
 * neighbours into the chunk's interference set.
 * NOTE(review): the early return after a failed insert, the chunk
 * back-pointer update and the braces are missing from this extract;
 * code lines are kept verbatim.
 */
419 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
/* duplicate insert: node already in the chunk, nothing to do */
423 if (! nodes_insert(&c->n, node->irn))
/* membership changed, cached weight is stale now */
426 c->weight_consistent = 0;
429 for (i = node->n_neighs - 1; i >= 0; --i) {
430 ir_node *neigh = node->int_neighs[i];
431 nodes_insert(&c->interfere, neigh);
436 * Check if affinity chunk @p chunk interferes with node @p irn.
438 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
440 return node_contains(chunk->interfere, irn);
444 * Check if there are interference edges from c1 to c2.
446 * @param c2 Another chunk
447 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
449 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
456 /* check if there is a node in c2 having an interfering neighbor in c1 */
457 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
458 const ir_node *irn = c2->n[i];
460 if (node_contains(c1->interfere, irn))
467 * Returns the affinity chunk of @p irn or creates a new
468 * one with @p irn as element if there is none assigned.
470 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
472 co_mst_irn_t *node = get_co_mst_irn(env, irn);
/**
 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
 * are no interference edges from chunk(src) to chunk(tgt)).
 * Handles the four cases: neither node has a chunk, only one has a
 * chunk, or both have (distinct) chunks.
 * @return 1 if successful, 0 if not possible
 * NOTE(review): many lines (braces, early returns, the src/tgt
 * interference check result, chunk back-pointer updates) are missing
 * from this extract; code lines are kept verbatim.
 */
481 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
483 aff_chunk_t *c1 = get_aff_chunk(env, src);
484 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
487 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
489 DBG_AFF_CHUNK(env, LEVEL_4, c1);
491 DB((dbg, LEVEL_4, "{%+F}", src));
493 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
495 DBG_AFF_CHUNK(env, LEVEL_4, c2);
497 DB((dbg, LEVEL_4, "{%+F}", tgt));
499 DB((dbg, LEVEL_4, "\n"));
/* case 1: neither node is in a chunk yet */
504 /* no chunk exists */
505 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
/* bail out if src and tgt interfere directly */
508 for (i = mirn->n_neighs - 1; i >= 0; --i) {
509 if (mirn->int_neighs[i] == tgt)
513 /* create one containing both nodes */
514 c1 = new_aff_chunk(env);
515 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
516 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
/* case 2: only tgt has a chunk — pull src into it */
520 /* c2 already exists */
521 if (! aff_chunk_interferes(c2, src)) {
522 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
526 } else if (c2 == NULL) {
/* case 3: only src has a chunk — pull tgt into it */
527 /* c1 already exists */
528 if (! aff_chunk_interferes(c1, tgt)) {
529 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
/* case 4: both chunks exist and do not interfere — merge c2 into c1 */
532 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
535 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
536 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
538 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
539 const ir_node *irn = c2->interfere[idx];
540 nodes_insert(&c1->interfere, irn);
543 c1->weight_consistent = 0;
545 delete_aff_chunk(c2);
548 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
552 DB((dbg, LEVEL_4, " ... absorbed\n"));
/**
 * Assures that the weight of the given chunk is consistent:
 * recomputes the per-color affinity costs and the chunk weight from
 * the member nodes' constraint factors and affinity edges, if the
 * cached values are stale.
 * NOTE(review): declarations, the weight accumulation into w and the
 * final weight assignment are missing from this extract; code lines
 * are kept verbatim.
 */
559 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
/* only recompute when the cache was invalidated */
561 if (! c->weight_consistent) {
565 for (i = 0; i < env->n_regs; ++i) {
566 c->color_affinity[i].col = i;
567 c->color_affinity[i].cost = REAL(0.0);
570 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
571 const ir_node *n = c->n[idx];
572 const affinity_node_t *an = get_affinity_info(env->co, n);
573 co_mst_irn_t *node = get_co_mst_irn(env, n);
/* constrained nodes bias the chunk towards their admissible colors */
576 if (node->constr_factor > REAL(0.0)) {
577 bitset_foreach (node->adm_colors, col)
578 c->color_affinity[col].cost += node->constr_factor;
/* count affinity edges that stay inside the chunk */
582 co_gs_foreach_neighb(an, neigh) {
583 const ir_node *m = neigh->irn;
585 if (arch_irn_is_ignore(m))
588 w += node_contains(c->n, m) ? neigh->costs : 0;
/* normalize color affinities by the chunk size */
593 for (i = 0; i < env->n_regs; ++i)
594 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
597 // c->weight = bitset_popcount(c->nodes);
598 c->weight_consistent = 1;
/**
 * Count the number of interfering affinity neighbours of @p an, i.e.
 * affinity neighbours that also appear in the node's interference
 * neighbour list.
 * NOTE(review): the counter declaration/increment, braces and return
 * are missing from this extract; code lines are kept verbatim.
 */
605 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
607 const ir_node *irn = an->irn;
608 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
611 co_gs_foreach_neighb(an, neigh) {
612 const ir_node *n = neigh->irn;
615 if (arch_irn_is_ignore(n))
618 /* check if the affinity neighbour interfere */
619 for (i = 0; i < node->n_neighs; ++i) {
620 if (node->int_neighs[i] == n) {
/**
 * Build chunks of nodes connected by affinity edges.
 * We start at the heaviest affinity edge.
 * The chunks of the two edge-defining nodes will be
 * merged if there are no interference edges from one
 * chunk to the other.
 * Finally every node without a chunk gets a singleton chunk, and all
 * chunks are pushed into the priority queue by weight.
 * NOTE(review): declarations, braces, several guard conditions and
 * edge-field assignments are missing from this extract; code lines are
 * kept verbatim.
 */
637 static void build_affinity_chunks(co_mst_env_t *env)
639 nodes_iter_t nodes_it;
640 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
644 /* at first we create the affinity edge objects */
645 be_ifg_foreach_node(env->ifg, &nodes_it, n) {
646 int n_idx = get_irn_idx(n);
650 if (arch_irn_is_ignore(n))
653 n1 = get_co_mst_irn(env, n);
654 an = get_affinity_info(env->co, n);
/* lazily compute the interfering-affinity-neighbour count */
657 if (n1->int_aff_neigh < 0)
658 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
660 /* build the affinity edges */
661 co_gs_foreach_neighb(an, neigh) {
662 const ir_node *m = neigh->irn;
663 int m_idx = get_irn_idx(m);
665 /* record the edge in only one direction */
670 /* skip ignore nodes */
671 if (arch_irn_is_ignore(m))
677 n2 = get_co_mst_irn(env, m);
678 if (n2->int_aff_neigh < 0) {
679 affinity_node_t *am = get_affinity_info(env->co, m);
680 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
683 * these weights are pure hackery ;-).
684 * It's not chriswue's fault but mine.
686 edge.weight = neigh->costs;
687 ARR_APP1(aff_edge_t, edges, edge);
693 /* now: sort edges and build the affinity chunks */
694 len = ARR_LEN(edges);
/* heaviest edges first, see cmp_aff_edge */
695 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
696 for (i = 0; i < len; ++i) {
697 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
699 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
702 /* now insert all chunks into a priority queue */
703 list_for_each_entry(aff_chunk_t, curr_chunk, &env->chunklist, list) {
704 aff_chunk_assure_weight(env, curr_chunk);
706 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
707 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
708 DBG((dbg, LEVEL_1, "\n"));
710 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* give every remaining chunkless node its own singleton chunk */
713 for (pn = 0; pn < ARR_LEN(env->map.data); ++pn) {
714 co_mst_irn_t *mirn = (co_mst_irn_t*)env->map.data[pn];
717 if (mirn->chunk != NULL)
720 /* no chunk is allocated so far, do it now */
721 aff_chunk_t *curr_chunk = new_aff_chunk(env);
722 aff_chunk_add_node(curr_chunk, mirn);
724 aff_chunk_assure_weight(env, curr_chunk);
726 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
727 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
728 DBG((dbg, LEVEL_1, "\n"));
730 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/**
 * Reorders the nodes of @p chunk: picks the node with the heaviest
 * affinity-edge weight sum as seed, then grows outwards along
 * affinity edges in best-first order (priority queue), presumably
 * writing the visit order back into chunk->n — TODO confirm, the
 * write-back lines are missing from this extract.
 * NOTE(review): declarations, weight accumulation, braces and several
 * statements are missing; code lines are kept verbatim.
 */
736 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
738 pqueue_t *grow = new_pqueue();
739 ir_node const *max_node = NULL;
/* pass 1: find the node with the maximal summed affinity weight */
743 for (i = ARR_LEN(chunk->n); i != 0;) {
744 const ir_node *irn = chunk->n[--i];
745 affinity_node_t *an = get_affinity_info(env->co, irn);
748 if (arch_irn_is_ignore(irn))
752 co_gs_foreach_neighb(an, neigh)
755 if (w > max_weight) {
763 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
/* mark all chunk members as "not yet ordered" */
765 for (i = ARR_LEN(chunk->n); i != 0;)
766 bitset_set(visited, get_irn_idx(chunk->n[--i]));
/* pass 2: best-first growth from the seed node */
768 pqueue_put(grow, (void *) max_node, max_weight);
769 bitset_clear(visited, get_irn_idx(max_node));
771 while (!pqueue_empty(grow)) {
772 ir_node *irn = (ir_node*)pqueue_pop_front(grow);
773 affinity_node_t *an = get_affinity_info(env->co, irn);
775 if (arch_irn_is_ignore(irn))
778 assert(i <= ARR_LEN(chunk->n));
783 /* build the affinity edges */
784 co_gs_foreach_neighb(an, neigh) {
785 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
787 if (bitset_is_set(visited, get_irn_idx(node->irn))) {
788 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
789 bitset_clear(visited, get_irn_idx(node->irn));
795 bitset_free(visited);
/**
 * Greedy collect affinity neighbours into the new chunk @p chunk
 * starting at node @p node, using a BFS over affinity edges. A
 * neighbour is taken only if @p decider accepts it for color @p col,
 * it does not interfere with the chunk, and it belongs to the
 * original chunk @p orig_chunk.
 * NOTE(review): braces, the decider/is_loose parts of the condition
 * and the queue cleanup are missing from this extract; code lines are
 * kept verbatim.
 */
802 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
803 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
805 waitq *nodes = new_waitq();
807 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
809 /* init queue and chunk */
810 waitq_put(nodes, node);
811 bitset_set(visited, get_irn_idx(node->irn));
812 aff_chunk_add_node(chunk, node);
813 DB((dbg, LEVEL_1, " %+F", node->irn));
815 /* as long as there are nodes in the queue */
816 while (! waitq_empty(nodes)) {
817 co_mst_irn_t *n = (co_mst_irn_t*)waitq_get(nodes);
818 affinity_node_t *an = get_affinity_info(env->co, n->irn);
820 /* check all affinity neighbors */
822 co_gs_foreach_neighb(an, neigh) {
823 const ir_node *m = neigh->irn;
824 int m_idx = get_irn_idx(m);
827 if (arch_irn_is_ignore(m))
830 n2 = get_co_mst_irn(env, m);
832 if (! bitset_is_set(visited, m_idx) &&
835 ! aff_chunk_interferes(chunk, m) &&
836 node_contains(orig_chunk->n, m))
/* (comment from the original: neighbour is taken if all of the)
839 following conditions are met:
840 - neighbour is not visited
841 - neighbour likes the color
842 - neighbour has not yet a fixed color
843 - the new chunk doesn't interfere with the neighbour
844 - neighbour belongs or belonged once to the original chunk
846 bitset_set(visited, m_idx);
847 aff_chunk_add_node(chunk, n2);
848 DB((dbg, LEVEL_1, " %+F", n2->irn));
849 /* enqueue for further search */
850 waitq_put(nodes, n2);
856 DB((dbg, LEVEL_1, "\n"));
/**
 * Fragment the given chunk into chunks having given color and not having given color.
 * Every fragment is recorded in @p tmp; the heaviest fragment that has
 * the wanted color is returned as the local best.
 * NOTE(review): declarations, braces, the check_for_best assignment
 * and the return are missing from this extract; code lines are kept
 * verbatim.
 */
864 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
866 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
868 aff_chunk_t *best = NULL;
870 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
873 aff_chunk_t *tmp_chunk;
874 decide_func_t *decider;
/* node already pulled into an earlier fragment */
878 if (bitset_is_set(visited, get_irn_idx(irn)))
881 node = get_co_mst_irn(env, irn);
/* pick the membership predicate for this fragment's seed color */
883 if (get_mst_irn_col(node) == col) {
884 decider = decider_has_color;
886 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
889 decider = decider_hasnot_color;
891 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
894 /* create a new chunk starting at current node */
895 tmp_chunk = new_aff_chunk(env);
896 waitq_put(tmp, tmp_chunk);
897 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
898 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
900 /* remember the local best */
901 aff_chunk_assure_weight(env, tmp_chunk);
902 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
906 assert(best && "No chunk found?");
907 bitset_free(visited);
912 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
913 * ATTENTION: the queue is empty after calling this function!
915 static inline void reject_coloring(struct list_head *nodes)
917 DB((dbg, LEVEL_4, "\treject coloring for"));
918 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
919 DB((dbg, LEVEL_4, " %+F", n->irn));
920 assert(n->tmp_col >= 0);
922 list_del_init(&n->list);
924 DB((dbg, LEVEL_4, "\n"));
927 static inline void materialize_coloring(struct list_head *nodes)
929 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
930 assert(n->tmp_col >= 0);
933 list_del_init(&n->list);
937 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
940 assert(!node->fixed);
941 assert(node->tmp_col < 0);
942 assert(node->list.next == &node->list && node->list.prev == &node->list);
943 assert(bitset_is_set(node->adm_colors, col));
945 list_add_tail(&node->list, changed);
949 static inline int is_loose(co_mst_irn_t *node)
951 return !node->fixed && node->tmp_col < 0;
/**
 * Determines the costs for each color if it would be assigned to node @p node.
 * Starts from the node's constraint factor for admissible colors, then
 * scales each color down by the fraction of loose interfering
 * neighbours already occupying it; colors taken by non-loose
 * neighbours are zeroed (forbidden).
 * NOTE(review): declarations, the neigh_cols bookkeeping, the n_loose
 * guard and braces are missing from this extract; code lines are kept
 * verbatim.
 */
957 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
959 int *neigh_cols = ALLOCAN(int, env->n_regs);
964 for (i = 0; i < env->n_regs; ++i) {
/* admissible colors start at the node's constraint factor, others at 0 */
967 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
970 for (i = 0; i < node->n_neighs; ++i) {
971 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
972 int col = get_mst_irn_col(n);
/* color held by a non-loose neighbour is unusable */
977 costs[col].cost = REAL(0.0);
981 coeff = REAL(1.0) / n_loose;
982 for (i = 0; i < env->n_regs; ++i)
983 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
987 /* need forward declaration due to recursive call */
988 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
/**
 * Tries to change @p node to any color but @p exclude_col.
 * @return 1 if succeeded, 0 otherwise.
 * NOTE(review): braces, returns and the final result handling are
 * missing from this extract; code lines are kept verbatim.
 */
994 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
996 int col = get_mst_irn_col(node);
999 /* neighbours has already a different color -> good, temporary fix it */
1000 if (col != exclude_col) {
1002 set_temp_color(node, col, changed);
1006 /* The node has the color it should not have _and_ has not been visited yet. */
1007 if (is_loose(node)) {
1008 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
1010 /* Get the costs for giving the node a specific color. */
1011 determine_color_costs(env, node, costs);
1013 /* Since the node must not have the not_col, set the costs for that color to "infinity" */
/* cost 0.0 acts as "forbidden" here, see the bail-out in recolor_nodes */
1014 costs[exclude_col].cost = REAL(0.0);
1016 /* sort the colors according costs, cheapest first. */
1017 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
1019 /* Try recoloring the node using the color list. */
1020 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
/**
 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
 * ATTENTION: Expect @p costs already sorted by increasing costs.
 * Recurses (via change_node_color_excluded) and is bounded by
 * recolor_limit; @p max_depth and @p trip collect statistics.
 * @return 1 if coloring could be applied, 0 otherwise.
 * NOTE(review): braces, returns, the trip increment and the neigh_ok
 * loop exit are missing from this extract; code lines are kept
 * verbatim.
 */
1031 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
1034 struct list_head local_changed;
1037 if (depth > *max_depth)
1040 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1041 DBG_COL_COST(env, LEVEL_4, costs);
1042 DB((dbg, LEVEL_4, "\n"));
/* hard cap on recursion depth — give up instead of exploding */
1044 if (depth >= recolor_limit) {
1045 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
1049 for (i = 0; i < env->n_regs; ++i) {
1050 int tgt_col = costs[i].col;
1054 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
1055 if (costs[i].cost == REAL(0.0)) {
1056 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1060 /* Set the new color of the node and mark the node as temporarily fixed. */
1061 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1062 INIT_LIST_HEAD(&local_changed);
1063 set_temp_color(node, tgt_col, &local_changed);
1064 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1066 /* try to color all interfering neighbours with current color forbidden */
1067 for (j = 0; j < node->n_neighs; ++j) {
1071 neigh = node->int_neighs[j];
1073 if (arch_irn_is_ignore(neigh))
1076 nn = get_co_mst_irn(env, neigh);
1077 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1078 neigh, j, nn->fixed, nn->tmp_col, nn->col));
/* (original block comment, partially preserved:)
1081 Try to change the color of the neighbor and record all nodes which
1082 get changed in the tmp list. Add this list to the "changed" list for
1083 that color. If we did not succeed to change the color of the neighbor,
1084 we bail out and try the next color.
1086 if (get_mst_irn_col(nn) == tgt_col) {
1087 /* try to color neighbour with tgt_col forbidden */
1088 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
/* (original block comment, partially preserved:)
1096 We managed to assign the target color to all neighbors, so from the perspective
1097 of the current node, every thing was ok and we can return safely.
1100 /* append the local_changed ones to global ones */
1101 list_splice(&local_changed, changed);
1105 /* coloring of neighbours failed, so we try next color */
1106 reject_coloring(&local_changed);
1110 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
/**
 * Tries to bring node @p node and all its neighbours to color @p tgt_col.
 * Fast path: node already has the color (just pin it temporarily).
 * Otherwise, if the node is loose and the color admissible, run the
 * recursive recoloring with precomputed single-color costs.
 * @return 1 if color @p tgt_col could be applied, 0 otherwise.
 * NOTE(review): braces, returns, the max_depth/trip initialization and
 * the final failure return are missing from this extract; code lines
 * are kept verbatim.
 */
1118 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
1120 int col = get_mst_irn_col(node);
1122 /* if node already has the target color -> good, temporary fix it */
1123 if (col == tgt_col) {
1124 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1126 set_temp_color(node, tgt_col, changed);
/* (original block comment, partially preserved:)
1131 Node has not yet a fixed color and target color is admissible
1132 -> try to recolor node and its affinity neighbours
1134 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
/* precomputed cost vector that prefers exactly tgt_col */
1135 col_cost_t *costs = env->single_cols[tgt_col];
1136 int res, max_depth, trip;
1141 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1142 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1143 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1144 stat_ev_int("heur4_recolor_depth_max", max_depth);
1145 stat_ev_int("heur4_recolor_trip", trip);
/* debug-only diagnosis of why the recoloring was not attempted */
1151 #ifdef DEBUG_libfirm
1152 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1153 if (!is_loose(node))
1154 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1156 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1157 dbg_admissible_colors(env, node);
1158 DB((dbg, LEVEL_4, ")\n"));
1167 * Tries to color an affinity chunk (or at least a part of it).
1168 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1170 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
1172 aff_chunk_t *best_chunk = NULL;
1173 int n_nodes = ARR_LEN(c->n);
1174 int best_color = -1;
1175 int n_int_chunks = 0;
1176 waitq *tmp_chunks = new_waitq();
1177 waitq *best_starts = NULL;
1178 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1185 struct list_head changed;
1187 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1188 DBG_AFF_CHUNK(env, LEVEL_2, c);
1189 DB((dbg, LEVEL_2, "\n"));
1191 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1193 ++env->chunk_visited;
1195 /* compute color preference */
1196 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1197 const ir_node *n = c->interfere[pos];
1198 co_mst_irn_t *node = get_co_mst_irn(env, n);
1199 aff_chunk_t *chunk = node->chunk;
1201 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1202 assert(!chunk->deleted);
1203 chunk->visited = env->chunk_visited;
1206 aff_chunk_assure_weight(env, chunk);
1207 for (i = 0; i < env->n_regs; ++i)
1208 order[i].cost += chunk->color_affinity[i].cost;
1212 for (i = 0; i < env->n_regs; ++i) {
1213 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1215 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1218 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1220 DBG_COL_COST(env, LEVEL_2, order);
1221 DB((dbg, LEVEL_2, "\n"));
1223 /* check which color is the "best" for the given chunk.
1224 * if we found a color which was ok for all nodes, we take it
1225 * and do not look further. (see did_all flag usage below.)
1226 * If we have many colors which fit all nodes it is hard to decide
1227 * which one to take anyway.
1228 * TODO Sebastian: Perhaps we should at all nodes and figure out
1229 * a suitable color using costs as done above (determine_color_costs).
1231 for (i = 0; i < env->n_regs; ++i) {
1232 int col = order[i].col;
1234 aff_chunk_t *local_best;
1237 /* skip ignore colors */
1238 if (!bitset_is_set(env->allocatable_regs, col))
1241 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1244 good_starts = new_waitq();
1246 /* try to bring all nodes of given chunk to the current color. */
1247 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1248 const ir_node *irn = c->n[idx];
1249 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1252 assert(! node->fixed && "Node must not have a fixed color.");
/* NOTE(review): tail of color_aff_chunk() — the function head and several
 * intermediate lines are elided in this excerpt.  Visible logic: for each
 * candidate color, try to recolor every node of chunk 'c', keep the best
 * resulting fragment as 'best_chunk', then manifest its color and split the
 * remainder of 'c' into new chunks that are re-queued for later coloring. */
1253 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
/* (original comment text, enclosing markers elided in this excerpt) */
1256 The order of the colored nodes is important, so we record the successfully
1257 colored ones in the order they appeared.
1259 INIT_LIST_HEAD(&changed);
/* attempt to move this node to 'col'; 'changed' collects tentative colorings */
1261 good = change_node_color(env, node, col, &changed);
1262 stat_ev_tim_pop("heur4_recolor");
/* on success remember the node as a good starting point and commit colors */
1264 waitq_put(good_starts, node);
1265 materialize_coloring(&changed);
/* on failure roll back the tentative coloring */
1270 reject_coloring(&changed);
1272 n_succeeded += good;
1273 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1276 /* unfix all nodes */
1277 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1278 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1282 /* try next color when failed */
1283 if (n_succeeded == 0) {
1284 del_waitq(good_starts);
1288 /* fragment the chunk according to the coloring */
1289 local_best = fragment_chunk(env, col, c, tmp_chunks);
1291 /* search the best of the good list
1292 and make it the new best if it is better than the current */
1294 aff_chunk_assure_weight(env, local_best);
1296 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1297 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
/* higher weight wins; first candidate always becomes the initial best */
1299 if (! best_chunk || best_chunk->weight < local_best->weight) {
1300 best_chunk = local_best;
/* take over the good-starts queue of the new best, drop the old one */
1303 del_waitq(best_starts);
1304 best_starts = good_starts;
1305 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1307 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1308 del_waitq(good_starts);
1312 del_waitq(good_starts);
1315 /* if all nodes were recolored, bail out */
1316 if (n_succeeded == n_nodes)
1320 stat_ev_int("heur4_colors_tried", i);
1322 /* free all intermediate created chunks except best one */
1323 while (! waitq_empty(tmp_chunks)) {
1324 aff_chunk_t *tmp = (aff_chunk_t*)waitq_get(tmp_chunks);
1325 if (tmp != best_chunk)
1326 delete_aff_chunk(tmp);
1328 del_waitq(tmp_chunks);
1330 /* return if coloring failed */
1333 del_waitq(best_starts);
1337 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1338 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1339 DB((dbg, LEVEL_2, "using color %d\n", best_color));
/* manifest phase: force every node of the winning chunk to best_color */
1341 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1342 const ir_node *irn = best_chunk->n[idx];
1343 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1346 /* bring the node to the color. */
1347 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1348 INIT_LIST_HEAD(&changed);
1350 res = change_node_color(env, node, best_color, &changed);
1351 stat_ev_tim_pop("heur4_recolor");
1353 materialize_coloring(&changed);
/* after materializing, the changed-list must have been fully consumed */
1356 assert(list_empty(&changed));
1359 /* remove the nodes in best chunk from original chunk */
1360 len = ARR_LEN(best_chunk->n);
1361 for (idx = 0; idx < len; ++idx) {
1362 const ir_node *irn = best_chunk->n[idx];
/* c->n is kept sorted, so a binary search locates the node to delete */
1363 int pos = nodes_bsearch(c->n, irn);
/* compact c->n in place, keeping only the surviving (non-NULLed) entries */
1368 len = ARR_LEN(c->n);
1369 for (idx = nidx = 0; idx < len; ++idx) {
1370 const ir_node *irn = c->n[idx];
1376 ARR_SHRINKLEN(c->n, nidx);
1379 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1380 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1381 const ir_node *n = c->n[idx];
1382 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1386 /* fragment the remaining chunk */
/* mark the already-handled best-chunk nodes so expansion skips them */
1387 visited = bitset_malloc(get_irg_last_idx(env->co->irg));
1388 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1389 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
/* grow a fresh chunk from every not-yet-visited remainder node and
 * re-queue it (priority = chunk weight) for a later coloring round */
1391 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1392 const ir_node *irn = c->n[idx];
1393 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1394 aff_chunk_t *new_chunk = new_aff_chunk(env);
1395 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1397 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1398 aff_chunk_assure_weight(env, new_chunk);
1399 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1403 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1404 const ir_node *n = best_chunk->n[idx];
1405 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1409 /* clear obsolete chunks and free some memory */
1410 delete_aff_chunk(best_chunk);
1411 bitset_free(visited);
1413 del_waitq(best_starts);
1415 stat_ev_ctx_pop("heur4_color_chunk");
1419 * Main driver for mst safe coalescing algorithm.
/**
 * Main driver of the mst copy-minimization heuristic for one register class.
 * Sets up a co_mst_env_t, builds affinity chunks, colors them in priority
 * order, writes the resulting register assignment back to the nodes, and
 * frees all per-run data.  (Several body lines are elided in this excerpt,
 * including the declarations of i, j, pn, irn and the return statement.)
 */
1421 static int co_solve_heuristic_mst(copy_opt_t *co)
1423 unsigned n_regs = co->cls->n_regs;
1424 bitset_t *allocatable_regs = bitset_alloca(n_regs);
1428 co_mst_env_t mst_env;
/* per-run environment: node map + obstack for all co_mst_irn_t data */
1435 ir_nodemap_init(&mst_env.map, co->irg);
1436 obstack_init(&mst_env.obst);
1438 be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
1440 mst_env.n_regs = n_regs;
1441 mst_env.chunks = new_pqueue();
1443 mst_env.allocatable_regs = allocatable_regs;
1444 mst_env.ifg = co->cenv->ifg;
1445 INIT_LIST_HEAD(&mst_env.chunklist);
1446 mst_env.chunk_visited = 0;
1447 mst_env.single_cols = OALLOCN(&mst_env.obst, col_cost_t*, n_regs);
/* precompute one cost vector per register: cost 1.0 for that register
 * (stored at vec[0] after setup, elided lines swap entries), 0.0 elsewhere */
1449 for (i = 0; i < n_regs; ++i) {
1450 col_cost_t *vec = OALLOCN(&mst_env.obst, col_cost_t, n_regs);
1452 mst_env.single_cols[i] = vec;
1453 for (j = 0; j < n_regs; ++j) {
1455 vec[j].cost = REAL(0.0);
1459 vec[0].cost = REAL(1.0);
1462 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1464 /* build affinity chunks */
1466 build_affinity_chunks(&mst_env);
1467 stat_ev_tim_pop("heur4_initial_chunk");
1469 /* color chunks as long as there are some */
/* chunks are popped in priority order (heaviest weight first) */
1470 while (! pqueue_empty(mst_env.chunks)) {
1471 aff_chunk_t *chunk = (aff_chunk_t*)pqueue_pop_front(mst_env.chunks);
1473 color_aff_chunk(&mst_env, chunk);
1474 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1475 delete_aff_chunk(chunk);
1478 /* apply coloring */
1479 for (pn = 0; pn < ARR_LEN(mst_env.map.data); ++pn) {
1480 co_mst_irn_t *mirn = (co_mst_irn_t*)mst_env.map.data[pn];
1481 const arch_register_t *reg;
/* map index pn is the node index in the graph */
1484 irn = get_idx_irn(co->irg, pn);
/* ignore-nodes never get a register assigned by this pass */
1485 if (arch_irn_is_ignore(irn))
1488 /* skip nodes where color hasn't changed */
1489 if (mirn->init_col == mirn->col)
1492 reg = arch_register_for_index(co->cls, mirn->col);
1493 arch_set_irn_register(irn, reg);
1494 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1497 /* free allocated memory */
1498 del_pqueue(mst_env.chunks);
1499 obstack_free(&mst_env.obst, NULL);
1500 ir_nodemap_destroy(&mst_env.map);
1502 stat_ev_tim_pop("heur4_total");
/* Command-line options of the heur4 coalescer (terminator entry elided):
 * "limit" caps the recoloring recursion depth (recolor_limit),
 * "di" scales the dislike influence used in cost vectors (dislike_influence). */
1507 static const lc_opt_table_entry_t options[] = {
1508 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1509 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
/**
 * Module constructor: registers the "heur4" copy-optimization algorithm,
 * hooks its option table into be.ra.chordal.co.heur4, and sets up the
 * debug module.  (Braces and the option-struct terminator are elided
 * in this excerpt.)
 */
1513 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4)
1514 void be_init_copyheur4(void)
/* walk down the libcore option-group hierarchy to be.ra.chordal.co.heur4 */
1516 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1517 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1518 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1519 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1520 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
/* static: must outlive this constructor, the registry keeps the pointer */
1522 static co_algo_info copyheur = {
1523 co_solve_heuristic_mst, 0
1526 lc_opt_add_table(heur4_grp, options);
1527 be_register_copyopt("heur4", &copyheur);
1529 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");