2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
26 * This is the C implementation of the mst algorithm
27 * originally written in Java by Sebastian Hack.
28 * (also known as "heur3" :)
29 * Performs simple copy minimization.
33 #define DISABLE_STATEV
40 #include "raw_bitset.h"
41 #include "irnodemap.h"
55 #include "becopyopt_t.h"
/* Cost value marking a color that cannot be taken at all. */
59 #define COL_COST_INFEASIBLE DBL_MAX
/* Empirically chosen tuning constants for rating affinity/constraint
 * neighbours (arbitrary cost points). */
60 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
61 #define NEIGHBOUR_CONSTR_COSTS 64.0
/* Debug-build variants: call the dump helpers only when the requested debug
 * level is active.  NOTE(review): the #ifdef DEBUG_libfirm / #else / #endif
 * lines surrounding the two variants are not visible in this excerpt. */
66 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while (0)
67 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while (0)
69 static firm_dbg_module_t *dbg = NULL;
/* Release-build variants: expand to nothing. */
73 #define DBG_AFF_CHUNK(env, level, chunk)
74 #define DBG_COL_COST(env, level, cost)
/* Turn a numeric literal into a float literal, e.g. REAL(0.1) -> 0.1f. */
79 #define REAL(C) (C ## f)
/* Running id handed out to newly created affinity chunks. */
81 static unsigned last_chunk_id = 0;
/* Maximum recursion depth of the recoloring search (see recolor_nodes). */
82 static int recolor_limit = 7;
/* Weight of the "dislike" term when ordering candidate colors for a chunk. */
83 static double dislike_influence = REAL(0.1);
/* Rating of one register/color: the color index plus its accumulated cost. */
85 typedef struct col_cost_t {
/* A chunk: a set of affinity-related nodes the heuristic tries to assign
 * the same color. */
93 typedef struct aff_chunk_t {
94 const ir_node **n; /**< An ARR_F containing all nodes of the chunk (kept sorted). */
95 const ir_node **interfere; /**< An ARR_F containing all interference neighbours of the chunk. */
96 int weight; /**< Weight of this chunk */
97 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
98 unsigned deleted : 1; /**< For debugging: Set if the chunk was deleted. */
99 unsigned id; /**< An id of this chunk. */
102 col_cost_t color_affinity[1]; /**< Per-color affinity ratings; allocated with n_regs entries (trailing array idiom). */
/* An affinity edge between two nodes. */
108 typedef struct aff_edge_t {
109 const ir_node *src; /**< Source node. */
110 const ir_node *tgt; /**< Target node. */
111 int weight; /**< The weight of this edge. */
114 /* main coalescing environment */
115 typedef struct co_mst_env_t {
116 int n_regs; /**< number of regs in class */
117 bitset_t *allocatable_regs; /**< set of all allocatable (non-ignore) registers */
118 ir_nodemap map; /**< phase object holding data for nodes */
120 pqueue_t *chunks; /**< priority queue for chunks */
121 list_head chunklist; /**< list holding all chunks */
122 be_ifg_t *ifg; /**< the interference graph */
123 copy_opt_t *co; /**< the copy opt object */
124 unsigned chunk_visited; /**< timestamp used to mark chunks as visited (see color_aff_chunk) */
125 col_cost_t **single_cols; /**< per-color cost vectors, indexed by target color (see change_node_color) */
128 /* stores coalescing related information for a node */
129 typedef struct co_mst_irn_t {
130 const ir_node *irn; /**< the irn this information belongs to */
131 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
132 bitset_t *adm_colors; /**< set of admissible colors for this irn */
133 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
134 int n_neighs; /**< length of the interfering neighbours array. */
135 int int_aff_neigh; /**< number of interfering affinity neighbours (-1 = not yet computed) */
136 int col; /**< color currently assigned */
137 int init_col; /**< the initial color */
138 int tmp_col; /**< a temporarily assigned color (negative if none) */
139 unsigned fixed : 1; /**< the color is fixed */
140 struct list_head list; /**< Queue for coloring undo. */
141 real_t constr_factor; /**< (1 + number of forbidden colors) / n_regs; higher = more constrained */
145 * In case there is no phase information for irn, initialize it.
 *
 * Allocates a co_mst_irn_t on the environment obstack and seeds it with the
 * node's current register, its admissible color set, its constraint factor
 * and its cached list of interfering neighbours.
147 static co_mst_irn_t *co_mst_irn_init(co_mst_env_t *env, const ir_node *irn)
149 co_mst_irn_t *res = OALLOC(&env->obst, co_mst_irn_t);
151 const arch_register_req_t *req;
152 neighbours_iter_t nodes_it;
160 res->int_neighs = NULL;
161 res->int_aff_neigh = 0;
/* start from the color the register allocator already assigned */
162 res->col = arch_get_irn_register(irn)->index;
163 res->init_col = res->col;
164 INIT_LIST_HEAD(&res->list);
166 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
168 /* set admissible registers */
169 res->adm_colors = bitset_obstack_alloc(&env->obst, env->n_regs);
171 /* Exclude colors not assignable to the irn */
172 req = arch_get_irn_register_req(irn);
173 if (arch_register_req_is(req, limited)) {
174 rbitset_copy_to_bitset(req->limited, res->adm_colors);
175 /* exclude global ignore registers as well */
176 bitset_and(res->adm_colors, env->allocatable_regs);
/* (else: unrestricted nodes may use any allocatable register) */
178 bitset_copy(res->adm_colors, env->allocatable_regs);
181 /* compute the constraint factor: (1 + number of forbidden colors) / n_regs */
182 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
184 /* set the number of interfering affinity neighbours to -1, they are calculated later */
185 res->int_aff_neigh = -1;
187 /* build list of interfering neighbours (ignore nodes are skipped) */
189 be_ifg_foreach_neighbour(env->ifg, &nodes_it, irn, neigh) {
190 if (!arch_irn_is_ignore(neigh)) {
191 obstack_ptr_grow(&env->obst, neigh);
195 res->int_neighs = (ir_node**)obstack_finish(&env->obst);
/* Returns the coalescing info for @p node, creating and caching it in the
 * node map on first use (lazy initialization via co_mst_irn_init). */
200 static co_mst_irn_t *get_co_mst_irn(co_mst_env_t *env, const ir_node *node)
202 co_mst_irn_t *res = ir_nodemap_get(co_mst_irn_t, &env->map, node);
204 res = co_mst_irn_init(env, node);
205 ir_nodemap_insert(&env->map, node, res);
/* Predicate type deciding whether @p node is acceptable for color @p col. */
210 typedef int decide_func_t(const co_mst_irn_t *node, int col);
215 * Write a chunk to stderr for debugging.
217 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
/* print the weight only when it is known to be up to date */
221 if (c->weight_consistent)
222 ir_fprintf(stderr, " $%d ", c->weight);
223 ir_fprintf(stderr, "{");
224 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
225 const ir_node *n = c->n[i];
226 ir_fprintf(stderr, " %+F,", n);
228 ir_fprintf(stderr, "}");
232 * Dump all admissible colors to stderr.
234 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
238 if (bitset_popcount(node->adm_colors) < 1)
239 fprintf(stderr, "no admissible colors?!?");
241 bitset_foreach(node->adm_colors, idx) {
242 ir_fprintf(stderr, " %zu", idx);
248 * Dump color-cost pairs to stderr.
250 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
253 for (i = 0; i < env->n_regs; ++i)
254 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
257 #endif /* DEBUG_libfirm */
/* Effective color of a node: the temporary color if one is set, otherwise
 * the currently assigned color. */
259 static inline int get_mst_irn_col(const co_mst_irn_t *node)
261 return node->tmp_col >= 0 ? node->tmp_col : node->col;
265 * @return 1 if node @p node has color @p col, 0 otherwise.
267 static int decider_has_color(const co_mst_irn_t *node, int col)
269 return get_mst_irn_col(node) == col;
273 * @return 1 if node @p node does not have color @p col, 0 otherwise.
275 static int decider_hasnot_color(const co_mst_irn_t *node, int col)
277 return get_mst_irn_col(node) != col;
281 * Always returns true (accepts every node regardless of color).
283 static int decider_always_yes(const co_mst_irn_t *node, int col)
290 /** compares two affinity edges by its weight */
/* Descending weight order; ties are broken on src/tgt node indices so the
 * sort order (and thus the heuristic's result) is deterministic. */
291 static int cmp_aff_edge(const void *a, const void *b)
293 const aff_edge_t *e1 = (const aff_edge_t*)a;
294 const aff_edge_t *e2 = (const aff_edge_t*)b;
296 if (e2->weight == e1->weight) {
297 if (e2->src->node_idx == e1->src->node_idx)
298 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
300 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
302 /* sort in descending order */
303 return QSORT_CMP(e2->weight, e1->weight);
306 /** compares two color-cost pairs, ascending cost */
307 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
309 const col_cost_t *c1 = (const col_cost_t*)a;
310 const col_cost_t *c2 = (const col_cost_t*)b;
311 real_t diff = c1->cost - c2->cost;
/* equal costs: fall back to the color index for a stable, deterministic order */
318 return QSORT_CMP(c1->col, c2->col);
/** compares two color-cost pairs, descending cost */
321 static int cmp_col_cost_gt(const void *a, const void *b)
323 const col_cost_t *c1 = (const col_cost_t*)a;
324 const col_cost_t *c2 = (const col_cost_t*)b;
325 real_t diff = c2->cost - c1->cost;
332 return QSORT_CMP(c1->col, c2->col);
336 * Creates a new affinity chunk
 *
 * The chunk is allocated with a trailing color_affinity array of n_regs
 * entries, given a fresh id and linked into the environment's chunk list.
338 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
340 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
341 c->n = NEW_ARR_F(const ir_node *, 0);
342 c->interfere = NEW_ARR_F(const ir_node *, 0);
344 c->weight_consistent = 0;
346 c->id = ++last_chunk_id;
348 list_add(&c->list, &env->chunklist);
353 * Frees all memory allocated by an affinity chunk.
355 static inline void delete_aff_chunk(aff_chunk_t *c)
358 DEL_ARR_F(c->interfere);
365 * binary search of sorted nodes.
367 * @return the position where n is found in the array arr or ~pos
368 * if the node is not in the array (~pos encodes the insertion point).
370 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
372 int hi = ARR_LEN(arr);
/* overflow-safe midpoint computation */
376 int md = lo + ((hi - lo) >> 1);
389 /** Check if a node n can be found inside arr (arr must be sorted). */
390 static int node_contains(const ir_node **arr, const ir_node *n)
392 int i = nodes_bsearch(arr, n);
397 * Insert a node into the sorted nodes list.
399 * @return 1 if the node was inserted, 0 else
401 static int nodes_insert(const ir_node ***arr, const ir_node *irn)
403 int idx = nodes_bsearch(*arr, irn);
406 int i, n = ARR_LEN(*arr);
/* grow the flexible array by one, then shift the tail to open a slot at idx */
409 ARR_APP1(const ir_node *, *arr, irn);
414 for (i = n - 1; i >= idx; --i)
423 * Adds a node to an affinity chunk
 *
 * Inserts the node into the chunk's sorted node array and merges the node's
 * cached interference neighbours into the chunk's interference array.
425 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
/* already present -> nothing to do */
429 if (! nodes_insert(&c->n, node->irn))
432 c->weight_consistent = 0;
435 for (i = node->n_neighs - 1; i >= 0; --i) {
436 ir_node *neigh = node->int_neighs[i];
437 nodes_insert(&c->interfere, neigh);
442 * Check if affinity chunk @p chunk interferes with node @p irn.
444 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
446 return node_contains(chunk->interfere, irn);
450 * Check if there are interference edges from c1 to c2.
452 * @param c2 Another chunk
453 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
455 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
462 /* check if there is a node in c2 having an interfering neighbor in c1 */
463 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
464 const ir_node *irn = c2->n[i];
466 if (node_contains(c1->interfere, irn))
473 * Returns the affinity chunk of @p irn or creates a new
474 * one with @p irn as element if there is none assigned.
476 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
478 co_mst_irn_t *node = get_co_mst_irn(env, irn);
483 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
484 * are no interference edges from chunk(src) to chunk(tgt)).
485 * @return 1 if successful, 0 if not possible
487 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
489 aff_chunk_t *c1 = get_aff_chunk(env, src);
490 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
493 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
495 DBG_AFF_CHUNK(env, LEVEL_4, c1);
497 DB((dbg, LEVEL_4, "{%+F}", src));
499 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
501 DBG_AFF_CHUNK(env, LEVEL_4, c2);
503 DB((dbg, LEVEL_4, "{%+F}", tgt));
505 DB((dbg, LEVEL_4, "\n"));
510 /* no chunk exists: check that src and tgt do not interfere directly */
511 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
514 for (i = mirn->n_neighs - 1; i >= 0; --i) {
/* tgt is an interference neighbour of src -> the two cannot be merged */
515 if (mirn->int_neighs[i] == tgt)
519 /* create one containing both nodes */
520 c1 = new_aff_chunk(env);
521 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
522 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
526 /* c2 already exists: pull src into c2 if that causes no interference */
527 if (! aff_chunk_interferes(c2, src)) {
528 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
532 } else if (c2 == NULL) {
533 /* c1 already exists: pull tgt into c1 if that causes no interference */
534 if (! aff_chunk_interferes(c1, tgt)) {
535 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
/* both chunks exist: merge all of c2 (nodes and interference set) into c1 */
538 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
541 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
542 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
544 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
545 const ir_node *irn = c2->interfere[idx];
546 nodes_insert(&c1->interfere, irn);
549 c1->weight_consistent = 0;
551 delete_aff_chunk(c2);
554 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
558 DB((dbg, LEVEL_4, " ... absorbed\n"));
563 * Assures that the weight of the given chunk is consistent.
 *
 * Recomputes the chunk's per-color affinity vector (each constrained node
 * votes for its admissible colors with its constraint factor) and its
 * weight from affinity edges staying inside the chunk.
565 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
567 if (! c->weight_consistent) {
571 for (i = 0; i < env->n_regs; ++i) {
572 c->color_affinity[i].col = i;
573 c->color_affinity[i].cost = REAL(0.0);
576 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
577 const ir_node *n = c->n[idx];
578 const affinity_node_t *an = get_affinity_info(env->co, n);
579 co_mst_irn_t *node = get_co_mst_irn(env, n);
/* constrained nodes vote for their admissible colors */
582 if (node->constr_factor > REAL(0.0)) {
583 bitset_foreach (node->adm_colors, col)
584 c->color_affinity[col].cost += node->constr_factor;
/* only count affinity edges whose other end is also in this chunk */
588 co_gs_foreach_neighb(an, neigh) {
589 const ir_node *m = neigh->irn;
591 if (arch_irn_is_ignore(m))
594 w += node_contains(c->n, m) ? neigh->costs : 0;
/* normalize the color affinities by the number of nodes in the chunk */
599 for (i = 0; i < env->n_regs; ++i)
600 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
603 // c->weight = bitset_popcount(c->nodes);
604 c->weight_consistent = 1;
609 * Count the number of interfering affinity neighbours
 *
 * @return how many affinity neighbours of @p an also appear in the node's
 * cached interference neighbour list (ignore nodes are skipped).
611 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
613 const ir_node *irn = an->irn;
614 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
617 co_gs_foreach_neighb(an, neigh) {
618 const ir_node *n = neigh->irn;
621 if (arch_irn_is_ignore(n))
624 /* check if the affinity neighbour interferes */
625 for (i = 0; i < node->n_neighs; ++i) {
626 if (node->int_neighs[i] == n) {
637 * Build chunks of nodes connected by affinity edges.
638 * We start at the heaviest affinity edge.
639 * The chunks of the two edge-defining nodes will be
640 * merged if there are no interference edges from one
641 * chunk to the other.
643 static void build_affinity_chunks(co_mst_env_t *env)
645 nodes_iter_t nodes_it;
646 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
651 /* at first we create the affinity edge objects */
652 be_ifg_foreach_node(env->ifg, &nodes_it, n) {
653 int n_idx = get_irn_idx(n);
657 if (arch_irn_is_ignore(n))
660 n1 = get_co_mst_irn(env, n);
661 an = get_affinity_info(env->co, n);
/* lazily compute the interfering-affinity-neighbour count of n */
664 if (n1->int_aff_neigh < 0)
665 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
667 /* build the affinity edges */
668 co_gs_foreach_neighb(an, neigh) {
669 const ir_node *m = neigh->irn;
670 int m_idx = get_irn_idx(m);
672 /* record the edge in only one direction */
677 /* skip ignore nodes */
678 if (arch_irn_is_ignore(m))
684 n2 = get_co_mst_irn(env, m);
685 if (n2->int_aff_neigh < 0) {
686 affinity_node_t *am = get_affinity_info(env->co, m);
687 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
690 * these weights are pure hackery ;-).
691 * It's not chriswue's fault but mine.
693 edge.weight = neigh->costs;
694 ARR_APP1(aff_edge_t, edges, edge);
700 /* now: sort edges and build the affinity chunks */
701 len = ARR_LEN(edges);
702 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
703 for (i = 0; i < len; ++i) {
704 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
706 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
709 /* now insert all chunks into a priority queue */
710 list_for_each_entry(aff_chunk_t, curr_chunk, &env->chunklist, list) {
711 aff_chunk_assure_weight(env, curr_chunk);
713 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
714 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
715 DBG((dbg, LEVEL_1, "\n"));
717 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* create singleton chunks for all nodes not assigned to any chunk yet */
720 for (pn = 0; pn < ARR_LEN(env->map.data); ++pn) {
721 co_mst_irn_t *mirn = (co_mst_irn_t*)env->map.data[pn];
724 if (mirn->chunk != NULL)
727 /* no chunk is allocated so far, do it now */
728 aff_chunk_t *curr_chunk = new_aff_chunk(env);
729 aff_chunk_add_node(curr_chunk, mirn);
731 aff_chunk_assure_weight(env, curr_chunk);
733 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
734 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
735 DBG((dbg, LEVEL_1, "\n"));
737 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* Orders the nodes of a chunk: starts at the node with the largest summed
 * affinity costs and grows outwards along affinity edges via a priority
 * queue.  In the visited bitset, SET means "still pending", cleared means
 * "already queued/ordered". */
743 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
745 pqueue_t *grow = new_pqueue();
746 ir_node const *max_node = NULL;
/* find the chunk node with the maximum summed affinity edge costs */
750 for (i = ARR_LEN(chunk->n); i != 0;) {
751 const ir_node *irn = chunk->n[--i];
752 affinity_node_t *an = get_affinity_info(env->co, irn);
755 if (arch_irn_is_ignore(irn))
759 co_gs_foreach_neighb(an, neigh)
762 if (w > max_weight) {
770 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
/* mark all chunk nodes as pending */
772 for (i = ARR_LEN(chunk->n); i != 0;)
773 bitset_set(visited, get_irn_idx(chunk->n[--i]));
775 pqueue_put(grow, (void *) max_node, max_weight);
776 bitset_clear(visited, get_irn_idx(max_node));
778 while (!pqueue_empty(grow)) {
779 ir_node *irn = (ir_node*)pqueue_pop_front(grow);
780 affinity_node_t *an = get_affinity_info(env->co, irn);
782 if (arch_irn_is_ignore(irn))
785 assert(i <= ARR_LEN(chunk->n));
790 /* enqueue all pending affinity neighbours of the popped node */
791 co_gs_foreach_neighb(an, neigh) {
792 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
794 if (bitset_is_set(visited, get_irn_idx(node->irn))) {
795 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
796 bitset_clear(visited, get_irn_idx(node->irn));
802 bitset_free(visited);
807 * Greedily collect affinity neighbours into the new chunk @p chunk starting at node @p node.
 *
 * Breadth-first search along affinity edges; @p decider and @p col filter
 * which neighbours may join, @p visited prevents revisiting nodes.
809 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
810 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
812 waitq *nodes = new_waitq();
814 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
816 /* init queue and chunk */
817 waitq_put(nodes, node);
818 bitset_set(visited, get_irn_idx(node->irn));
819 aff_chunk_add_node(chunk, node);
820 DB((dbg, LEVEL_1, " %+F", node->irn));
822 /* as long as there are nodes in the queue */
823 while (! waitq_empty(nodes)) {
824 co_mst_irn_t *n = (co_mst_irn_t*)waitq_get(nodes);
825 affinity_node_t *an = get_affinity_info(env->co, n->irn);
827 /* check all affinity neighbors */
829 co_gs_foreach_neighb(an, neigh) {
830 const ir_node *m = neigh->irn;
831 int m_idx = get_irn_idx(m);
834 if (arch_irn_is_ignore(m))
837 n2 = get_co_mst_irn(env, m);
839 if (! bitset_is_set(visited, m_idx) &&
842 ! aff_chunk_interferes(chunk, m) &&
843 node_contains(orig_chunk->n, m))
846 following conditions are met:
847 - neighbour is not visited
848 - neighbour likes the color
849 - neighbour has not yet a fixed color
850 - the new chunk doesn't interfere with the neighbour
851 - neighbour belongs or belonged once to the original chunk
853 bitset_set(visited, m_idx);
854 aff_chunk_add_node(chunk, n2);
855 DB((dbg, LEVEL_1, " %+F", n2->irn));
856 /* enqueue for further search */
857 waitq_put(nodes, n2);
863 DB((dbg, LEVEL_1, "\n"));
869 * Fragment the given chunk into chunks having given color and not having given color.
 *
 * Each fragment is grown with expand_chunk_from() using either the
 * "has color" or "has not color" decider, depending on the start node's
 * current color.  All created fragments are queued into @p tmp; the best
 * (highest-weight) fragment found is tracked and returned.
871 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
873 bitset_t *visited = bitset_malloc(get_irg_last_idx(env->co->irg));
875 aff_chunk_t *best = NULL;
877 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
880 aff_chunk_t *tmp_chunk;
881 decide_func_t *decider;
/* already swallowed by an earlier fragment */
885 if (bitset_is_set(visited, get_irn_idx(irn)))
888 node = get_co_mst_irn(env, irn);
890 if (get_mst_irn_col(node) == col) {
891 decider = decider_has_color;
893 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
896 decider = decider_hasnot_color;
898 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
901 /* create a new chunk starting at current node */
902 tmp_chunk = new_aff_chunk(env);
903 waitq_put(tmp, tmp_chunk);
904 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
905 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
907 /* remember the local best */
908 aff_chunk_assure_weight(env, tmp_chunk);
909 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
913 assert(best && "No chunk found?");
914 bitset_free(visited);
919 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
920 * ATTENTION: the queue is empty after calling this function!
922 static inline void reject_coloring(struct list_head *nodes)
924 DB((dbg, LEVEL_4, "\treject coloring for"));
925 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
926 DB((dbg, LEVEL_4, " %+F", n->irn));
927 assert(n->tmp_col >= 0);
929 list_del_init(&n->list);
931 DB((dbg, LEVEL_4, "\n"));
/* Makes the temporary coloring permanent for all nodes in @p nodes and
 * empties the list. */
934 static inline void materialize_coloring(struct list_head *nodes)
936 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
937 assert(n->tmp_col >= 0);
940 list_del_init(&n->list);
/* Temporarily assigns color @p col to @p node and records the node on the
 * @p changed list so the decision can later be undone (reject_coloring)
 * or committed (materialize_coloring).  The node must be loose, unlisted
 * and the color must be admissible. */
944 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
947 assert(!node->fixed);
948 assert(node->tmp_col < 0);
949 assert(node->list.next == &node->list && node->list.prev == &node->list);
950 assert(bitset_is_set(node->adm_colors, col));
952 list_add_tail(&node->list, changed);
/* A node is "loose" if its color is neither fixed nor temporarily set. */
956 static inline int is_loose(co_mst_irn_t *node)
958 return !node->fixed && node->tmp_col < 0;
962 * Determines the costs for each color if it would be assigned to node @p node.
 *
 * Base rating of a color is the node's constraint factor if the color is
 * admissible and 0.0 otherwise (0.0 acts as "infeasible" throughout this
 * file); the rating is then scaled down by the fraction of loose
 * interfering neighbours occupying that color.
964 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
966 int *neigh_cols = ALLOCAN(int, env->n_regs);
971 for (i = 0; i < env->n_regs; ++i) {
974 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
/* inspect the colors of all interfering neighbours */
977 for (i = 0; i < node->n_neighs; ++i) {
978 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
979 int col = get_mst_irn_col(n);
/* NOTE(review): guarding condition not visible in this excerpt — appears
 * to mark the color of a non-loose neighbour as infeasible; confirm. */
984 costs[col].cost = REAL(0.0);
988 coeff = REAL(1.0) / n_loose;
989 for (i = 0; i < env->n_regs; ++i)
990 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
994 /* need forward declaration due to recursive call */
995 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
998 * Tries to change node to a color other than @p exclude_col.
999 * @return 1 if succeeded, 0 otherwise.
1001 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
1003 int col = get_mst_irn_col(node);
1006 /* neighbour has already a different color -> good, temporarily fix it */
1007 if (col != exclude_col) {
1009 set_temp_color(node, col, changed);
1013 /* The node has the color it should not have _and_ has not been visited yet. */
1014 if (is_loose(node)) {
1015 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
1017 /* Get the costs for giving the node a specific color. */
1018 determine_color_costs(env, node, costs);
1020 /* Since the node must not keep exclude_col, mark that color infeasible (cost 0.0 means forbidden, see recolor_nodes). */
1021 costs[exclude_col].cost = REAL(0.0);
1023 /* sort the colors by preference, most preferred (highest rating) first. */
1024 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
1026 /* Try recoloring the node using the color list. */
1027 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1034 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1035 * ATTENTION: Expect @p costs already sorted with the most preferred color first.
1036 * @return 1 if coloring could be applied, 0 otherwise.
1038 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
1041 struct list_head local_changed;
/* track the deepest recursion reached (for statistics) */
1044 if (depth > *max_depth)
1047 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1048 DBG_COL_COST(env, LEVEL_4, costs);
1049 DB((dbg, LEVEL_4, "\n"));
/* bound the search: give up beyond the configured recursion limit */
1051 if (depth >= recolor_limit) {
1052 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
/* try the colors in order of preference */
1056 for (i = 0; i < env->n_regs; ++i) {
1057 int tgt_col = costs[i].col;
1061 /* A rating of 0.0 marks an infeasible color; since the list is sorted, all following colors are infeasible too -> bail out. */
1062 if (costs[i].cost == REAL(0.0)) {
1063 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1067 /* Set the new color of the node and mark the node as temporarily fixed. */
1068 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1069 INIT_LIST_HEAD(&local_changed);
1070 set_temp_color(node, tgt_col, &local_changed);
1071 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1073 /* try to color all interfering neighbours with current color forbidden */
1074 for (j = 0; j < node->n_neighs; ++j) {
1078 neigh = node->int_neighs[j];
1080 if (arch_irn_is_ignore(neigh))
1083 nn = get_co_mst_irn(env, neigh);
1084 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1085 neigh, j, nn->fixed, nn->tmp_col, nn->col));
1088 Try to change the color of the neighbor and record all nodes which
1089 get changed in the tmp list. Add this list to the "changed" list for
1090 that color. If we did not succeed to change the color of the neighbor,
1091 we bail out and try the next color.
1093 if (get_mst_irn_col(nn) == tgt_col) {
1094 /* try to color neighbour with tgt_col forbidden */
1095 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1103 We managed to assign the target color to all neighbors, so from the perspective
1104 of the current node, every thing was ok and we can return safely.
1107 /* append the local_changed ones to global ones */
1108 list_splice(&local_changed, changed);
1112 /* coloring of neighbours failed, so we try next color */
1113 reject_coloring(&local_changed);
1117 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1122 * Tries to bring node @p node and all its neighbours to color @p tgt_col.
1123 * @return 1 if color @p tgt_col could be applied, 0 otherwise
1125 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
1127 int col = get_mst_irn_col(node);
1129 /* if node already has the target color -> good, temporary fix it */
1130 if (col == tgt_col) {
1131 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1133 set_temp_color(node, tgt_col, changed);
1138 Node has not yet a fixed color and target color is admissible
1139 -> try to recolor node and its affinity neighbours
1141 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
/* precomputed cost vector for tgt_col — presumably ordered so tgt_col is
 * preferred; TODO confirm where single_cols is initialized */
1142 col_cost_t *costs = env->single_cols[tgt_col];
1143 int res, max_depth, trip;
1148 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1149 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1150 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1151 stat_ev_int("heur4_recolor_depth_max", max_depth);
1152 stat_ev_int("heur4_recolor_trip", trip);
/* diagnostic output explaining why the color change was impossible */
1158 #ifdef DEBUG_libfirm
1159 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1160 if (!is_loose(node))
1161 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1163 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1164 dbg_admissible_colors(env, node);
1165 DB((dbg, LEVEL_4, ")\n"));
1174 * Tries to color an affinity chunk (or at least a part of it).
1175 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1177 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
1179 aff_chunk_t *best_chunk = NULL;
1180 int n_nodes = ARR_LEN(c->n);
1181 int best_color = -1;
1182 int n_int_chunks = 0;
1183 waitq *tmp_chunks = new_waitq();
1184 waitq *best_starts = NULL;
1185 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1192 struct list_head changed;
1194 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1195 DBG_AFF_CHUNK(env, LEVEL_2, c);
1196 DB((dbg, LEVEL_2, "\n"));
1198 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1200 ++env->chunk_visited;
1202 /* compute color preference */
1203 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1204 const ir_node *n = c->interfere[pos];
1205 co_mst_irn_t *node = get_co_mst_irn(env, n);
1206 aff_chunk_t *chunk = node->chunk;
1208 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1209 assert(!chunk->deleted);
1210 chunk->visited = env->chunk_visited;
1213 aff_chunk_assure_weight(env, chunk);
1214 for (i = 0; i < env->n_regs; ++i)
1215 order[i].cost += chunk->color_affinity[i].cost;
1219 for (i = 0; i < env->n_regs; ++i) {
1220 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1222 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1225 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1227 DBG_COL_COST(env, LEVEL_2, order);
1228 DB((dbg, LEVEL_2, "\n"));
1230 /* check which color is the "best" for the given chunk.
1231 * if we found a color which was ok for all nodes, we take it
1232 * and do not look further. (see did_all flag usage below.)
1233 * If we have many colors which fit all nodes it is hard to decide
1234 * which one to take anyway.
1235 * TODO Sebastian: Perhaps we should at all nodes and figure out
1236 * a suitable color using costs as done above (determine_color_costs).
1238 for (i = 0; i < env->n_regs; ++i) {
1239 int col = order[i].col;
1241 aff_chunk_t *local_best;
1244 /* skip ignore colors */
1245 if (!bitset_is_set(env->allocatable_regs, col))
1248 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1251 good_starts = new_waitq();
1253 /* try to bring all nodes of given chunk to the current color. */
1254 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1255 const ir_node *irn = c->n[idx];
1256 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1259 assert(! node->fixed && "Node must not have a fixed color.");
1260 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1263 The order of the colored nodes is important, so we record the successfully
1264 colored ones in the order they appeared.
1266 INIT_LIST_HEAD(&changed);
1268 good = change_node_color(env, node, col, &changed);
1269 stat_ev_tim_pop("heur4_recolor");
1271 waitq_put(good_starts, node);
1272 materialize_coloring(&changed);
1277 reject_coloring(&changed);
1279 n_succeeded += good;
1280 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1283 /* unfix all nodes */
1284 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1285 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1289 /* try next color when failed */
1290 if (n_succeeded == 0) {
1291 del_waitq(good_starts);
1295 /* fragment the chunk according to the coloring */
1296 local_best = fragment_chunk(env, col, c, tmp_chunks);
1298 /* search the best of the good list
1299 and make it the new best if it is better than the current */
1301 aff_chunk_assure_weight(env, local_best);
1303 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1304 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1306 if (! best_chunk || best_chunk->weight < local_best->weight) {
1307 best_chunk = local_best;
1310 del_waitq(best_starts);
1311 best_starts = good_starts;
1312 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1314 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1315 del_waitq(good_starts);
1319 del_waitq(good_starts);
1322 /* if all nodes were recolored, bail out */
1323 if (n_succeeded == n_nodes)
1327 stat_ev_int("heur4_colors_tried", i);
1329 /* free all intermediate created chunks except best one */
1330 while (! waitq_empty(tmp_chunks)) {
1331 aff_chunk_t *tmp = (aff_chunk_t*)waitq_get(tmp_chunks);
1332 if (tmp != best_chunk)
1333 delete_aff_chunk(tmp);
1335 del_waitq(tmp_chunks);
1337 /* return if coloring failed */
1340 del_waitq(best_starts);
1344 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1345 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1346 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1348 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1349 const ir_node *irn = best_chunk->n[idx];
1350 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1353 /* bring the node to the color. */
1354 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1355 INIT_LIST_HEAD(&changed);
1357 res = change_node_color(env, node, best_color, &changed);
1358 stat_ev_tim_pop("heur4_recolor");
1360 materialize_coloring(&changed);
1363 assert(list_empty(&changed));
1366 /* remove the nodes in best chunk from original chunk */
1367 len = ARR_LEN(best_chunk->n);
1368 for (idx = 0; idx < len; ++idx) {
1369 const ir_node *irn = best_chunk->n[idx];
1370 int pos = nodes_bsearch(c->n, irn);
1375 len = ARR_LEN(c->n);
1376 for (idx = nidx = 0; idx < len; ++idx) {
1377 const ir_node *irn = c->n[idx];
1383 ARR_SHRINKLEN(c->n, nidx);
1386 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1387 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1388 const ir_node *n = c->n[idx];
1389 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1393 /* fragment the remaining chunk */
1394 visited = bitset_malloc(get_irg_last_idx(env->co->irg));
1395 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1396 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1398 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1399 const ir_node *irn = c->n[idx];
1400 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1401 aff_chunk_t *new_chunk = new_aff_chunk(env);
1402 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1404 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1405 aff_chunk_assure_weight(env, new_chunk);
1406 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1410 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1411 const ir_node *n = best_chunk->n[idx];
1412 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1416 /* clear obsolete chunks and free some memory */
1417 delete_aff_chunk(best_chunk);
1418 bitset_free(visited);
1420 del_waitq(best_starts);
1422 stat_ev_ctx_pop("heur4_color_chunk");
1426 * Main driver for mst safe coalescing algorithm.
/* Main driver of the MST-based ("heur4") copy-minimization heuristic for
 * one register class: builds affinity chunks, colors them from a priority
 * queue ordered by chunk weight, then writes the resulting colors back as
 * architecture registers.
 * NOTE(review): this is a partial extract -- braces, some declarations and
 * statements of the original function are not visible here, so only
 * comments were added; all code bytes are unchanged. */
1428 static int co_solve_heuristic_mst(copy_opt_t *co)
1430 unsigned n_regs = co->cls->n_regs;
1431 bitset_t *allocatable_regs = bitset_alloca(n_regs);
1435 co_mst_env_t mst_env;
/* Per-node MST data lives in an ir_node map backed by an obstack. */
1442 ir_nodemap_init(&mst_env.map, co->irg);
1443 obstack_init(&mst_env.obst);
1445 be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
1447 mst_env.n_regs = n_regs;
1448 mst_env.chunks = new_pqueue();
1450 mst_env.allocatable_regs = allocatable_regs;
1451 mst_env.ifg = co->cenv->ifg;
1452 INIT_LIST_HEAD(&mst_env.chunklist);
1453 mst_env.chunk_visited = 0;
1454 mst_env.single_cols = OALLOCN(&mst_env.obst, col_cost_t*, n_regs);
/* Pre-build one col_cost vector per register: all entries get cost 0.0,
 * then slot 0 of each vector gets cost 1.0 (the preferred color).
 * NOTE(review): the assignments of vec[j].col are on lines not visible
 * in this extract -- confirm against the full file. */
1456 for (i = 0; i < n_regs; ++i) {
1457 col_cost_t *vec = OALLOCN(&mst_env.obst, col_cost_t, n_regs);
1459 mst_env.single_cols[i] = vec;
1460 for (j = 0; j < n_regs; ++j) {
1462 vec[j].cost = REAL(0.0);
1466 vec[0].cost = REAL(1.0);
1469 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1471 /* build affinity chunks */
1473 build_affinity_chunks(&mst_env);
1474 stat_ev_tim_pop("heur4_initial_chunk");
/* Chunks are processed highest-weight first; color_aff_chunk() may push
 * fragmented remainders back onto the queue, so this loop drains those too. */
1476 /* color chunks as long as there are some */
1477 while (! pqueue_empty(mst_env.chunks)) {
1478 aff_chunk_t *chunk = (aff_chunk_t*)pqueue_pop_front(mst_env.chunks);
1480 color_aff_chunk(&mst_env, chunk);
1481 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1482 delete_aff_chunk(chunk);
/* Walk every mapped node by index and materialize its chosen color. */
1485 /* apply coloring */
1486 for (pn = 0; pn < ARR_LEN(mst_env.map.data); ++pn) {
1487 co_mst_irn_t *mirn = (co_mst_irn_t*)mst_env.map.data[pn];
1488 const arch_register_t *reg;
/* Recover the ir_node from its index; ignore-nodes keep their register. */
1491 irn = get_idx_irn(co->irg, pn);
1492 if (arch_irn_is_ignore(irn))
1495 /* skip nodes where color hasn't changed */
1496 if (mirn->init_col == mirn->col)
/* Translate the color index into the class register and assign it. */
1499 reg = arch_register_for_index(co->cls, mirn->col);
1500 arch_set_irn_register(irn, reg);
1501 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
/* Tear down in reverse order of setup: queue, obstack (frees single_cols
 * and all per-node data at once), then the node map. */
1504 /* free allocated memory */
1505 del_pqueue(mst_env.chunks);
1506 obstack_free(&mst_env.obst, NULL);
1507 ir_nodemap_destroy(&mst_env.map);
1509 stat_ev_tim_pop("heur4_total");
/* Command-line options of the heur4 coalescer (registered by
 * be_init_copyheur4 under be.ra.chordal.co.heur4):
 *   limit -- recoloring attempt limit (recolor_limit, default 7)
 *   di    -- dislike influence factor (dislike_influence, default 0.1) */
1514 static const lc_opt_table_entry_t options[] = {
1515 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1516 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
/* Module constructor: registers the "heur4" copy-optimization algorithm
 * with the copy-opt framework, hooks its option table into the option
 * tree at be.ra.chordal.co.heur4, and registers the debug module.
 * NOTE(review): partial extract -- braces and a terminator line are not
 * visible here; the only code change is restoring "&copyheur", which had
 * been mangled into the mojibake "(c)heur" by an HTML-entity conversion
 * of "&copy". */
1520 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4)
1521 void be_init_copyheur4(void)
/* Walk/create the option-group path be -> ra -> chordal -> co -> heur4. */
1523 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1524 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1525 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1526 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1527 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
/* Algorithm descriptor handed to the copy-opt framework; static so its
 * address stays valid after this constructor returns. */
1529 static co_algo_info copyheur = {
1530 co_solve_heuristic_mst, 0
1533 lc_opt_add_table(heur4_grp, options);
1534 be_register_copyopt("heur4", &copyheur);
1536 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");