2 * Copyright (C) 1995-2011 University of Karlsruhe. All rights reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
27 * This is the C implementation of the mst algorithm
28 * originally written in Java by Sebastian Hack.
29 * (also known as "heur3" :)
30 * Performs simple copy minimization.
34 #define DISABLE_STATEV
41 #include "raw_bitset.h"
42 #include "irnodemap.h"
58 #include "becopyopt_t.h"
62 #define COL_COST_INFEASIBLE DBL_MAX
63 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
64 #define NEIGHBOUR_CONSTR_COSTS 64.0
69 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while (0)
70 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while (0)
72 static firm_dbg_module_t *dbg = NULL;
76 #define DBG_AFF_CHUNK(env, level, chunk)
77 #define DBG_COL_COST(env, level, cost)
82 #define REAL(C) (C ## f)
84 static unsigned last_chunk_id = 0;
85 static int recolor_limit = 7;
86 static double dislike_influence = REAL(0.1);
88 typedef struct col_cost_t {
96 typedef struct aff_chunk_t {
97 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
98 const ir_node **interfere; /**< An ARR_F containing all inference. */
99 int weight; /**< Weight of this chunk */
100 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
101 unsigned deleted : 1; /**< For debugging: Set if the was deleted. */
102 unsigned id; /**< An id of this chunk. */
105 col_cost_t color_affinity[1];
111 typedef struct aff_edge_t {
112 const ir_node *src; /**< Source node. */
113 const ir_node *tgt; /**< Target node. */
114 int weight; /**< The weight of this edge. */
117 /* main coalescing environment */
118 typedef struct co_mst_env_t {
119 int n_regs; /**< number of regs in class */
120 int k; /**< number of non-ignore registers in class */
121 bitset_t *allocatable_regs; /**< set containing all global ignore registers */
122 ir_nodemap map; /**< phase object holding data for nodes */
124 pqueue_t *chunks; /**< priority queue for chunks */
125 list_head chunklist; /**< list holding all chunks */
126 be_ifg_t *ifg; /**< the interference graph */
127 copy_opt_t *co; /**< the copy opt object */
128 unsigned chunk_visited;
129 col_cost_t **single_cols;
132 /* stores coalescing related information for a node */
133 typedef struct co_mst_irn_t {
134 const ir_node *irn; /**< the irn this information belongs to */
135 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
136 bitset_t *adm_colors; /**< set of admissible colors for this irn */
137 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
138 int n_neighs; /**< length of the interfering neighbours array. */
139 int int_aff_neigh; /**< number of interfering affinity neighbours */
140 int col; /**< color currently assigned */
141 int init_col; /**< the initial color */
142 int tmp_col; /**< a temporary assigned color */
143 unsigned fixed : 1; /**< the color is fixed */
144 struct list_head list; /**< Queue for coloring undo. */
145 real_t constr_factor;
149 * In case there is no phase information for irn, initialize it.
151 static co_mst_irn_t *co_mst_irn_init(co_mst_env_t *env, const ir_node *irn)
153 co_mst_irn_t *res = OALLOC(&env->obst, co_mst_irn_t);
155 const arch_register_req_t *req;
156 neighbours_iter_t nodes_it;
164 res->int_neighs = NULL;
165 res->int_aff_neigh = 0;
166 res->col = arch_register_get_index(arch_get_irn_register(irn));
167 res->init_col = res->col;
168 INIT_LIST_HEAD(&res->list);
170 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
172 /* set admissible registers */
173 res->adm_colors = bitset_obstack_alloc(&env->obst, env->n_regs);
175 /* Exclude colors not assignable to the irn */
176 req = arch_get_irn_register_req(irn);
177 if (arch_register_req_is(req, limited)) {
178 rbitset_copy_to_bitset(req->limited, res->adm_colors);
180 bitset_set_all(res->adm_colors);
183 /* exclude global ignore registers as well */
184 bitset_and(res->adm_colors, env->allocatable_regs);
186 /* compute the constraint factor */
187 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcount(res->adm_colors)) / env->n_regs;
189 /* set the number of interfering affinity neighbours to -1, they are calculated later */
190 res->int_aff_neigh = -1;
192 /* build list of interfering neighbours */
194 be_ifg_foreach_neighbour(env->ifg, &nodes_it, irn, neigh) {
195 if (!arch_irn_is_ignore(neigh)) {
196 obstack_ptr_grow(&env->obst, neigh);
200 res->int_neighs = (ir_node**)obstack_finish(&env->obst);
205 static co_mst_irn_t *get_co_mst_irn(co_mst_env_t *env, const ir_node *node)
207 co_mst_irn_t *res = (co_mst_irn_t*)ir_nodemap_get(&env->map, node);
209 res = co_mst_irn_init(env, node);
210 ir_nodemap_insert(&env->map, node, res);
215 typedef int decide_func_t(const co_mst_irn_t *node, int col);
220 * Write a chunk to stderr for debugging.
222 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
226 if (c->weight_consistent)
227 ir_fprintf(stderr, " $%d ", c->weight);
228 ir_fprintf(stderr, "{");
229 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
230 const ir_node *n = c->n[i];
231 ir_fprintf(stderr, " %+F,", n);
233 ir_fprintf(stderr, "}");
237 * Dump all admissible colors to stderr.
239 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node)
244 if (bitset_popcount(node->adm_colors) < 1)
245 fprintf(stderr, "no admissible colors?!?");
247 bitset_foreach(node->adm_colors, idx) {
248 ir_fprintf(stderr, " %zu", idx);
254 * Dump color-cost pairs to stderr.
256 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost)
259 for (i = 0; i < env->n_regs; ++i)
260 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
263 #endif /* DEBUG_libfirm */
265 static inline int get_mst_irn_col(const co_mst_irn_t *node)
267 return node->tmp_col >= 0 ? node->tmp_col : node->col;
271 * @return 1 if node @p node has color @p col, 0 otherwise.
273 static int decider_has_color(const co_mst_irn_t *node, int col)
275 return get_mst_irn_col(node) == col;
279 * @return 1 if node @p node has not color @p col, 0 otherwise.
281 static int decider_hasnot_color(const co_mst_irn_t *node, int col)
283 return get_mst_irn_col(node) != col;
287 * Always returns true.
289 static int decider_always_yes(const co_mst_irn_t *node, int col)
296 /** compares two affinity edges by its weight */
297 static int cmp_aff_edge(const void *a, const void *b)
299 const aff_edge_t *e1 = (const aff_edge_t*)a;
300 const aff_edge_t *e2 = (const aff_edge_t*)b;
302 if (e2->weight == e1->weight) {
303 if (e2->src->node_idx == e1->src->node_idx)
304 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
306 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
308 /* sort in descending order */
309 return QSORT_CMP(e2->weight, e1->weight);
312 /** compares to color-cost pairs */
313 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b)
315 const col_cost_t *c1 = (const col_cost_t*)a;
316 const col_cost_t *c2 = (const col_cost_t*)b;
317 real_t diff = c1->cost - c2->cost;
318 return (diff > 0) - (diff < 0);
321 static int cmp_col_cost_gt(const void *a, const void *b)
323 const col_cost_t *c1 = (const col_cost_t*)a;
324 const col_cost_t *c2 = (const col_cost_t*)b;
325 real_t diff = c2->cost - c1->cost;
328 return QSORT_CMP(c1->col, c2->col);
330 return (diff > 0) - (diff < 0);
334 * Creates a new affinity chunk
336 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
338 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
339 c->n = NEW_ARR_F(const ir_node *, 0);
340 c->interfere = NEW_ARR_F(const ir_node *, 0);
342 c->weight_consistent = 0;
344 c->id = ++last_chunk_id;
346 list_add(&c->list, &env->chunklist);
351 * Frees all memory allocated by an affinity chunk.
353 static inline void delete_aff_chunk(aff_chunk_t *c)
356 DEL_ARR_F(c->interfere);
363 * binary search of sorted nodes.
365 * @return the position where n is found in the array arr or ~pos
366 * if the nodes is not here.
368 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n)
370 int hi = ARR_LEN(arr);
374 int md = lo + ((hi - lo) >> 1);
387 /** Check if a node n can be found inside arr. */
388 static int node_contains(const ir_node **arr, const ir_node *n)
390 int i = nodes_bsearch(arr, n);
395 * Insert a node into the sorted nodes list.
397 * @return 1 if the node was inserted, 0 else
399 static int nodes_insert(const ir_node ***arr, const ir_node *irn)
401 int idx = nodes_bsearch(*arr, irn);
404 int i, n = ARR_LEN(*arr);
407 ARR_APP1(const ir_node *, *arr, irn);
412 for (i = n - 1; i >= idx; --i)
421 * Adds a node to an affinity chunk
423 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node)
427 if (! nodes_insert(&c->n, node->irn))
430 c->weight_consistent = 0;
433 for (i = node->n_neighs - 1; i >= 0; --i) {
434 ir_node *neigh = node->int_neighs[i];
435 nodes_insert(&c->interfere, neigh);
440 * Check if affinity chunk @p chunk interferes with node @p irn.
442 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn)
444 return node_contains(chunk->interfere, irn);
448 * Check if there are interference edges from c1 to c2.
450 * @param c2 Another chunk
451 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
453 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2)
460 /* check if there is a node in c2 having an interfering neighbor in c1 */
461 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
462 const ir_node *irn = c2->n[i];
464 if (node_contains(c1->interfere, irn))
471 * Returns the affinity chunk of @p irn or creates a new
472 * one with @p irn as element if there is none assigned.
474 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn)
476 co_mst_irn_t *node = get_co_mst_irn(env, irn);
481 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
482 * are no interference edges from chunk(src) to chunk(tgt)).
483 * @return 1 if successful, 0 if not possible
485 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt)
487 aff_chunk_t *c1 = get_aff_chunk(env, src);
488 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
491 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
493 DBG_AFF_CHUNK(env, LEVEL_4, c1);
495 DB((dbg, LEVEL_4, "{%+F}", src));
497 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
499 DBG_AFF_CHUNK(env, LEVEL_4, c2);
501 DB((dbg, LEVEL_4, "{%+F}", tgt));
503 DB((dbg, LEVEL_4, "\n"));
508 /* no chunk exists */
509 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
512 for (i = mirn->n_neighs - 1; i >= 0; --i) {
513 if (mirn->int_neighs[i] == tgt)
517 /* create one containing both nodes */
518 c1 = new_aff_chunk(env);
519 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
520 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
524 /* c2 already exists */
525 if (! aff_chunk_interferes(c2, src)) {
526 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
530 } else if (c2 == NULL) {
531 /* c1 already exists */
532 if (! aff_chunk_interferes(c1, tgt)) {
533 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
536 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
539 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
540 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
542 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
543 const ir_node *irn = c2->interfere[idx];
544 nodes_insert(&c1->interfere, irn);
547 c1->weight_consistent = 0;
549 delete_aff_chunk(c2);
552 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
556 DB((dbg, LEVEL_4, " ... absorbed\n"));
561 * Assures that the weight of the given chunk is consistent.
563 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
565 if (! c->weight_consistent) {
569 for (i = 0; i < env->n_regs; ++i) {
570 c->color_affinity[i].col = i;
571 c->color_affinity[i].cost = REAL(0.0);
574 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
575 const ir_node *n = c->n[idx];
576 const affinity_node_t *an = get_affinity_info(env->co, n);
577 co_mst_irn_t *node = get_co_mst_irn(env, n);
580 if (node->constr_factor > REAL(0.0)) {
582 bitset_foreach (node->adm_colors, col)
583 c->color_affinity[col].cost += node->constr_factor;
588 co_gs_foreach_neighb(an, neigh) {
589 const ir_node *m = neigh->irn;
591 if (arch_irn_is_ignore(m))
594 w += node_contains(c->n, m) ? neigh->costs : 0;
599 for (i = 0; i < env->n_regs; ++i)
600 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
603 // c->weight = bitset_popcount(c->nodes);
604 c->weight_consistent = 1;
609 * Count the number of interfering affinity neighbours
611 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an)
613 const neighb_t *neigh;
614 const ir_node *irn = an->irn;
615 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
618 co_gs_foreach_neighb(an, neigh) {
619 const ir_node *n = neigh->irn;
622 if (arch_irn_is_ignore(n))
625 /* check if the affinity neighbour interfere */
626 for (i = 0; i < node->n_neighs; ++i) {
627 if (node->int_neighs[i] == n) {
638 * Build chunks of nodes connected by affinity edges.
639 * We start at the heaviest affinity edge.
640 * The chunks of the two edge-defining nodes will be
641 * merged if there are no interference edges from one
642 * chunk to the other.
644 static void build_affinity_chunks(co_mst_env_t *env)
646 nodes_iter_t nodes_it;
647 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
650 aff_chunk_t *curr_chunk;
653 /* at first we create the affinity edge objects */
654 be_ifg_foreach_node(env->ifg, &nodes_it, n) {
655 int n_idx = get_irn_idx(n);
659 if (arch_irn_is_ignore(n))
662 n1 = get_co_mst_irn(env, n);
663 an = get_affinity_info(env->co, n);
668 if (n1->int_aff_neigh < 0)
669 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
671 /* build the affinity edges */
672 co_gs_foreach_neighb(an, neigh) {
673 const ir_node *m = neigh->irn;
674 int m_idx = get_irn_idx(m);
676 /* record the edge in only one direction */
681 /* skip ignore nodes */
682 if (arch_irn_is_ignore(m))
688 n2 = get_co_mst_irn(env, m);
689 if (n2->int_aff_neigh < 0) {
690 affinity_node_t *am = get_affinity_info(env->co, m);
691 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
694 * these weights are pure hackery ;-).
695 * It's not chriswue's fault but mine.
697 edge.weight = neigh->costs;
698 ARR_APP1(aff_edge_t, edges, edge);
704 /* now: sort edges and build the affinity chunks */
705 len = ARR_LEN(edges);
706 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
707 for (i = 0; i < len; ++i) {
708 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
710 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
713 /* now insert all chunks into a priority queue */
714 list_for_each_entry(aff_chunk_t, curr_chunk, &env->chunklist, list) {
715 aff_chunk_assure_weight(env, curr_chunk);
717 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
718 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
719 DBG((dbg, LEVEL_1, "\n"));
721 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
724 for (pn = 0; pn < ARR_LEN(env->map.data); ++pn) {
725 co_mst_irn_t *mirn = env->map.data[pn];
728 if (mirn->chunk != NULL)
731 /* no chunk is allocated so far, do it now */
732 aff_chunk_t *curr_chunk = new_aff_chunk(env);
733 aff_chunk_add_node(curr_chunk, mirn);
735 aff_chunk_assure_weight(env, curr_chunk);
737 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
738 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
739 DBG((dbg, LEVEL_1, "\n"));
741 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
747 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
749 pqueue_t *grow = new_pqueue();
750 ir_node const *max_node = NULL;
754 for (i = ARR_LEN(chunk->n); i != 0;) {
755 const ir_node *irn = chunk->n[--i];
756 affinity_node_t *an = get_affinity_info(env->co, irn);
760 if (arch_irn_is_ignore(irn))
764 co_gs_foreach_neighb(an, neigh)
767 if (w > max_weight) {
775 bitset_t *visited = bitset_irg_malloc(env->co->irg);
777 for (i = ARR_LEN(chunk->n); i != 0;)
778 bitset_add_irn(visited, chunk->n[--i]);
780 pqueue_put(grow, (void *) max_node, max_weight);
781 bitset_remv_irn(visited, max_node);
783 while (!pqueue_empty(grow)) {
784 ir_node *irn = (ir_node*)pqueue_pop_front(grow);
785 affinity_node_t *an = get_affinity_info(env->co, irn);
788 if (arch_irn_is_ignore(irn))
791 assert(i <= ARR_LEN(chunk->n));
796 /* build the affinity edges */
797 co_gs_foreach_neighb(an, neigh) {
798 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
800 if (bitset_contains_irn(visited, node->irn)) {
801 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
802 bitset_remv_irn(visited, node->irn);
808 bitset_free(visited);
813 * Greedily collects affinity neighbours into the new chunk @p chunk, starting at node @p node.
815 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
816 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
818 waitq *nodes = new_waitq();
820 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
822 /* init queue and chunk */
823 waitq_put(nodes, node);
824 bitset_set(visited, get_irn_idx(node->irn));
825 aff_chunk_add_node(chunk, node);
826 DB((dbg, LEVEL_1, " %+F", node->irn));
828 /* as long as there are nodes in the queue */
829 while (! waitq_empty(nodes)) {
830 co_mst_irn_t *n = (co_mst_irn_t*)waitq_get(nodes);
831 affinity_node_t *an = get_affinity_info(env->co, n->irn);
833 /* check all affinity neighbors */
836 co_gs_foreach_neighb(an, neigh) {
837 const ir_node *m = neigh->irn;
838 int m_idx = get_irn_idx(m);
841 if (arch_irn_is_ignore(m))
844 n2 = get_co_mst_irn(env, m);
846 if (! bitset_is_set(visited, m_idx) &&
849 ! aff_chunk_interferes(chunk, m) &&
850 node_contains(orig_chunk->n, m))
853 following conditions are met:
854 - neighbour is not visited
855 - neighbour likes the color
856 - neighbour has not yet a fixed color
857 - the new chunk doesn't interfere with the neighbour
858 - neighbour belongs or belonged once to the original chunk
860 bitset_set(visited, m_idx);
861 aff_chunk_add_node(chunk, n2);
862 DB((dbg, LEVEL_1, " %+F", n2->irn));
863 /* enqueue for further search */
864 waitq_put(nodes, n2);
870 DB((dbg, LEVEL_1, "\n"));
876 * Fragment the given chunk into chunks having given color and not having given color.
878 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp)
880 bitset_t *visited = bitset_irg_malloc(env->co->irg);
882 aff_chunk_t *best = NULL;
884 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
887 aff_chunk_t *tmp_chunk;
888 decide_func_t *decider;
892 if (bitset_is_set(visited, get_irn_idx(irn)))
895 node = get_co_mst_irn(env, irn);
897 if (get_mst_irn_col(node) == col) {
898 decider = decider_has_color;
900 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
903 decider = decider_hasnot_color;
905 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
908 /* create a new chunk starting at current node */
909 tmp_chunk = new_aff_chunk(env);
910 waitq_put(tmp, tmp_chunk);
911 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
912 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
914 /* remember the local best */
915 aff_chunk_assure_weight(env, tmp_chunk);
916 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
920 assert(best && "No chunk found?");
921 bitset_free(visited);
926 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
927 * ATTENTION: the queue is empty after calling this function!
929 static inline void reject_coloring(struct list_head *nodes)
931 co_mst_irn_t *n, *temp;
932 DB((dbg, LEVEL_4, "\treject coloring for"));
933 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
934 DB((dbg, LEVEL_4, " %+F", n->irn));
935 assert(n->tmp_col >= 0);
937 list_del_init(&n->list);
939 DB((dbg, LEVEL_4, "\n"));
942 static inline void materialize_coloring(struct list_head *nodes)
944 co_mst_irn_t *n, *temp;
945 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
946 assert(n->tmp_col >= 0);
949 list_del_init(&n->list);
953 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
956 assert(!node->fixed);
957 assert(node->tmp_col < 0);
958 assert(node->list.next == &node->list && node->list.prev == &node->list);
959 assert(bitset_is_set(node->adm_colors, col));
961 list_add_tail(&node->list, changed);
965 static inline int is_loose(co_mst_irn_t *node)
967 return !node->fixed && node->tmp_col < 0;
971 * Determines the costs for each color if it would be assigned to node @p node.
973 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
975 int *neigh_cols = ALLOCAN(int, env->n_regs);
980 for (i = 0; i < env->n_regs; ++i) {
983 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
986 for (i = 0; i < node->n_neighs; ++i) {
987 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
988 int col = get_mst_irn_col(n);
993 costs[col].cost = REAL(0.0);
997 coeff = REAL(1.0) / n_loose;
998 for (i = 0; i < env->n_regs; ++i)
999 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
1003 /* need forward declaration due to recursive call */
1004 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
1007 * Tries to change node to a color but @p explude_col.
1008 * @return 1 if succeeded, 0 otherwise.
1010 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip)
1012 int col = get_mst_irn_col(node);
1015 /* neighbours has already a different color -> good, temporary fix it */
1016 if (col != exclude_col) {
1018 set_temp_color(node, col, changed);
1022 /* The node has the color it should not have _and_ has not been visited yet. */
1023 if (is_loose(node)) {
1024 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
1026 /* Get the costs for giving the node a specific color. */
1027 determine_color_costs(env, node, costs);
1029 /* Since the node must not have the not_col, set the costs for that color to "infinity" */
1030 costs[exclude_col].cost = REAL(0.0);
1032 /* sort the colors according costs, cheapest first. */
1033 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
1035 /* Try recoloring the node using the color list. */
1036 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1043 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1044 * ATTENTION: Expect @p costs already sorted by increasing costs.
1045 * @return 1 if coloring could be applied, 0 otherwise.
1047 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip)
1050 struct list_head local_changed;
1053 if (depth > *max_depth)
1056 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1057 DBG_COL_COST(env, LEVEL_4, costs);
1058 DB((dbg, LEVEL_4, "\n"));
1060 if (depth >= recolor_limit) {
1061 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
1065 for (i = 0; i < env->n_regs; ++i) {
1066 int tgt_col = costs[i].col;
1070 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
1071 if (costs[i].cost == REAL(0.0)) {
1072 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1076 /* Set the new color of the node and mark the node as temporarily fixed. */
1077 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1078 INIT_LIST_HEAD(&local_changed);
1079 set_temp_color(node, tgt_col, &local_changed);
1080 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1082 /* try to color all interfering neighbours with current color forbidden */
1083 for (j = 0; j < node->n_neighs; ++j) {
1087 neigh = node->int_neighs[j];
1089 if (arch_irn_is_ignore(neigh))
1092 nn = get_co_mst_irn(env, neigh);
1093 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1094 neigh, j, nn->fixed, nn->tmp_col, nn->col));
1097 Try to change the color of the neighbor and record all nodes which
1098 get changed in the tmp list. Add this list to the "changed" list for
1099 that color. If we did not succeed to change the color of the neighbor,
1100 we bail out and try the next color.
1102 if (get_mst_irn_col(nn) == tgt_col) {
1103 /* try to color neighbour with tgt_col forbidden */
1104 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1112 We managed to assign the target color to all neighbors, so from the perspective
1113 of the current node, everything was ok and we can return safely.
1116 /* append the local_changed ones to global ones */
1117 list_splice(&local_changed, changed);
1121 /* coloring of neighbours failed, so we try next color */
1122 reject_coloring(&local_changed);
1126 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1131 * Tries to bring node @p node and all its neighbours to color @p tgt_col.
1132 * @return 1 if color @p col could be applied, 0 otherwise
1134 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed)
1136 int col = get_mst_irn_col(node);
1138 /* if node already has the target color -> good, temporary fix it */
1139 if (col == tgt_col) {
1140 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1142 set_temp_color(node, tgt_col, changed);
1147 Node has not yet a fixed color and target color is admissible
1148 -> try to recolor node and its affinity neighbours
1150 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
1151 col_cost_t *costs = env->single_cols[tgt_col];
1152 int res, max_depth, trip;
1157 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1158 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1159 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1160 stat_ev_int("heur4_recolor_depth_max", max_depth);
1161 stat_ev_int("heur4_recolor_trip", trip);
1167 #ifdef DEBUG_libfirm
1168 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1169 if (!is_loose(node))
1170 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1172 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1173 dbg_admissible_colors(env, node);
1174 DB((dbg, LEVEL_4, ")\n"));
1183 * Tries to color an affinity chunk (or at least a part of it).
1184 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1186 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
1188 aff_chunk_t *best_chunk = NULL;
1189 int n_nodes = ARR_LEN(c->n);
1190 int best_color = -1;
1191 int n_int_chunks = 0;
1192 waitq *tmp_chunks = new_waitq();
1193 waitq *best_starts = NULL;
1194 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1201 struct list_head changed;
1203 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1204 DBG_AFF_CHUNK(env, LEVEL_2, c);
1205 DB((dbg, LEVEL_2, "\n"));
1207 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1209 ++env->chunk_visited;
1211 /* compute color preference */
1212 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1213 const ir_node *n = c->interfere[pos];
1214 co_mst_irn_t *node = get_co_mst_irn(env, n);
1215 aff_chunk_t *chunk = node->chunk;
1217 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1218 assert(!chunk->deleted);
1219 chunk->visited = env->chunk_visited;
1222 aff_chunk_assure_weight(env, chunk);
1223 for (i = 0; i < env->n_regs; ++i)
1224 order[i].cost += chunk->color_affinity[i].cost;
1228 for (i = 0; i < env->n_regs; ++i) {
1229 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1231 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1234 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1236 DBG_COL_COST(env, LEVEL_2, order);
1237 DB((dbg, LEVEL_2, "\n"));
1239 /* check which color is the "best" for the given chunk.
1240 * if we found a color which was ok for all nodes, we take it
1241 * and do not look further. (see did_all flag usage below.)
1242 * If we have many colors which fit all nodes it is hard to decide
1243 * which one to take anyway.
1244 * TODO Sebastian: Perhaps we should at all nodes and figure out
1245 * a suitable color using costs as done above (determine_color_costs).
1247 for (i = 0; i < env->k; ++i) {
1248 int col = order[i].col;
1249 waitq *good_starts = new_waitq();
1250 aff_chunk_t *local_best;
1253 /* skip ignore colors */
1254 if (!bitset_is_set(env->allocatable_regs, col))
1257 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1261 /* try to bring all nodes of given chunk to the current color. */
1262 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1263 const ir_node *irn = c->n[idx];
1264 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1267 assert(! node->fixed && "Node must not have a fixed color.");
1268 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1271 The order of the colored nodes is important, so we record the successfully
1272 colored ones in the order they appeared.
1274 INIT_LIST_HEAD(&changed);
1276 good = change_node_color(env, node, col, &changed);
1277 stat_ev_tim_pop("heur4_recolor");
1279 waitq_put(good_starts, node);
1280 materialize_coloring(&changed);
1285 reject_coloring(&changed);
1287 n_succeeded += good;
1288 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1291 /* unfix all nodes */
1292 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1293 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1297 /* try next color when failed */
1298 if (n_succeeded == 0)
1301 /* fragment the chunk according to the coloring */
1302 local_best = fragment_chunk(env, col, c, tmp_chunks);
1304 /* search the best of the good list
1305 and make it the new best if it is better than the current */
1307 aff_chunk_assure_weight(env, local_best);
1309 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1310 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1312 if (! best_chunk || best_chunk->weight < local_best->weight) {
1313 best_chunk = local_best;
1316 del_waitq(best_starts);
1317 best_starts = good_starts;
1318 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1320 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1321 del_waitq(good_starts);
1325 del_waitq(good_starts);
1328 /* if all nodes were recolored, bail out */
1329 if (n_succeeded == n_nodes)
1333 stat_ev_int("heur4_colors_tried", i);
1335 /* free all intermediate created chunks except best one */
1336 while (! waitq_empty(tmp_chunks)) {
1337 aff_chunk_t *tmp = (aff_chunk_t*)waitq_get(tmp_chunks);
1338 if (tmp != best_chunk)
1339 delete_aff_chunk(tmp);
1341 del_waitq(tmp_chunks);
1343 /* return if coloring failed */
1346 del_waitq(best_starts);
1350 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1351 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1352 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1354 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1355 const ir_node *irn = best_chunk->n[idx];
1356 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1359 /* bring the node to the color. */
1360 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1361 INIT_LIST_HEAD(&changed);
1363 res = change_node_color(env, node, best_color, &changed);
1364 stat_ev_tim_pop("heur4_recolor");
1366 materialize_coloring(&changed);
1369 assert(list_empty(&changed));
1372 /* remove the nodes in best chunk from original chunk */
1373 len = ARR_LEN(best_chunk->n);
1374 for (idx = 0; idx < len; ++idx) {
1375 const ir_node *irn = best_chunk->n[idx];
1376 int pos = nodes_bsearch(c->n, irn);
1381 len = ARR_LEN(c->n);
1382 for (idx = nidx = 0; idx < len; ++idx) {
1383 const ir_node *irn = c->n[idx];
1389 ARR_SHRINKLEN(c->n, nidx);
1392 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1393 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1394 const ir_node *n = c->n[idx];
1395 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1399 /* fragment the remaining chunk */
1400 visited = bitset_irg_malloc(env->co->irg);
1401 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1402 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1404 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1405 const ir_node *irn = c->n[idx];
1406 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1407 aff_chunk_t *new_chunk = new_aff_chunk(env);
1408 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1410 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1411 aff_chunk_assure_weight(env, new_chunk);
1412 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1416 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1417 const ir_node *n = best_chunk->n[idx];
1418 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1422 /* clear obsolete chunks and free some memory */
1423 delete_aff_chunk(best_chunk);
1424 bitset_free(visited);
1426 del_waitq(best_starts);
1428 stat_ev_ctx_pop("heur4_color_chunk");
1432 * Main driver for mst safe coalescing algorithm.
1434 static int co_solve_heuristic_mst(copy_opt_t *co)
/* Entry point of the "heur4" heuristic: builds affinity chunks over the
 * interference graph, colors them heaviest-first via color_aff_chunk(), then
 * commits the chosen colors as register assignments.
 * NOTE(review): this excerpt elides several lines (declarations of i, j, k,
 * pn, irn, res and some guards/continues); comments below describe only the
 * visible code. */
1436 unsigned n_regs = co->cls->n_regs;
/* Registers of this class that may actually be assigned. */
1437 bitset_t *allocatable_regs = bitset_alloca(n_regs);
1442 co_mst_env_t mst_env;
/* Per-node coalescing data is kept in mst_env.map; all auxiliary memory is
 * taken from mst_env.obst and released in one go at the end. */
1449 ir_nodemap_init(&mst_env.map, co->irg);
1450 obstack_init(&mst_env.obst);
1452 be_put_allocatable_regs(co->cenv->irg, co->cls, allocatable_regs);
/* k = number of allocatable registers (can be < n_regs).
 * NOTE(review): k's consumer is elided from this excerpt -- confirm. */
1453 k = bitset_popcount(allocatable_regs);
1455 mst_env.n_regs = n_regs;
/* Chunks are processed from this priority queue, see the loop below. */
1457 mst_env.chunks = new_pqueue();
1459 mst_env.allocatable_regs = allocatable_regs;
1460 mst_env.ifg = co->cenv->ifg;
1461 INIT_LIST_HEAD(&mst_env.chunklist);
1462 mst_env.chunk_visited = 0;
1463 mst_env.single_cols = OALLOCN(&mst_env.obst, col_cost_t*, n_regs);
/* Pre-compute one col_cost_t vector per color: all visible entries start at
 * cost 0.0 and slot 0 gets cost 1.0.
 * NOTE(review): the elided lines between these statements presumably fill in
 * vec[j].col values -- confirm against the full source. */
1465 for (i = 0; i < n_regs; ++i) {
1466 col_cost_t *vec = OALLOCN(&mst_env.obst, col_cost_t, n_regs);
1468 mst_env.single_cols[i] = vec;
1469 for (j = 0; j < n_regs; ++j) {
1471 vec[j].cost = REAL(0.0);
1475 vec[0].cost = REAL(1.0);
1478 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1480 /* build affinity chunks */
1482 build_affinity_chunks(&mst_env);
1483 stat_ev_tim_pop("heur4_initial_chunk");
1485 /* color chunks as long as there are some */
1486 while (! pqueue_empty(mst_env.chunks)) {
1487 aff_chunk_t *chunk = (aff_chunk_t*)pqueue_pop_front(mst_env.chunks);
/* color_aff_chunk() may fragment the chunk and re-queue the remaining
 * pieces on mst_env.chunks, so this loop also consumes newly added work. */
1489 color_aff_chunk(&mst_env, chunk);
1490 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1491 delete_aff_chunk(chunk);
1494 /* apply coloring */
/* Walk every node that received per-node data and write its final color back
 * as the architecture register.
 * NOTE(review): elided lines presumably skip NULL map entries and 'continue'
 * after the two guards below -- confirm. */
1495 for (pn = 0; pn < ARR_LEN(mst_env.map.data); ++pn) {
1496 co_mst_irn_t *mirn = mst_env.map.data[pn];
1497 const arch_register_t *reg;
1500 irn = get_idx_irn(co->irg, pn);
/* Ignored nodes keep their pre-assigned register. */
1501 if (arch_irn_is_ignore(irn))
1504 /* skip nodes where color hasn't changed */
1505 if (mirn->init_col == mirn->col)
1508 reg = arch_register_for_index(co->cls, mirn->col);
1509 arch_set_irn_register(irn, reg);
1510 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1513 /* free allocated memory */
1514 del_pqueue(mst_env.chunks);
/* Releases everything allocated on the obstack, incl. single_cols vectors. */
1515 obstack_free(&mst_env.obst, NULL);
1516 ir_nodemap_destroy(&mst_env.map);
1518 stat_ev_tim_pop("heur4_total");
/* Command line options of the heur4 heuristic. */
1523 static const lc_opt_table_entry_t options[] = {
/* "limit": bounds the recoloring search (backs recolor_limit, default 7;
 * see its definition near the top of this file). */
1524 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
/* "di": influence of dislike costs (backs dislike_influence, default 0.1). */
1525 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
1529 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4)
1530 void be_init_copyheur4(void)
1532 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1533 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1534 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1535 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1536 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1538 static co_algo_info copyheur = {
1539 co_solve_heuristic_mst, 0
1542 lc_opt_add_table(heur4_grp, options);
1543 be_register_copyopt("heur4", ©heur);
1545 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");