2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
27 * This is the C implementation of the mst algorithm
28 * originally written in Java by Sebastian Hack.
29 * (also known as "heur3" :)
30 * Performs simple copy minimization.
34 #define DISABLE_STATEV
41 #include "raw_bitset.h"
42 #include "irphase_t.h"
58 #include "becopyopt_t.h"
62 #define COL_COST_INFEASIBLE DBL_MAX
63 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
64 #define NEIGHBOUR_CONSTR_COSTS 64.0
69 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while(0)
70 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while(0)
72 static firm_dbg_module_t *dbg = NULL;
76 #define DBG_AFF_CHUNK(env, level, chunk)
77 #define DBG_COL_COST(env, level, cost)
82 #define REAL(C) (C ## f)
84 static unsigned last_chunk_id = 0;
85 static int recolor_limit = 7;
86 static real_t dislike_influence = REAL(0.1);
88 typedef struct _col_cost_t {
96 typedef struct _aff_chunk_t {
97 const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
98 const ir_node **interfere; /**< An ARR_F containing all interfering nodes. */
99 int weight; /**< Weight of this chunk */
100 unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
101 unsigned deleted : 1; /**< For debugging: Set if the chunk was deleted. */
102 unsigned id; /**< An id of this chunk. */
104 col_cost_t color_affinity[1];
110 typedef struct _aff_edge_t {
111 const ir_node *src; /**< Source node. */
112 const ir_node *tgt; /**< Target node. */
113 int weight; /**< The weight of this edge. */
116 /* main coalescing environment */
117 typedef struct _co_mst_env_t {
118 int n_regs; /**< number of regs in class */
119 int k; /**< number of non-ignore registers in class */
120 bitset_t *ignore_regs; /**< set containing all global ignore registers */
121 ir_phase ph; /**< phase object holding data for nodes */
122 pqueue_t *chunks; /**< priority queue for chunks */
123 pset *chunkset; /**< set holding all chunks */
124 be_ifg_t *ifg; /**< the interference graph */
125 copy_opt_t *co; /**< the copy opt object */
126 unsigned chunk_visited;
127 col_cost_t **single_cols;
130 /* stores coalescing related information for a node */
131 typedef struct _co_mst_irn_t {
132 const ir_node *irn; /**< the irn this information belongs to */
133 aff_chunk_t *chunk; /**< the chunk this irn belongs to */
134 bitset_t *adm_colors; /**< set of admissible colors for this irn */
135 ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
136 int n_neighs; /**< length of the interfering neighbours array. */
137 int int_aff_neigh; /**< number of interfering affinity neighbours */
138 int col; /**< color currently assigned */
139 int init_col; /**< the initial color */
140 int tmp_col; /**< a temporary assigned color */
141 unsigned fixed : 1; /**< the color is fixed */
142 struct list_head list; /**< Queue for coloring undo. */
143 real_t constr_factor;
146 #define get_co_mst_irn(mst_env, irn) (phase_get_or_set_irn_data(&(mst_env)->ph, (irn)))
148 typedef int decide_func_t(const co_mst_irn_t *node, int col);
153 * Write a chunk to stderr for debugging.
/**
 * Write a chunk to stderr for debugging: optional weight, then the node list.
 * NOTE(review): this extract is lossy — local declarations and closing braces
 * are missing from the visible lines.
 */
155 static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c) {
/* only print the weight when the cached value is known to be up to date */
158 if (c->weight_consistent)
159 ir_fprintf(stderr, " $%d ", c->weight);
160 ir_fprintf(stderr, "{");
161 for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
162 const ir_node *n = c->n[i];
163 ir_fprintf(stderr, " %+F,", n);
165 ir_fprintf(stderr, "}");
169 * Dump all admissible colors to stderr.
/**
 * Dump all admissible colors of @p node to stderr (debug helper).
 */
171 static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node) {
/* a node with an empty admissible set would be uncolorable — flag it loudly */
175 if (bitset_popcnt(node->adm_colors) < 1)
176 fprintf(stderr, "no admissible colors?!?");
178 bitset_foreach(node->adm_colors, idx) {
179 fprintf(stderr, " %d", idx);
185 * Dump color-cost pairs to stderr.
/**
 * Dump the (color, cost) pair of every register in the class to stderr.
 */
187 static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost) {
189 for (i = 0; i < env->n_regs; ++i)
190 fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
193 #endif /* DEBUG_libfirm */
195 static inline int get_mst_irn_col(const co_mst_irn_t *node) {
196 return node->tmp_col >= 0 ? node->tmp_col : node->col;
200 * @return 1 if node @p node has color @p col, 0 otherwise.
202 static int decider_has_color(const co_mst_irn_t *node, int col) {
203 return get_mst_irn_col(node) == col;
207 * @return 1 if node @p node has not color @p col, 0 otherwise.
209 static int decider_hasnot_color(const co_mst_irn_t *node, int col) {
210 return get_mst_irn_col(node) != col;
214 * Always returns true.
/* decide_func_t that accepts every (node, color) pair; the body is missing
 * from this lossy view — presumably 'return 1;', confirm against upstream. */
216 static int decider_always_yes(const co_mst_irn_t *node, int col) {
222 /** compares two affinity edges by its weight */
/**
 * qsort comparator for affinity edges: sorts by weight in DESCENDING order,
 * breaking weight ties on source/target node indices so the order is
 * deterministic across runs.
 */
223 static int cmp_aff_edge(const void *a, const void *b) {
224 const aff_edge_t *e1 = a;
225 const aff_edge_t *e2 = b;
/* equal weights: fall back to node indices for a stable, reproducible order */
227 if (e2->weight == e1->weight) {
228 if (e2->src->node_idx == e1->src->node_idx)
229 return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
231 return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
233 /* sort in descending order */
234 return QSORT_CMP(e2->weight, e1->weight);
237 /** compares to color-cost pairs */
238 static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b) {
239 const col_cost_t *c1 = a;
240 const col_cost_t *c2 = b;
241 real_t diff = c1->cost - c2->cost;
242 return (diff > 0) - (diff < 0);
245 static int cmp_col_cost_gt(const void *a, const void *b) {
246 const col_cost_t *c1 = a;
247 const col_cost_t *c2 = b;
248 real_t diff = c2->cost - c1->cost;
249 return (diff > 0) - (diff < 0);
253 * Creates a new affinity chunk
/**
 * Creates a new affinity chunk, gives it a fresh id, and registers it in the
 * environment's chunk set. NOTE(review): some initialisation lines and the
 * return statement are missing from this lossy view.
 */
255 static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env) {
/* the trailing color_affinity[] array is sized to the number of registers */
256 aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
257 c->n = NEW_ARR_F(const ir_node *, 0);
258 c->interfere = NEW_ARR_F(const ir_node *, 0);
260 c->weight_consistent = 0;
262 c->id = ++last_chunk_id;
264 pset_insert(env->chunkset, c, c->id);
269 * Frees all memory allocated by an affinity chunk.
/**
 * Removes chunk @p c from the environment's chunk set and frees its memory.
 */
271 static inline void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
272 pset_remove(env->chunkset, c, c->id);
273 DEL_ARR_F(c->interfere);
280 * binary search of sorted nodes.
282 * @return the position where n is found in the array arr or ~pos
283 * if the node is not in the array.
/**
 * Binary search for @p n in the sorted node array @p arr.
 * @return the position of n, or ~pos (one's complement of the insertion
 *         point) when n is absent. NOTE(review): the loop body and the lower
 *         bound initialisation are missing from this lossy view.
 */
285 static inline int nodes_bsearch(const ir_node **arr, const ir_node *n) {
286 int hi = ARR_LEN(arr);
290 int md = lo + ((hi - lo) >> 1);
303 /** Check if a node n can be found inside arr. */
/** Check whether node @p n occurs in the sorted array @p arr (binary search). */
304 static int node_contains(const ir_node **arr, const ir_node *n) {
305 int i = nodes_bsearch(arr, n);
310 * Insert a node into the sorted nodes list.
312 * @return 1 if the node was inserted, 0 else
/**
 * Insert @p irn into the sorted node array, keeping it sorted.
 * @return 1 if the node was inserted, 0 if it was already present.
 */
314 static int nodes_insert(const ir_node ***arr, const ir_node *irn) {
315 int idx = nodes_bsearch(*arr, irn);
318 int i, n = ARR_LEN(*arr);
/* grow by one, then shift elements right to open a slot at the insertion point */
321 ARR_APP1(const ir_node *, *arr, irn);
326 for (i = n - 1; i >= idx; --i)
335 * Adds a node to an affinity chunk
/**
 * Adds @p node to chunk @p c (no-op if already present) and records all of
 * the node's interference neighbours in the chunk's interfere array.
 */
337 static inline void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node) {
340 if (! nodes_insert(&c->n, node->irn))
/* chunk contents changed, so the cached weight is stale */
343 c->weight_consistent = 0;
346 for (i = node->n_neighs - 1; i >= 0; --i) {
347 ir_node *neigh = node->int_neighs[i];
348 nodes_insert(&c->interfere, neigh);
353 * In case there is no phase information for irn, initialize it.
/**
 * Phase callback: lazily initializes the per-node coalescing info for @p irn.
 * Sets the initial color from the currently assigned register, computes the
 * admissible color set and constraint factor, and caches the interference
 * neighbour list on the phase obstack.
 */
355 static void *co_mst_irn_init(ir_phase *ph, const ir_node *irn, void *old) {
356 co_mst_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
357 co_mst_env_t *env = ph->priv;
360 const arch_register_req_t *req;
361 void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
369 res->int_neighs = NULL;
370 res->int_aff_neigh = 0;
/* the node's current register assignment becomes its initial color */
371 res->col = arch_register_get_index(arch_get_irn_register(irn));
372 res->init_col = res->col;
373 INIT_LIST_HEAD(&res->list);
375 DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
377 /* set admissible registers */
378 res->adm_colors = bitset_obstack_alloc(phase_obst(ph), env->n_regs);
380 /* Exclude colors not assignable to the irn */
381 req = arch_get_register_req_out(irn);
382 if (arch_register_req_is(req, limited))
383 rbitset_copy_to_bitset(req->limited, res->adm_colors);
/* NOTE(review): an 'else' (original line 384) is missing from this view;
 * bitset_set_all presumably runs for unconstrained nodes — confirm upstream. */
385 bitset_set_all(res->adm_colors);
387 /* exclude global ignore registers as well */
388 bitset_andnot(res->adm_colors, env->ignore_regs);
/* constr_factor grows with the number of excluded colors: more constrained
 * nodes get a larger factor (range (0, 1], 1/n_regs for unconstrained) */
390 /* compute the constraint factor */
391 res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcnt(res->adm_colors)) / env->n_regs;
393 /* set the number of interfering affinity neighbours to -1, they are calculated later */
394 res->int_aff_neigh = -1;
396 /* build list of interfering neighbours */
398 be_ifg_foreach_neighbour(env->ifg, nodes_it, irn, neigh) {
399 if (!arch_irn_is_ignore(neigh)) {
400 obstack_ptr_grow(phase_obst(ph), neigh);
/* finish the obstack-grown pointer array; its length is tracked separately */
404 res->int_neighs = obstack_finish(phase_obst(ph));
411 * Check if affinity chunk @p chunk interferes with node @p irn.
/**
 * Check if affinity chunk @p chunk interferes with node @p irn, i.e. whether
 * irn occurs in the chunk's cached interference array.
 */
413 static inline int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn) {
414 return node_contains(chunk->interfere, irn);
418 * Check if there are interference edges from c1 to c2.
420 * @param c2 Another chunk
421 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
/**
 * Check if there are interference edges between chunks @p c1 and @p c2.
 * @return 1 if any node of c2 appears in c1's interference array, 0 otherwise.
 */
423 static inline int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2) {
429 /* check if there is a node in c2 having an interfering neighbor in c1 */
430 for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
431 const ir_node *irn = c2->n[i];
433 if (node_contains(c1->interfere, irn))
440 * Returns the affinity chunk of @p irn or creates a new
441 * one with @p irn as element if there is none assigned.
/**
 * Returns the affinity chunk of @p irn via its per-node phase info.
 * NOTE(review): the return statement is missing from this lossy view.
 */
443 static inline aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn) {
444 co_mst_irn_t *node = get_co_mst_irn(env, irn);
449 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
450 * are no interference edges from chunk(src) to chunk(tgt)).
451 * @return 1 if successful, 0 if not possible
/**
 * Let chunk(src) absorb the nodes of chunk(tgt), which is only possible when
 * there are no interference edges between the two chunks. Handles four cases:
 * neither node has a chunk, only src has one, only tgt has one, or both do.
 * @return 1 if the absorb succeeded, 0 if it was not possible.
 */
453 static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt) {
454 aff_chunk_t *c1 = get_aff_chunk(env, src);
455 aff_chunk_t *c2 = get_aff_chunk(env, tgt);
458 DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
460 DBG_AFF_CHUNK(env, LEVEL_4, c1);
462 DB((dbg, LEVEL_4, "{%+F}", src));
464 DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
466 DBG_AFF_CHUNK(env, LEVEL_4, c2);
468 DB((dbg, LEVEL_4, "{%+F}", tgt));
470 DB((dbg, LEVEL_4, "\n"));
/* case 1: neither node belongs to a chunk yet */
475 /* no chunk exists */
476 co_mst_irn_t *mirn = get_co_mst_irn(env, src);
/* scan src's interference neighbours: if tgt interferes, no merge is possible */
479 for (i = mirn->n_neighs - 1; i >= 0; --i) {
480 if (mirn->int_neighs[i] == tgt)
484 /* create one containing both nodes */
485 c1 = new_aff_chunk(env);
486 aff_chunk_add_node(c1, get_co_mst_irn(env, src));
487 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
/* case 2: only tgt's chunk exists — pull src into it if legal */
491 /* c2 already exists */
492 if (! aff_chunk_interferes(c2, src)) {
493 aff_chunk_add_node(c2, get_co_mst_irn(env, src));
497 } else if (c2 == NULL) {
498 /* c1 already exists */
499 if (! aff_chunk_interferes(c1, tgt)) {
500 aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
/* case 4: both chunks exist and do not interfere — merge c2 into c1 */
503 } else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
506 for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
507 aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
/* also merge c2's interference set into c1's */
509 for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
510 const ir_node *irn = c2->interfere[idx];
511 nodes_insert(&c1->interfere, irn);
514 c1->weight_consistent = 0;
516 delete_aff_chunk(env, c2);
519 DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
523 DB((dbg, LEVEL_4, " ... absorbed\n"));
528 * Assures that the weight of the given chunk is consistent.
/**
 * Recomputes the chunk's weight and per-color affinity costs when the cached
 * values are stale (weight_consistent == 0); otherwise does nothing.
 */
530 static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c) {
531 if (! c->weight_consistent) {
/* reset the per-color affinity accumulator */
535 for (i = 0; i < env->n_regs; ++i) {
536 c->color_affinity[i].col = i;
537 c->color_affinity[i].cost = REAL(0.0);
540 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
541 const ir_node *n = c->n[idx];
542 const affinity_node_t *an = get_affinity_info(env->co, n);
543 co_mst_irn_t *node = get_co_mst_irn(env, n);
/* each node votes for its admissible colors, weighted by its constraint factor */
546 if (node->constr_factor > REAL(0.0)) {
548 bitset_foreach (node->adm_colors, col)
549 c->color_affinity[col].cost += node->constr_factor;
554 co_gs_foreach_neighb(an, neigh) {
555 const ir_node *m = neigh->irn;
557 if (arch_irn_is_ignore(m))
/* only affinity edges internal to the chunk contribute to its weight */
560 w += node_contains(c->n, m) ? neigh->costs : 0;
/* normalize the accumulated color affinities by the chunk size */
565 for (i = 0; i < env->n_regs; ++i)
566 c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
569 // c->weight = bitset_popcnt(c->nodes);
570 c->weight_consistent = 1;
575 * Count the number of interfering affinity neighbours
/**
 * Counts how many affinity neighbours of @p an also interfere with it,
 * skipping ignore nodes. NOTE(review): the counter increment and return are
 * missing from this lossy view.
 */
577 static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an) {
578 const neighb_t *neigh;
579 const ir_node *irn = an->irn;
580 const co_mst_irn_t *node = get_co_mst_irn(env, irn);
583 co_gs_foreach_neighb(an, neigh) {
584 const ir_node *n = neigh->irn;
587 if (arch_irn_is_ignore(n))
590 /* check if the affinity neighbour interfere */
591 for (i = 0; i < node->n_neighs; ++i) {
592 if (node->int_neighs[i] == n) {
603 * Build chunks of nodes connected by affinity edges.
604 * We start at the heaviest affinity edge.
605 * The chunks of the two edge-defining nodes will be
606 * merged if there are no interference edges from one
607 * chunk to the other.
/**
 * Build chunks of nodes connected by affinity edges: collect all affinity
 * edges, sort them by descending weight, greedily absorb chunks along them,
 * then put every chunk (including singletons for chunk-less nodes) into the
 * priority queue keyed by chunk weight.
 */
609 static void build_affinity_chunks(co_mst_env_t *env) {
610 void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
611 aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
614 aff_chunk_t *curr_chunk;
616 /* at first we create the affinity edge objects */
617 be_ifg_foreach_node(env->ifg, nodes_it, n) {
618 int n_idx = get_irn_idx(n);
622 if (arch_irn_is_ignore(n))
625 n1 = get_co_mst_irn(env, n);
626 an = get_affinity_info(env->co, n);
/* lazily compute the interfering-affinity-neighbour count (init value is -1) */
631 if (n1->int_aff_neigh < 0)
632 n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
634 /* build the affinity edges */
635 co_gs_foreach_neighb(an, neigh) {
636 const ir_node *m = neigh->irn;
637 int m_idx = get_irn_idx(m);
639 /* record the edge in only one direction */
644 /* skip ignore nodes */
645 if (arch_irn_is_ignore(m))
651 n2 = get_co_mst_irn(env, m);
652 if (n2->int_aff_neigh < 0) {
653 affinity_node_t *am = get_affinity_info(env->co, m);
654 n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
657 * these weights are pure hackery ;-).
658 * It's not chriswue's fault but mine.
660 edge.weight = neigh->costs;
661 ARR_APP1(aff_edge_t, edges, edge);
667 /* now: sort edges and build the affinity chunks */
668 len = ARR_LEN(edges);
/* heaviest edges first (cmp_aff_edge sorts descending) — greedy MST-like merge */
669 qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
670 for (i = 0; i < len; ++i) {
671 DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
673 (void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
676 /* now insert all chunks into a priority queue */
677 foreach_pset(env->chunkset, curr_chunk) {
678 aff_chunk_assure_weight(env, curr_chunk);
680 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
681 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
682 DBG((dbg, LEVEL_1, "\n"));
684 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/* nodes that ended up chunk-less get a singleton chunk of their own */
687 foreach_phase_irn(&env->ph, n) {
688 co_mst_irn_t *mirn = get_co_mst_irn(env, n);
690 if (mirn->chunk == NULL) {
691 /* no chunk is allocated so far, do it now */
692 aff_chunk_t *curr_chunk = new_aff_chunk(env);
693 aff_chunk_add_node(curr_chunk, mirn);
695 aff_chunk_assure_weight(env, curr_chunk);
697 DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
698 DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
699 DBG((dbg, LEVEL_1, "\n"));
701 pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/**
 * Reorders the nodes of @p chunk: picks the node with the maximum affinity
 * weight as a seed and then grows outward along affinity edges using a
 * priority queue (best-first traversal). Currently unused.
 * NOTE(review): several lines (weight accumulation, node reinsertion into
 * chunk->n) are missing from this lossy view.
 */
708 static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
710 pqueue_t *grow = new_pqueue();
711 const ir_node *max_node = NULL;
/* find the node with the largest summed affinity cost as the seed */
715 for (i = ARR_LEN(chunk->n) - 1; i >= 0; i--) {
716 const ir_node *irn = chunk->n[i];
717 affinity_node_t *an = get_affinity_info(env->co, irn);
721 if (arch_irn_is_ignore(irn))
725 co_gs_foreach_neighb(an, neigh)
728 if (w > max_weight) {
/* mark all chunk nodes as "still to visit" */
736 bitset_t *visited = bitset_irg_malloc(env->co->irg);
738 for (i = ARR_LEN(chunk->n) - 1; i >= 0; --i)
739 bitset_add_irn(visited, chunk->n[i]);
741 pqueue_put(grow, (void *) max_node, max_weight);
742 bitset_remv_irn(visited, max_node);
/* best-first expansion from the seed along affinity edges */
744 while (!pqueue_empty(grow)) {
745 ir_node *irn = pqueue_pop_front(grow);
746 affinity_node_t *an = get_affinity_info(env->co, irn);
749 if (arch_irn_is_ignore(irn))
752 assert(i <= ARR_LEN(chunk->n));
757 /* build the affinity edges */
758 co_gs_foreach_neighb(an, neigh) {
759 co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
761 if (bitset_contains_irn(visited, node->irn)) {
762 pqueue_put(grow, (void *) neigh->irn, neigh->costs);
763 bitset_remv_irn(visited, node->irn);
769 bitset_free(visited);
774 * Greedily collect affinity neighbours into the new chunk @p chunk starting at node @p node.
/**
 * Greedily collects affinity neighbours into @p chunk, starting a BFS at
 * @p node. A neighbour is taken when the @p decider accepts it for @p col,
 * it is unvisited, loose, does not interfere with the growing chunk, and was
 * part of the original chunk @p orig_chunk.
 */
776 static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
777 aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
779 waitq *nodes = new_waitq();
781 DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
783 /* init queue and chunk */
784 waitq_put(nodes, node);
785 bitset_set(visited, get_irn_idx(node->irn));
786 aff_chunk_add_node(chunk, node);
787 DB((dbg, LEVEL_1, " %+F", node->irn));
789 /* as long as there are nodes in the queue */
790 while (! waitq_empty(nodes)) {
791 co_mst_irn_t *n = waitq_get(nodes);
792 affinity_node_t *an = get_affinity_info(env->co, n->irn);
794 /* check all affinity neighbors */
797 co_gs_foreach_neighb(an, neigh) {
798 const ir_node *m = neigh->irn;
799 int m_idx = get_irn_idx(m);
802 if (arch_irn_is_ignore(m))
805 n2 = get_co_mst_irn(env, m);
/* NOTE(review): the decider/is_loose conjuncts (original lines 808-809)
 * are missing from this lossy view */
807 if (! bitset_is_set(visited, m_idx) &&
810 ! aff_chunk_interferes(chunk, m) &&
811 node_contains(orig_chunk->n, m))
814 following conditions are met:
815 - neighbour is not visited
816 - neighbour likes the color
817 - neighbour has not yet a fixed color
818 - the new chunk doesn't interfere with the neighbour
819 - neighbour belongs or belonged once to the original chunk
821 bitset_set(visited, m_idx);
822 aff_chunk_add_node(chunk, n2);
823 DB((dbg, LEVEL_1, " %+F", n2->irn));
824 /* enqueue for further search */
825 waitq_put(nodes, n2);
831 DB((dbg, LEVEL_1, "\n"));
837 * Fragment the given chunk into chunks having given color and not having given color.
/**
 * Fragments chunk @p c into sub-chunks whose nodes either all have or all
 * lack color @p col, using expand_chunk_from. All created fragments go into
 * the wait queue @p tmp; the heaviest color-matching fragment is remembered.
 * @return the best (heaviest) fragment having color @p col.
 */
839 static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp) {
840 bitset_t *visited = bitset_irg_malloc(env->co->irg);
842 aff_chunk_t *best = NULL;
844 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
847 aff_chunk_t *tmp_chunk;
848 decide_func_t *decider;
/* each node seeds at most one fragment */
852 if (bitset_is_set(visited, get_irn_idx(irn)))
855 node = get_co_mst_irn(env, irn);
/* choose the decider: group nodes that share (or all lack) the color */
857 if (get_mst_irn_col(node) == col) {
858 decider = decider_has_color;
860 DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
863 decider = decider_hasnot_color;
865 DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
867 /* create a new chunk starting at current node */
868 /* create a new chunk starting at current node */
869 tmp_chunk = new_aff_chunk(env);
870 waitq_put(tmp, tmp_chunk);
871 expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
872 assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
874 /* remember the local best */
875 aff_chunk_assure_weight(env, tmp_chunk);
876 if (check_for_best && (! best || best->weight < tmp_chunk->weight))
880 assert(best && "No chunk found?");
881 bitset_free(visited);
886 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
887 * ATTENTION: the queue is empty after calling this function!
/**
 * Resets the temporary color of all nodes on the @p nodes list and unlinks
 * them. ATTENTION: the list is empty after calling this function!
 */
889 static inline void reject_coloring(struct list_head *nodes) {
890 co_mst_irn_t *n, *temp;
891 DB((dbg, LEVEL_4, "\treject coloring for"));
892 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
893 DB((dbg, LEVEL_4, " %+F", n->irn));
/* only nodes with a temporary color may be on this list */
894 assert(n->tmp_col >= 0);
896 list_del_init(&n->list);
898 DB((dbg, LEVEL_4, "\n"));
/**
 * Commits the temporary color of every node on @p nodes as its real color
 * and unlinks the nodes. The list is empty afterwards.
 * NOTE(review): the line assigning tmp_col to col (original line ~905) is
 * missing from this lossy view.
 */
901 static inline void materialize_coloring(struct list_head *nodes) {
902 co_mst_irn_t *n, *temp;
903 list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
904 assert(n->tmp_col >= 0);
907 list_del_init(&n->list);
/**
 * Temporarily assigns color @p col to @p node and enqueues it on the
 * @p changed undo list. The node must be loose (not fixed, no tmp color),
 * unlinked, and the color must be admissible.
 */
911 static inline void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
914 assert(!node->fixed);
915 assert(node->tmp_col < 0);
/* node must not already be linked into some changed-list */
916 assert(node->list.next == &node->list && node->list.prev == &node->list);
917 assert(bitset_is_set(node->adm_colors, col));
919 list_add_tail(&node->list, changed);
923 static inline int is_loose(co_mst_irn_t *node)
925 return !node->fixed && node->tmp_col < 0;
929 * Determines the costs for each color if it would be assigned to node @p node.
/**
 * Determines the cost of each color if it were assigned to @p node.
 * Inadmissible colors get cost 0 (the "infeasible" sentinel in this cost
 * model); colors used by fixed neighbours are also zeroed; remaining costs
 * are damped by the fraction of loose neighbours preferring each color.
 */
931 static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs)
933 int *neigh_cols = ALLOCAN(int, env->n_regs);
938 for (i = 0; i < env->n_regs; ++i) {
/* admissible colors start at the node's constraint factor, others at 0 */
941 costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
944 for (i = 0; i < node->n_neighs; ++i) {
945 co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
946 int col = get_mst_irn_col(n);
/* a color taken by an interfering (non-loose) neighbour is infeasible */
951 costs[col].cost = REAL(0.0);
955 coeff = REAL(1.0) / n_loose;
956 for (i = 0; i < env->n_regs; ++i)
957 costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
961 /* need forward declaration due to recursive call */
962 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
965 * Tries to change the color of @p node to any color but @p exclude_col.
966 * @return 1 if succeeded, 0 otherwise.
/**
 * Tries to move @p node to any color other than @p exclude_col, recording
 * all recolored nodes on @p changed for potential undo.
 * @return 1 if succeeded, 0 otherwise.
 */
968 static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip) {
969 int col = get_mst_irn_col(node);
972 /* neighbours has already a different color -> good, temporary fix it */
973 if (col != exclude_col) {
975 set_temp_color(node, col, changed);
979 /* The node has the color it should not have _and_ has not been visited yet. */
980 if (is_loose(node)) {
981 col_cost_t *costs = ALLOCAN(col_cost_t, env->n_regs);
983 /* Get the costs for giving the node a specific color. */
984 determine_color_costs(env, node, costs);
/* in this cost model 0.0 marks an infeasible color (see recolor_nodes'
 * bail-out on cost == 0.0), hence the "infinity" wording below */
986 /* Since the node must not have the not_col, set the costs for that color to "infinity" */
987 costs[exclude_col].cost = REAL(0.0);
989 /* sort the colors according costs, cheapest first. */
990 qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
992 /* Try recoloring the node using the color list. */
993 res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1000 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1001 * ATTENTION: Expect @p costs already sorted by increasing costs.
1002 * @return 1 if coloring could be applied, 0 otherwise.
/**
 * Tries to bring @p node to the cheapest color from @p costs while recoloring
 * all interfering neighbours away from that color (recursively, bounded by
 * recolor_limit). ATTENTION: expects @p costs already sorted, best first.
 * @return 1 if a consistent coloring could be applied, 0 otherwise.
 */
1004 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip) {
1006 struct list_head local_changed;
/* track the deepest recursion reached, for statistics */
1009 if (depth > *max_depth)
1012 DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
1013 DBG_COL_COST(env, LEVEL_4, costs);
1014 DB((dbg, LEVEL_4, "\n"));
/* recursion depth guard: give up rather than search exhaustively */
1016 if (depth >= recolor_limit) {
1017 DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
1021 for (i = 0; i < env->n_regs; ++i) {
1022 int tgt_col = costs[i].col;
/* cost 0.0 is the infeasibility sentinel; the list is sorted, so all
 * remaining colors are infeasible too */
1026 /* If the costs for that color (and all successive) are infinite, bail out we won't make it anyway. */
1027 if (costs[i].cost == REAL(0.0)) {
1028 DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
1032 /* Set the new color of the node and mark the node as temporarily fixed. */
1033 assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
1034 INIT_LIST_HEAD(&local_changed);
1035 set_temp_color(node, tgt_col, &local_changed);
1036 DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
1038 /* try to color all interfering neighbours with current color forbidden */
1039 for (j = 0; j < node->n_neighs; ++j) {
1043 neigh = node->int_neighs[j];
1045 if (arch_irn_is_ignore(neigh))
1048 nn = get_co_mst_irn(env, neigh);
1049 DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
1050 neigh, j, nn->fixed, nn->tmp_col, nn->col));
1053 Try to change the color of the neighbor and record all nodes which
1054 get changed in the tmp list. Add this list to the "changed" list for
1055 that color. If we did not succeed to change the color of the neighbor,
1056 we bail out and try the next color.
/* only neighbours currently occupying tgt_col need to move */
1058 if (get_mst_irn_col(nn) == tgt_col) {
1059 /* try to color neighbour with tgt_col forbidden */
1060 neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
1068 We managed to assign the target color to all neighbors, so from the perspective
1069 of the current node, every thing was ok and we can return safely.
1072 /* append the local_changed ones to global ones */
1073 list_splice(&local_changed, changed);
1077 /* coloring of neighbours failed, so we try next color */
1078 reject_coloring(&local_changed);
1082 DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1087 * Tries to bring node @p node and all it's neighbours to color @p tgt_col.
1088 * @return 1 if color @p col could be applied, 0 otherwise
/**
 * Tries to bring @p node (and, via recoloring, its interfering neighbours)
 * to color @p tgt_col; changed nodes are recorded on @p changed.
 * @return 1 if color @p tgt_col could be applied, 0 otherwise.
 */
1090 static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed) {
1091 int col = get_mst_irn_col(node);
1093 /* if node already has the target color -> good, temporary fix it */
1094 if (col == tgt_col) {
1095 DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
1097 set_temp_color(node, tgt_col, changed);
1102 Node has not yet a fixed color and target color is admissible
1103 -> try to recolor node and it's affinity neighbours
1105 if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
/* precomputed cost vector that prefers exactly tgt_col */
1106 col_cost_t *costs = env->single_cols[tgt_col];
1107 int res, max_depth, trip;
1112 DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
1113 res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
1114 DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
1115 stat_ev_int("heur4_recolor_depth_max", max_depth);
1116 stat_ev_int("heur4_recolor_trip", trip);
/* debug-only diagnostics explaining why the color change was impossible */
1122 #ifdef DEBUG_libfirm
1123 if (firm_dbg_get_mask(dbg) & LEVEL_4) {
1124 if (!is_loose(node))
1125 DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
1127 DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
1128 dbg_admissible_colors(env, node);
1129 DB((dbg, LEVEL_4, ")\n"));
1138 * Tries to color an affinity chunk (or at least a part of it).
1139 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1141 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
1142 aff_chunk_t *best_chunk = NULL;
1143 int n_nodes = ARR_LEN(c->n);
1144 int best_color = -1;
1145 int n_int_chunks = 0;
1146 waitq *tmp_chunks = new_waitq();
1147 waitq *best_starts = NULL;
1148 col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
1150 int idx, len, i, nidx, pos;
1151 struct list_head changed;
1153 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1154 DBG_AFF_CHUNK(env, LEVEL_2, c);
1155 DB((dbg, LEVEL_2, "\n"));
1157 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1159 ++env->chunk_visited;
1161 /* compute color preference */
1162 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1163 const ir_node *n = c->interfere[pos];
1164 co_mst_irn_t *node = get_co_mst_irn(env, n);
1165 aff_chunk_t *chunk = node->chunk;
1167 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1168 assert(!chunk->deleted);
1169 chunk->visited = env->chunk_visited;
1172 aff_chunk_assure_weight(env, chunk);
1173 for (i = 0; i < env->n_regs; ++i)
1174 order[i].cost += chunk->color_affinity[i].cost;
1178 for (i = 0; i < env->n_regs; ++i) {
1179 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1181 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1184 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1186 DBG_COL_COST(env, LEVEL_2, order);
1187 DB((dbg, LEVEL_2, "\n"));
1189 /* check which color is the "best" for the given chunk.
1190 * if we found a color which was ok for all nodes, we take it
1191 * and do not look further. (see did_all flag usage below.)
1192 * If we have many colors which fit all nodes it is hard to decide
1193 * which one to take anyway.
1194 * TODO Sebastian: Perhaps we should at all nodes and figure out
1195 * a suitable color using costs as done above (determine_color_costs).
1197 for (i = 0; i < env->k; ++i) {
1198 int col = order[i].col;
1199 waitq *good_starts = new_waitq();
1200 aff_chunk_t *local_best;
1203 /* skip ignore colors */
1204 if (bitset_is_set(env->ignore_regs, col))
1207 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1211 /* try to bring all nodes of given chunk to the current color. */
1212 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1213 const ir_node *irn = c->n[idx];
1214 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1217 assert(! node->fixed && "Node must not have a fixed color.");
1218 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1221 The order of the colored nodes is important, so we record the successfully
1222 colored ones in the order they appeared.
1224 INIT_LIST_HEAD(&changed);
1226 good = change_node_color(env, node, col, &changed);
1227 stat_ev_tim_pop("heur4_recolor");
1229 waitq_put(good_starts, node);
1230 materialize_coloring(&changed);
1235 reject_coloring(&changed);
1237 n_succeeded += good;
1238 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1241 /* unfix all nodes */
1242 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1243 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1247 /* try next color when failed */
1248 if (n_succeeded == 0)
1251 /* fragment the chunk according to the coloring */
1252 local_best = fragment_chunk(env, col, c, tmp_chunks);
1254 /* search the best of the good list
1255 and make it the new best if it is better than the current */
1257 aff_chunk_assure_weight(env, local_best);
1259 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1260 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1262 if (! best_chunk || best_chunk->weight < local_best->weight) {
1263 best_chunk = local_best;
1266 del_waitq(best_starts);
1267 best_starts = good_starts;
1268 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1270 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1271 del_waitq(good_starts);
1275 del_waitq(good_starts);
1278 /* if all nodes were recolored, bail out */
1279 if (n_succeeded == n_nodes)
1283 stat_ev_int("heur4_colors_tried", i);
1285 /* free all intermediate created chunks except best one */
1286 while (! waitq_empty(tmp_chunks)) {
1287 aff_chunk_t *tmp = waitq_get(tmp_chunks);
1288 if (tmp != best_chunk)
1289 delete_aff_chunk(env, tmp);
1291 del_waitq(tmp_chunks);
1293 /* return if coloring failed */
1296 del_waitq(best_starts);
1300 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1301 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1302 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1304 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1305 const ir_node *irn = best_chunk->n[idx];
1306 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1309 /* bring the node to the color. */
1310 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1311 INIT_LIST_HEAD(&changed);
1313 res = change_node_color(env, node, best_color, &changed);
1314 stat_ev_tim_pop("heur4_recolor");
1316 materialize_coloring(&changed);
1319 assert(list_empty(&changed));
1322 /* remove the nodes in best chunk from original chunk */
1323 len = ARR_LEN(best_chunk->n);
1324 for (idx = 0; idx < len; ++idx) {
1325 const ir_node *irn = best_chunk->n[idx];
1326 int pos = nodes_bsearch(c->n, irn);
1331 len = ARR_LEN(c->n);
1332 for (idx = nidx = 0; idx < len; ++idx) {
1333 const ir_node *irn = c->n[idx];
1339 ARR_SHRINKLEN(c->n, nidx);
1342 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1343 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1344 const ir_node *n = c->n[idx];
1345 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1349 /* fragment the remaining chunk */
1350 visited = bitset_irg_malloc(env->co->irg);
1351 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1352 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1354 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1355 const ir_node *irn = c->n[idx];
1356 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1357 aff_chunk_t *new_chunk = new_aff_chunk(env);
1358 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1360 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1361 aff_chunk_assure_weight(env, new_chunk);
1362 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1366 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1367 const ir_node *n = best_chunk->n[idx];
1368 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1372 /* clear obsolete chunks and free some memory */
1373 delete_aff_chunk(env, best_chunk);
1374 bitset_free(visited);
1376 del_waitq(best_starts);
1378 stat_ev_ctx_pop("heur4_color_chunk");
1382 * Main driver for mst safe coalescing algorithm.
/**
 * Main driver for the MST-safe coalescing algorithm ("heur4").
 *
 * Sets up a co_mst_env_t for the given copy-opt problem, builds affinity
 * chunks, colors them in priority (weight) order, and finally writes the
 * computed colors back as register assignments on the IR nodes.
 *
 * NOTE(review): this excerpt is a sampled view of the file — several body
 * lines (declarations of the loop counters and of irn/mirn/k, some
 * statements, the final return and the closing brace) are not visible
 * here; comments below only describe what the visible lines do.
 *
 * @param co  the copy optimization environment (register class, IFG, ...)
 * @return    solver status (return statement outside the visible excerpt)
 */
1384 int co_solve_heuristic_mst(copy_opt_t *co) {
1385 unsigned n_regs = co->cls->n_regs;
/* registers that must not be used for this class (scratch bitset on the stack) */
1386 bitset_t *ignore_regs = bitset_alloca(n_regs);
1389 co_mst_env_t mst_env;
/* phase provides per-node co_mst_irn_t data, lazily created by co_mst_irn_init */
1396 phase_init(&mst_env.ph, "co_mst", co->irg, PHASE_DEFAULT_GROWTH, co_mst_irn_init, &mst_env);
/* collect the ignored registers; k is declared outside the visible excerpt */
1398 k = be_put_ignore_regs(co->cenv->birg, co->cls, ignore_regs);
1401 mst_env.n_regs = n_regs;
/* priority queue of affinity chunks, ordered by chunk weight */
1403 mst_env.chunks = new_pqueue();
1405 mst_env.ignore_regs = ignore_regs;
1406 mst_env.ifg = co->cenv->ifg;
1407 mst_env.chunkset = pset_new_ptr(512);
1408 mst_env.chunk_visited = 0;
/* one precomputed col_cost_t vector per register index, see loop below */
1409 mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
1411 for (i = 0; i < n_regs; ++i) {
1412 col_cost_t *vec = phase_alloc(&mst_env.ph, sizeof(*vec) * n_regs);
1414 mst_env.single_cols[i] = vec;
/* initialize all entries to cost 0 ... */
1415 for (j = 0; j < n_regs; ++j) {
1417 vec[j].cost = REAL(0.0);
/* ... then mark the first slot with cost 1; the col assignments and the
   swap that puts color i in front lie in lines not visible here */
1421 vec[0].cost = REAL(1.0);
1424 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
1426 /* build affinity chunks */
1428 build_affinity_chunks(&mst_env);
1429 stat_ev_tim_pop("heur4_initial_chunk");
1431 /* color chunks as long as there are some */
1432 while (! pqueue_empty(mst_env.chunks)) {
/* heaviest chunk first: color it, then free it */
1433 aff_chunk_t *chunk = pqueue_pop_front(mst_env.chunks);
1435 color_aff_chunk(&mst_env, chunk);
1436 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1437 delete_aff_chunk(&mst_env, chunk);
1440 /* apply coloring */
1441 foreach_phase_irn(&mst_env.ph, irn) {
1443 const arch_register_t *reg;
/* ignored nodes never get a register from this pass */
1445 if (arch_irn_is_ignore(irn))
1448 mirn = get_co_mst_irn(&mst_env, irn);
1449 // assert(mirn->fixed && "Node should have fixed color");
1451 /* skip nodes where color hasn't changed */
1452 if (mirn->init_col == mirn->col)
/* write the chosen color back as the node's register */
1455 reg = arch_register_for_index(co->cls, mirn->col);
1456 arch_set_irn_register(irn, reg);
1457 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1460 /* free allocated memory */
1461 del_pqueue(mst_env.chunks);
1462 phase_free(&mst_env.ph);
1463 del_pset(mst_env.chunkset);
1465 stat_ev_tim_pop("heur4_total");
/* Command-line options of the heur4 coalescer, registered under
   be.ra.chordal.co.heur4 in be_init_copyheur4() below:
   - "limit": recoloring depth limit (backs the recolor_limit global)
   - "di":    dislike influence factor (backs dislike_influence)
   NOTE(review): the table terminator (LC_OPT_LAST and closing brace) lies
   outside the visible excerpt. */
1470 static const lc_opt_table_entry_t options[] = {
1471 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1472 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
/**
 * Module constructor: registers the heur4 option table under the option
 * group be.ra.chordal.co.heur4 and sets up the debug module
 * "firm.be.co.heur4".
 * NOTE(review): the closing brace of this function lies outside the
 * visible excerpt.
 */
1477 void be_init_copyheur4(void) {
/* walk/create the nested option group hierarchy be -> ra -> chordal -> co -> heur4 */
1478 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1479 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1480 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1481 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1482 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1484 lc_opt_add_table(heur4_grp, options);
/* acquire the debug module handle used by the DB()/DBG() macros above */
1485 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");
/* Hook be_init_copyheur4 into the backend's module-initialization list. */
1489 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4);