2 * Copyright (C) 1995-2008 University of Karlsruhe. All right reserved.
4 * This file is part of libFirm.
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * @brief Simple copy minimization heuristics.
23 * @author Christian Wuerdig
27 * This is the C implementation of the mst algorithm
28 * originally written in Java by Sebastian Hack.
29 * (also known as "heur3" :)
30 * Performs simple copy minimization.
34 #define DISABLE_STATEV
41 #include "raw_bitset.h"
42 #include "irphase_t.h"
58 #include "becopyopt_t.h"
62 #define COL_COST_INFEASIBLE DBL_MAX
63 #define AFF_NEIGHBOUR_FIX_BENEFIT 128.0
64 #define NEIGHBOUR_CONSTR_COSTS 64.0
69 #define DBG_AFF_CHUNK(env, level, chunk) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_aff_chunk((env), (chunk)); } while(0)
70 #define DBG_COL_COST(env, level, cost) do { if (firm_dbg_get_mask(dbg) & (level)) dbg_col_cost((env), (cost)); } while(0)
72 static firm_dbg_module_t *dbg = NULL;
76 #define DBG_AFF_CHUNK(env, level, chunk)
77 #define DBG_COL_COST(env, level, cost)
82 #define REAL(C) (C ## f)
84 static unsigned last_chunk_id = 0;
85 static int recolor_limit = 7;
86 static real_t dislike_influence = REAL(0.1);
88 typedef struct _col_cost_t {
/** A chunk of affinity-related nodes that is colored as one unit. */
typedef struct _aff_chunk_t {
	const ir_node **n; /**< An ARR_F containing all nodes of the chunk, kept sorted (see nodes_insert). */
	const ir_node **interfere; /**< An ARR_F containing all interfering neighbours of the chunk's nodes. */
	int weight; /**< Weight of this chunk */
	unsigned weight_consistent : 1; /**< Set if the weight is consistent. */
	unsigned deleted : 1; /**< For debugging: Set if the chunk was deleted. */
	unsigned id; /**< An id of this chunk. */
	col_cost_t color_affinity[1]; /**< Per-color affinity scores; allocated with n_regs entries (see new_aff_chunk). */
/** An affinity edge between two nodes, collected while building chunks. */
typedef struct _aff_edge_t {
	const ir_node *src; /**< Source node. */
	const ir_node *tgt; /**< Target node. */
	int weight; /**< The weight of this edge. */
/* main coalescing environment */
typedef struct _co_mst_env_t {
	int n_regs; /**< number of regs in class */
	int k; /**< number of non-ignore registers in class */
	bitset_t *ignore_regs; /**< set containing all global ignore registers */
	ir_phase ph; /**< phase object holding data for nodes */
	pqueue_t *chunks; /**< priority queue for chunks */
	pset *chunkset; /**< set holding all chunks */
	be_ifg_t *ifg; /**< the interference graph */
	copy_opt_t *co; /**< the copy opt object */
	unsigned chunk_visited; /**< visitation counter used to mark chunks seen during one coloring pass */
	col_cost_t **single_cols; /**< per-color cost arrays, indexed by target color (see change_node_color); presumably precomputed -- TODO confirm */
/* stores coalescing related information for a node */
typedef struct _co_mst_irn_t {
	const ir_node *irn; /**< the irn this information belongs to */
	aff_chunk_t *chunk; /**< the chunk this irn belongs to */
	bitset_t *adm_colors; /**< set of admissible colors for this irn */
	ir_node **int_neighs; /**< array of all interfering neighbours (cached for speed reasons) */
	int n_neighs; /**< length of the interfering neighbours array. */
	int int_aff_neigh; /**< number of interfering affinity neighbours */
	int col; /**< color currently assigned */
	int init_col; /**< the initial color */
	int tmp_col; /**< a temporary assigned color */
	unsigned fixed : 1; /**< the color is fixed */
	struct list_head list; /**< Queue for coloring undo. */
	real_t constr_factor; /**< how constrained the node is: (1 + n_regs - |adm_colors|) / n_regs, set in co_mst_irn_init */
/** Get (or lazily create via the phase) the co_mst_irn_t info attached to @p irn. */
#define get_co_mst_irn(mst_env, irn) (phase_get_or_set_irn_data(&(mst_env)->ph, (irn)))

/** Decision callback: returns non-zero if @p node matches the queried color @p col. */
typedef int decide_func_t(const co_mst_irn_t *node, int col);
153 * Write a chunk to stderr for debugging.
static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c) {
	/* print the weight only when it is known to be up to date */
	if (c->weight_consistent)
		ir_fprintf(stderr, " $%d ", c->weight);
	ir_fprintf(stderr, "{");
	for (i = 0, l = ARR_LEN(c->n); i < l; ++i) {
		const ir_node *n = c->n[i];
		ir_fprintf(stderr, " %+F,", n);
	ir_fprintf(stderr, "}");
169 * Dump all admissible colors to stderr.
static void dbg_admissible_colors(const co_mst_env_t *env, const co_mst_irn_t *node) {
	/* a node without any admissible color would be uncolorable */
	if (bitset_popcnt(node->adm_colors) < 1)
		fprintf(stderr, "no admissible colors?!?");
	/* list every admissible color index */
	bitset_foreach(node->adm_colors, idx) {
		fprintf(stderr, " %d", idx);
185 * Dump color-cost pairs to stderr.
static void dbg_col_cost(const co_mst_env_t *env, const col_cost_t *cost) {
	/* one (color, cost) pair per register of the class */
	for (i = 0; i < env->n_regs; ++i)
		fprintf(stderr, " (%d, %.4f)", cost[i].col, cost[i].cost);
193 #endif /* DEBUG_libfirm */
195 static INLINE int get_mst_irn_col(const co_mst_irn_t *node) {
196 return node->tmp_col >= 0 ? node->tmp_col : node->col;
200 * @return 1 if node @p node has color @p col, 0 otherwise.
202 static int decider_has_color(const co_mst_irn_t *node, int col) {
203 return get_mst_irn_col(node) == col;
207 * @return 1 if node @p node has not color @p col, 0 otherwise.
209 static int decider_hasnot_color(const co_mst_irn_t *node, int col) {
210 return get_mst_irn_col(node) != col;
214 * Always returns true.
216 static int decider_always_yes(const co_mst_irn_t *node, int col) {
/** Compares two affinity edges by their weight. */
static int cmp_aff_edge(const void *a, const void *b) {
	const aff_edge_t *e1 = a;
	const aff_edge_t *e2 = b;
	if (e2->weight == e1->weight) {
		/* tie-break on node indices so qsort produces a deterministic order */
		if (e2->src->node_idx == e1->src->node_idx)
			return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
		return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
	/* sort in descending order */
	return QSORT_CMP(e2->weight, e1->weight);
/** Compares two color-cost pairs. */
/** Ascending comparison of two color-cost pairs (currently unused). */
static __attribute__((unused)) int cmp_col_cost_lt(const void *a, const void *b) {
	const col_cost_t *c1 = a;
	const col_cost_t *c2 = b;
	real_t diff = c1->cost - c2->cost;
	/* yields -1, 0 or +1 without casting the float difference to int */
	return (diff > 0) - (diff < 0);
245 static int cmp_col_cost_gt(const void *a, const void *b) {
246 const col_cost_t *c1 = a;
247 const col_cost_t *c2 = b;
248 real_t diff = c2->cost - c1->cost;
249 return (diff > 0) - (diff < 0);
253 * Creates a new affinity chunk
static INLINE aff_chunk_t *new_aff_chunk(co_mst_env_t *env) {
	/* the struct ends in a color_affinity[] array: allocate n_regs entries */
	aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
	c->n = NEW_ARR_F(const ir_node *, 0);
	c->interfere = NEW_ARR_F(const ir_node *, 0);
	c->weight_consistent = 0;
	/* ids start at 1, so 0 can denote "no chunk" in debug output */
	c->id = ++last_chunk_id;
	pset_insert(env->chunkset, c, c->id);
269 * Frees all memory allocated by an affinity chunk.
static INLINE void delete_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
	/* unregister the chunk before releasing its flexible arrays */
	pset_remove(env->chunkset, c, c->id);
	DEL_ARR_F(c->interfere);
280 * binary search of sorted nodes.
282 * @return the position where n is found in the array arr or ~pos
283 * if the nodes is not here.
static INLINE int nodes_bsearch(const ir_node **arr, const ir_node *n) {
	int hi = ARR_LEN(arr);
	/* overflow-safe midpoint computation */
	int md = lo + ((hi - lo) >> 1);
303 /** Check if a node n can be found inside arr. */
static int node_contains(const ir_node **arr, const ir_node *n) {
	/* nodes_bsearch returns ~pos (a negative value) on a miss */
	int i = nodes_bsearch(arr, n);
310 * Insert a node into the sorted nodes list.
312 * @return 1 if the node was inserted, 0 else
static int nodes_insert(const ir_node ***arr, const ir_node *irn) {
	/* ~idx encodes the insertion point when irn is not yet present */
	int idx = nodes_bsearch(*arr, irn);
	int i, n = ARR_LEN(*arr);
	/* grow by one, then shift the tail right to keep the array sorted */
	ARR_APP1(const ir_node *, *arr, irn);
	for (i = n - 1; i >= idx; --i)
335 * Adds a node to an affinity chunk
static INLINE void aff_chunk_add_node(aff_chunk_t *c, co_mst_irn_t *node) {
	/* nothing to do when the node is already part of the chunk */
	if (! nodes_insert(&c->n, node->irn))
	c->weight_consistent = 0;
	/* merge the node's cached interference neighbours into the chunk's set */
	for (i = node->n_neighs - 1; i >= 0; --i) {
		ir_node *neigh = node->int_neighs[i];
		nodes_insert(&c->interfere, neigh);
353 * In case there is no phase information for irn, initialize it.
static void *co_mst_irn_init(ir_phase *ph, const ir_node *irn, void *old) {
	co_mst_irn_t *res = old ? old : phase_alloc(ph, sizeof(res[0]));
	co_mst_env_t *env = ph->priv;
	const arch_register_req_t *req;
	void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
	res->int_neighs = NULL;
	res->int_aff_neigh = 0;
	/* start with the color the register allocator already assigned */
	res->col = arch_register_get_index(arch_get_irn_register(irn));
	res->init_col = res->col;
	INIT_LIST_HEAD(&res->list);
	DB((dbg, LEVEL_4, "Creating phase info for %+F\n", irn));
	/* set admissible registers */
	res->adm_colors = bitset_obstack_alloc(phase_obst(ph), env->n_regs);
	/* Exclude colors not assignable to the irn */
	req = arch_get_register_req(irn, -1);
	if (arch_register_req_is(req, limited))
		rbitset_copy_to_bitset(req->limited, res->adm_colors);
	/* NOTE(review): an else branch presumably separates the unconstrained
	   case (all colors admissible) from the limited one -- confirm */
		bitset_set_all(res->adm_colors);
	/* exclude global ignore registers as well */
	bitset_andnot(res->adm_colors, env->ignore_regs);
	/* compute the constraint factor: fewer admissible colors -> larger factor */
	res->constr_factor = (real_t) (1 + env->n_regs - bitset_popcnt(res->adm_colors)) / env->n_regs;
	/* set the number of interfering affinity neighbours to -1, they are calculated later */
	res->int_aff_neigh = -1;
	/* build list of interfering neighbours, collected on the phase obstack */
	be_ifg_foreach_neighbour(env->ifg, nodes_it, irn, neigh) {
		if (!arch_irn_is(neigh, ignore)) {
			obstack_ptr_grow(phase_obst(ph), neigh);
	res->int_neighs = obstack_finish(phase_obst(ph));
411 * Check if affinity chunk @p chunk interferes with node @p irn.
413 static INLINE int aff_chunk_interferes(const aff_chunk_t *chunk, const ir_node *irn) {
414 return node_contains(chunk->interfere, irn);
418 * Check if there are interference edges from c1 to c2.
420 * @param c2 Another chunk
421 * @return 1 if there are interferences between nodes of c1 and c2, 0 otherwise.
static INLINE int aff_chunks_interfere(const aff_chunk_t *c1, const aff_chunk_t *c2) {
	/* check if there is a node in c2 having an interfering neighbor in c1 */
	for (i = ARR_LEN(c2->n) - 1; i >= 0; --i) {
		const ir_node *irn = c2->n[i];
		/* c1->interfere is sorted, so this membership test is a binary search */
		if (node_contains(c1->interfere, irn))
440 * Returns the affinity chunk of @p irn or creates a new
441 * one with @p irn as element if there is none assigned.
static INLINE aff_chunk_t *get_aff_chunk(co_mst_env_t *env, const ir_node *irn) {
	co_mst_irn_t *node = get_co_mst_irn(env, irn);
	/* NOTE(review): presumably returns node->chunk (may be NULL); creation is
	   handled by callers such as aff_chunk_absorb -- confirm against full source */
449 * Let chunk(src) absorb the nodes of chunk(tgt) (only possible when there
450 * are no interference edges from chunk(src) to chunk(tgt)).
451 * @return 1 if successful, 0 if not possible
static int aff_chunk_absorb(co_mst_env_t *env, const ir_node *src, const ir_node *tgt) {
	aff_chunk_t *c1 = get_aff_chunk(env, src);
	aff_chunk_t *c2 = get_aff_chunk(env, tgt);
	DB((dbg, LEVEL_4, "Attempt to let c1 (id %u): ", c1 ? c1->id : 0));
	DBG_AFF_CHUNK(env, LEVEL_4, c1);
	DB((dbg, LEVEL_4, "{%+F}", src));
	DB((dbg, LEVEL_4, "\n\tabsorb c2 (id %u): ", c2 ? c2->id : 0));
	DBG_AFF_CHUNK(env, LEVEL_4, c2);
	DB((dbg, LEVEL_4, "{%+F}", tgt));
	DB((dbg, LEVEL_4, "\n"));
	/* no chunk exists */
	co_mst_irn_t *mirn = get_co_mst_irn(env, src);
	/* src and tgt must not interfere directly if they are to share a chunk */
	for (i = mirn->n_neighs - 1; i >= 0; --i) {
		if (mirn->int_neighs[i] == tgt)
	/* create one containing both nodes */
	c1 = new_aff_chunk(env);
	aff_chunk_add_node(c1, get_co_mst_irn(env, src));
	aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
	/* c2 already exists */
	if (! aff_chunk_interferes(c2, src)) {
		aff_chunk_add_node(c2, get_co_mst_irn(env, src));
	} else if (c2 == NULL) {
		/* c1 already exists */
		if (! aff_chunk_interferes(c1, tgt)) {
			aff_chunk_add_node(c1, get_co_mst_irn(env, tgt));
	} else if (c1 != c2 && ! aff_chunks_interfere(c1, c2)) {
		/* both chunks exist and do not interfere: c1 swallows all of c2 */
		for (idx = 0, len = ARR_LEN(c2->n); idx < len; ++idx)
			aff_chunk_add_node(c1, get_co_mst_irn(env, c2->n[idx]));
		for (idx = 0, len = ARR_LEN(c2->interfere); idx < len; ++idx) {
			const ir_node *irn = c2->interfere[idx];
			nodes_insert(&c1->interfere, irn);
		c1->weight_consistent = 0;
		delete_aff_chunk(env, c2);
		DB((dbg, LEVEL_4, " ... c1 interferes with c2, skipped\n"));
	DB((dbg, LEVEL_4, " ... absorbed\n"));
528 * Assures that the weight of the given chunk is consistent.
static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c) {
	if (! c->weight_consistent) {
		/* reset the per-color affinity scores */
		for (i = 0; i < env->n_regs; ++i) {
			c->color_affinity[i].col = i;
			c->color_affinity[i].cost = REAL(0.0);
		for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
			const ir_node *n = c->n[idx];
			const affinity_node_t *an = get_affinity_info(env->co, n);
			co_mst_irn_t *node = get_co_mst_irn(env, n);
			/* constrained nodes contribute their factor to every admissible color */
			if (node->constr_factor > REAL(0.0)) {
				bitset_foreach (node->adm_colors, col)
					c->color_affinity[col].cost += node->constr_factor;
			/* sum the affinity costs towards neighbours inside the same chunk */
			co_gs_foreach_neighb(an, neigh) {
				const ir_node *m = neigh->irn;
				/* skip ignore nodes */
				if (arch_irn_is(m, ignore))
				w += node_contains(c->n, m) ? neigh->costs : 0;
		/* normalize the color scores by the chunk size */
		for (i = 0; i < env->n_regs; ++i)
			c->color_affinity[i].cost *= (REAL(1.0) / ARR_LEN(c->n));
		// c->weight = bitset_popcnt(c->nodes);
		c->weight_consistent = 1;
576 * Count the number of interfering affinity neighbours
static int count_interfering_aff_neighs(co_mst_env_t *env, const affinity_node_t *an) {
	const neighb_t *neigh;
	const ir_node *irn = an->irn;
	const co_mst_irn_t *node = get_co_mst_irn(env, irn);
	co_gs_foreach_neighb(an, neigh) {
		const ir_node *n = neigh->irn;
		/* skip ignore nodes */
		if (arch_irn_is(n, ignore))
		/* check if the affinity neighbour interfere */
		/* linear scan over the cached interference neighbours */
		for (i = 0; i < node->n_neighs; ++i) {
			if (node->int_neighs[i] == n) {
605 * Build chunks of nodes connected by affinity edges.
606 * We start at the heaviest affinity edge.
607 * The chunks of the two edge-defining nodes will be
608 * merged if there are no interference edges from one
609 * chunk to the other.
static void build_affinity_chunks(co_mst_env_t *env) {
	void *nodes_it = be_ifg_nodes_iter_alloca(env->ifg);
	aff_edge_t *edges = NEW_ARR_F(aff_edge_t, 0);
	aff_chunk_t *curr_chunk;
	/* at first we create the affinity edge objects */
	be_ifg_foreach_node(env->ifg, nodes_it, n) {
		int n_idx = get_irn_idx(n);
		/* skip ignore nodes */
		if (arch_irn_is(n, ignore))
		n1 = get_co_mst_irn(env, n);
		an = get_affinity_info(env->co, n);
		/* lazily count the interfering affinity neighbours (-1 = not yet computed) */
		if (n1->int_aff_neigh < 0)
			n1->int_aff_neigh = count_interfering_aff_neighs(env, an);
		/* build the affinity edges */
		co_gs_foreach_neighb(an, neigh) {
			const ir_node *m = neigh->irn;
			int m_idx = get_irn_idx(m);
			/* record the edge in only one direction */
			/* skip ignore nodes */
			if (arch_irn_is(m, ignore))
			n2 = get_co_mst_irn(env, m);
			if (n2->int_aff_neigh < 0) {
				affinity_node_t *am = get_affinity_info(env->co, m);
				n2->int_aff_neigh = count_interfering_aff_neighs(env, am);
			/*
			 * these weights are pure hackery ;-).
			 * It's not chriswue's fault but mine.
			 */
			edge.weight = neigh->costs;
			ARR_APP1(aff_edge_t, edges, edge);
	/* now: sort edges and build the affinity chunks */
	len = ARR_LEN(edges);
	qsort(edges, len, sizeof(edges[0]), cmp_aff_edge);
	/* heaviest edges first: let chunks absorb each other greedily */
	for (i = 0; i < len; ++i) {
		DBG((dbg, LEVEL_1, "edge (%u,%u) %f\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight));
		(void)aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
	/* now insert all chunks into a priority queue */
	foreach_pset(env->chunkset, curr_chunk) {
		aff_chunk_assure_weight(env, curr_chunk);
		DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
		DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
		DBG((dbg, LEVEL_1, "\n"));
		pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
	/* every node that ended up in no chunk gets a singleton chunk */
	foreach_phase_irn(&env->ph, n) {
		co_mst_irn_t *mirn = get_co_mst_irn(env, n);
		if (mirn->chunk == NULL) {
			/* no chunk is allocated so far, do it now */
			aff_chunk_t *curr_chunk = new_aff_chunk(env);
			aff_chunk_add_node(curr_chunk, mirn);
			aff_chunk_assure_weight(env, curr_chunk);
			DBG((dbg, LEVEL_1, "entry #%u", curr_chunk->id));
			DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
			DBG((dbg, LEVEL_1, "\n"));
			pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
/** Order the nodes of @p chunk by growing best-first from the heaviest node (unused). */
static __attribute__((unused)) void chunk_order_nodes(co_mst_env_t *env, aff_chunk_t *chunk)
	pqueue_t *grow = new_pqueue();
	const ir_node *max_node = NULL;
	/* pick the node with the largest summed affinity cost as the seed */
	for (i = ARR_LEN(chunk->n) - 1; i >= 0; i--) {
		const ir_node *irn = chunk->n[i];
		affinity_node_t *an = get_affinity_info(env->co, irn);
		if (arch_irn_is(irn, ignore))
		co_gs_foreach_neighb(an, neigh)
		if (w > max_weight) {
	bitset_t *visited = bitset_irg_malloc(env->co->irg);
	/* mark all chunk members; they are un-marked as they get ordered */
	for (i = ARR_LEN(chunk->n) - 1; i >= 0; --i)
		bitset_add_irn(visited, chunk->n[i]);
	pqueue_put(grow, (void *) max_node, max_weight);
	bitset_remv_irn(visited, max_node);
	/* best-first growth starting at the seed node */
	while (!pqueue_empty(grow)) {
		ir_node *irn = pqueue_pop_front(grow);
		affinity_node_t *an = get_affinity_info(env->co, irn);
		if (arch_irn_is(irn, ignore))
		assert(i <= ARR_LEN(chunk->n));
		/* build the affinity edges */
		co_gs_foreach_neighb(an, neigh) {
			co_mst_irn_t *node = get_co_mst_irn(env, neigh->irn);
			if (bitset_contains_irn(visited, node->irn)) {
				pqueue_put(grow, (void *) neigh->irn, neigh->costs);
				bitset_remv_irn(visited, node->irn);
	bitset_free(visited);
 * Greedily collect affinity neighbours into the new chunk @p chunk starting at node @p node.
static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node, bitset_t *visited,
	aff_chunk_t *chunk, aff_chunk_t *orig_chunk, decide_func_t *decider, int col)
	waitq *nodes = new_waitq();
	DBG((dbg, LEVEL_1, "\n\tExpanding new chunk (#%u) from %+F, color %d:", chunk->id, node->irn, col));
	/* init queue and chunk */
	waitq_put(nodes, node);
	bitset_set(visited, get_irn_idx(node->irn));
	aff_chunk_add_node(chunk, node);
	DB((dbg, LEVEL_1, " %+F", node->irn));
	/* as long as there are nodes in the queue */
	while (! waitq_empty(nodes)) {
		co_mst_irn_t *n = waitq_get(nodes);
		affinity_node_t *an = get_affinity_info(env->co, n->irn);
		/* check all affinity neighbors */
		co_gs_foreach_neighb(an, neigh) {
			const ir_node *m = neigh->irn;
			int m_idx = get_irn_idx(m);
			/* skip ignore nodes */
			if (arch_irn_is(m, ignore))
			n2 = get_co_mst_irn(env, m);
			if (! bitset_is_set(visited, m_idx) &&
			    ! aff_chunk_interferes(chunk, m) &&
			    node_contains(orig_chunk->n, m))
			{
				/* the neighbour is added when the
				   following conditions are met:
				   - neighbour is not visited
				   - neighbour likes the color
				   - neighbour has not yet a fixed color
				   - the new chunk doesn't interfere with the neighbour
				   - neighbour belongs or belonged once to the original chunk
				*/
				bitset_set(visited, m_idx);
				aff_chunk_add_node(chunk, n2);
				DB((dbg, LEVEL_1, " %+F", n2->irn));
				/* enqueue for further search */
				waitq_put(nodes, n2);
	DB((dbg, LEVEL_1, "\n"));
841 * Fragment the given chunk into chunks having given color and not having given color.
static aff_chunk_t *fragment_chunk(co_mst_env_t *env, int col, aff_chunk_t *c, waitq *tmp) {
	bitset_t *visited = bitset_irg_malloc(env->co->irg);
	aff_chunk_t *best = NULL;
	for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
		aff_chunk_t *tmp_chunk;
		decide_func_t *decider;
		/* nodes already placed into some fragment are skipped */
		if (bitset_is_set(visited, get_irn_idx(irn)))
		node = get_co_mst_irn(env, irn);
		/* pick the decider matching the node's current color */
		if (get_mst_irn_col(node) == col) {
			decider = decider_has_color;
			DBG((dbg, LEVEL_4, "\tcolor %d wanted\n", col));
			decider = decider_hasnot_color;
			DBG((dbg, LEVEL_4, "\tcolor %d forbidden\n", col));
		/* create a new chunk starting at current node */
		tmp_chunk = new_aff_chunk(env);
		waitq_put(tmp, tmp_chunk);
		expand_chunk_from(env, node, visited, tmp_chunk, c, decider, col);
		assert(ARR_LEN(tmp_chunk->n) > 0 && "No nodes added to chunk");
		/* remember the local best */
		aff_chunk_assure_weight(env, tmp_chunk);
		if (check_for_best && (! best || best->weight < tmp_chunk->weight))
	assert(best && "No chunk found?");
	bitset_free(visited);
890 * Resets the temporary fixed color of all nodes within wait queue @p nodes.
891 * ATTENTION: the queue is empty after calling this function!
static INLINE void reject_coloring(struct list_head *nodes) {
	co_mst_irn_t *n, *temp;
	DB((dbg, LEVEL_4, "\treject coloring for"));
	list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
		DB((dbg, LEVEL_4, " %+F", n->irn));
		assert(n->tmp_col >= 0);
		/* unlink the node from the changed-list once its tmp color is dropped */
		list_del_init(&n->list);
	DB((dbg, LEVEL_4, "\n"));
/** Commit the temporary colors of all nodes in @p nodes; the list is emptied. */
static INLINE void materialize_coloring(struct list_head *nodes) {
	co_mst_irn_t *n, *temp;
	list_for_each_entry_safe(co_mst_irn_t, n, temp, nodes, list) {
		assert(n->tmp_col >= 0);
		list_del_init(&n->list);
/** Temporarily assign color @p col to @p node and record it on @p changed for undo. */
static INLINE void set_temp_color(co_mst_irn_t *node, int col, struct list_head *changed)
	assert(!node->fixed);
	assert(node->tmp_col < 0);
	/* node must not already be linked into some changed-list */
	assert(node->list.next == &node->list && node->list.prev == &node->list);
	assert(bitset_is_set(node->adm_colors, col));
	list_add_tail(&node->list, changed);
/** A node is "loose" if its color is neither fixed nor temporarily set. */
static INLINE int is_loose(co_mst_irn_t *node)
	return !node->fixed && node->tmp_col < 0;
933 * Determines the costs for each color if it would be assigned to node @p node.
static void determine_color_costs(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs) {
	int *neigh_cols = alloca(env->n_regs * sizeof(*neigh_cols));
	/* base score: admissible colors start at the node's constraint factor,
	   inadmissible ones at 0.0 (0.0 marks a color infeasible in this model) */
	for (i = 0; i < env->n_regs; ++i) {
		costs[i].cost = bitset_is_set(node->adm_colors, i) ? node->constr_factor : REAL(0.0);
	/* colors taken by interfering neighbours are ruled out */
	for (i = 0; i < node->n_neighs; ++i) {
		co_mst_irn_t *n = get_co_mst_irn(env, node->int_neighs[i]);
		int col = get_mst_irn_col(n);
		costs[col].cost = REAL(0.0);
	/* NOTE(review): neigh_cols presumably counts loose neighbours per color;
	   each such neighbour scales the color's score down -- confirm */
	coeff = REAL(1.0) / n_loose;
	for (i = 0; i < env->n_regs; ++i)
		costs[i].cost *= REAL(1.0) - coeff * neigh_cols[i];
964 /* need forward declaration due to recursive call */
965 static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed_ones, int depth, int *max_depth, int *trip);
 * Tries to change the color of @p node to any color but @p exclude_col.
969 * @return 1 if succeeded, 0 otherwise.
static int change_node_color_excluded(co_mst_env_t *env, co_mst_irn_t *node, int exclude_col, struct list_head *changed, int depth, int *max_depth, int *trip) {
	int col = get_mst_irn_col(node);
	/* neighbours has already a different color -> good, temporary fix it */
	if (col != exclude_col) {
		set_temp_color(node, col, changed);
	/* The node has the color it should not have _and_ has not been visited yet. */
	if (is_loose(node)) {
		col_cost_t *costs = alloca(env->n_regs * sizeof(costs[0]));
		/* Get the costs for giving the node a specific color. */
		determine_color_costs(env, node, costs);
		/* The node must not keep exclude_col: a zero score marks that color
		   as infeasible in this cost model (cf. the bail-out in recolor_nodes). */
		costs[exclude_col].cost = REAL(0.0);
		/* sort by descending score, i.e. most desirable color first */
		qsort(costs, env->n_regs, sizeof(costs[0]), cmp_col_cost_gt);
		/* Try recoloring the node using the color list. */
		res = recolor_nodes(env, node, costs, changed, depth + 1, max_depth, trip);
1003 * Tries to bring node @p node to cheapest color and color all interfering neighbours with other colors.
1004 * ATTENTION: Expect @p costs already sorted by increasing costs.
1005 * @return 1 if coloring could be applied, 0 otherwise.
static int recolor_nodes(co_mst_env_t *env, co_mst_irn_t *node, col_cost_t *costs, struct list_head *changed, int depth, int *max_depth, int *trip) {
	struct list_head local_changed;
	/* track the deepest recursion reached, for statistics */
	if (depth > *max_depth)
	DBG((dbg, LEVEL_4, "\tRecoloring %+F with color-costs", node->irn));
	DBG_COL_COST(env, LEVEL_4, costs);
	DB((dbg, LEVEL_4, "\n"));
	/* bound the recursion depth to keep the search tractable */
	if (depth >= recolor_limit) {
		DBG((dbg, LEVEL_4, "\tHit recolor limit\n"));
	/* try the colors in the given best-first order */
	for (i = 0; i < env->n_regs; ++i) {
		int tgt_col = costs[i].col;
		/* costs are sorted descending: a zero score means this and all
		   following colors are forbidden, bail out we won't make it anyway */
		if (costs[i].cost == REAL(0.0)) {
			DBG((dbg, LEVEL_4, "\tAll further colors forbidden\n"));
		/* Set the new color of the node and mark the node as temporarily fixed. */
		assert(node->tmp_col < 0 && "Node must not have been temporary fixed.");
		INIT_LIST_HEAD(&local_changed);
		set_temp_color(node, tgt_col, &local_changed);
		DBG((dbg, LEVEL_4, "\tTemporary setting %+F to color %d\n", node->irn, tgt_col));
		/* try to color all interfering neighbours with current color forbidden */
		for (j = 0; j < node->n_neighs; ++j) {
			neigh = node->int_neighs[j];
			/* skip ignore nodes */
			if (arch_irn_is(neigh, ignore))
			nn = get_co_mst_irn(env, neigh);
			DB((dbg, LEVEL_4, "\tHandling neighbour %+F, at position %d (fixed: %d, tmp_col: %d, col: %d)\n",
				neigh, j, nn->fixed, nn->tmp_col, nn->col));
			/*
			   Try to change the color of the neighbor and record all nodes which
			   get changed in the tmp list. Add this list to the "changed" list for
			   that color. If we did not succeed to change the color of the neighbor,
			   we bail out and try the next color.
			*/
			if (get_mst_irn_col(nn) == tgt_col) {
				/* try to color neighbour with tgt_col forbidden */
				neigh_ok = change_node_color_excluded(env, nn, tgt_col, &local_changed, depth + 1, max_depth, trip);
		/*
		   We managed to assign the target color to all neighbors, so from the perspective
		   of the current node, every thing was ok and we can return safely.
		*/
		/* append the local_changed ones to global ones */
		list_splice(&local_changed, changed);
		/* coloring of neighbours failed, so we try next color */
		reject_coloring(&local_changed);
	DBG((dbg, LEVEL_4, "\tAll colors failed\n"));
1091 * Tries to bring node @p node and all it's neighbours to color @p tgt_col.
1092 * @return 1 if color @p col could be applied, 0 otherwise
static int change_node_color(co_mst_env_t *env, co_mst_irn_t *node, int tgt_col, struct list_head *changed) {
	int col = get_mst_irn_col(node);
	/* if node already has the target color -> good, temporary fix it */
	if (col == tgt_col) {
		DBG((dbg, LEVEL_4, "\t\tCNC: %+F has already color %d, fix temporary\n", node->irn, tgt_col));
		set_temp_color(node, tgt_col, changed);
	/*
	   Node has not yet a fixed color and target color is admissible
	   -> try to recolor node and it's affinity neighbours
	*/
	if (is_loose(node) && bitset_is_set(node->adm_colors, tgt_col)) {
		/* use the precomputed cost array for the target color */
		col_cost_t *costs = env->single_cols[tgt_col];
		int res, max_depth, trip;
		DBG((dbg, LEVEL_4, "\t\tCNC: Attempt to recolor %+F ===>>\n", node->irn));
		res = recolor_nodes(env, node, costs, changed, 0, &max_depth, &trip);
		DBG((dbg, LEVEL_4, "\t\tCNC: <<=== Recoloring of %+F %s\n", node->irn, res ? "succeeded" : "failed"));
		stat_ev_int("heur4_recolor_depth_max", max_depth);
		stat_ev_int("heur4_recolor_trip", trip);
#ifdef DEBUG_libfirm
	/* report why the color change was not attempted */
	if (firm_dbg_get_mask(dbg) & LEVEL_4) {
		if (!is_loose(node))
			DB((dbg, LEVEL_4, "\t\tCNC: %+F has already fixed color %d\n", node->irn, col));
			DB((dbg, LEVEL_4, "\t\tCNC: color %d not admissible for %+F (", tgt_col, node->irn));
			dbg_admissible_colors(env, node);
			DB((dbg, LEVEL_4, ")\n"));
1142 * Tries to color an affinity chunk (or at least a part of it).
1143 * Inserts uncolored parts of the chunk as a new chunk into the priority queue.
1145 static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c) {
1146 aff_chunk_t *best_chunk = NULL;
1147 int n_nodes = ARR_LEN(c->n);
1148 int best_color = -1;
1149 int n_int_chunks = 0;
1150 waitq *tmp_chunks = new_waitq();
1151 waitq *best_starts = NULL;
1152 col_cost_t *order = alloca(env->n_regs * sizeof(order[0]));
1154 int idx, len, i, nidx, pos;
1155 struct list_head changed;
1157 DB((dbg, LEVEL_2, "fragmentizing chunk #%u", c->id));
1158 DBG_AFF_CHUNK(env, LEVEL_2, c);
1159 DB((dbg, LEVEL_2, "\n"));
1161 stat_ev_ctx_push_fmt("heur4_color_chunk", "%u", c->id);
1163 ++env->chunk_visited;
1165 /* compute color preference */
1166 memset(order, 0, env->n_regs * sizeof(order[0]));
1168 for (pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
1169 const ir_node *n = c->interfere[pos];
1170 co_mst_irn_t *node = get_co_mst_irn(env, n);
1171 aff_chunk_t *chunk = node->chunk;
1173 if (is_loose(node) && chunk && chunk->visited < env->chunk_visited) {
1174 assert(!chunk->deleted);
1175 chunk->visited = env->chunk_visited;
1178 aff_chunk_assure_weight(env, chunk);
1179 for (i = 0; i < env->n_regs; ++i)
1180 order[i].cost += chunk->color_affinity[i].cost;
1184 for (i = 0; i < env->n_regs; ++i) {
1185 real_t dislike = n_int_chunks > 0 ? REAL(1.0) - order[i].cost / n_int_chunks : REAL(0.0);
1187 order[i].cost = (REAL(1.0) - dislike_influence) * c->color_affinity[i].cost + dislike_influence * dislike;
1190 qsort(order, env->n_regs, sizeof(order[0]), cmp_col_cost_gt);
1192 DBG_COL_COST(env, LEVEL_2, order);
1193 DB((dbg, LEVEL_2, "\n"));
1195 /* check which color is the "best" for the given chunk.
1196 * if we found a color which was ok for all nodes, we take it
1197 * and do not look further. (see did_all flag usage below.)
1198 * If we have many colors which fit all nodes it is hard to decide
1199 * which one to take anyway.
1200 * TODO Sebastian: Perhaps we should at all nodes and figure out
1201 * a suitable color using costs as done above (determine_color_costs).
1203 for (i = 0; i < env->k; ++i) {
1204 int col = order[i].col;
1205 waitq *good_starts = new_waitq();
1206 aff_chunk_t *local_best;
1209 /* skip ignore colors */
1210 if (bitset_is_set(env->ignore_regs, col))
1213 DB((dbg, LEVEL_2, "\ttrying color %d\n", col));
1217 /* try to bring all nodes of given chunk to the current color. */
1218 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1219 const ir_node *irn = c->n[idx];
1220 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1223 assert(! node->fixed && "Node must not have a fixed color.");
1224 DB((dbg, LEVEL_4, "\t\tBringing %+F from color %d to color %d ...\n", irn, node->col, col));
1227 The order of the colored nodes is important, so we record the successfully
1228 colored ones in the order they appeared.
1230 INIT_LIST_HEAD(&changed);
1232 good = change_node_color(env, node, col, &changed);
1233 stat_ev_tim_pop("heur4_recolor");
1235 waitq_put(good_starts, node);
1236 materialize_coloring(&changed);
1241 reject_coloring(&changed);
1243 n_succeeded += good;
1244 DB((dbg, LEVEL_4, "\t\t... %+F attempt from %d to %d %s\n", irn, node->col, col, good ? "succeeded" : "failed"));
1247 /* unfix all nodes */
1248 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1249 co_mst_irn_t *node = get_co_mst_irn(env, c->n[idx]);
1253 /* try next color when failed */
1254 if (n_succeeded == 0)
1257 /* fragment the chunk according to the coloring */
1258 local_best = fragment_chunk(env, col, c, tmp_chunks);
1260 /* search the best of the good list
1261 and make it the new best if it is better than the current */
1263 aff_chunk_assure_weight(env, local_best);
1265 DB((dbg, LEVEL_3, "\t\tlocal best chunk (id %u) for color %d: ", local_best->id, col));
1266 DBG_AFF_CHUNK(env, LEVEL_3, local_best);
1268 if (! best_chunk || best_chunk->weight < local_best->weight) {
1269 best_chunk = local_best;
1272 del_waitq(best_starts);
1273 best_starts = good_starts;
1274 DB((dbg, LEVEL_3, "\n\t\t... setting global best chunk (id %u), color %d\n", best_chunk->id, best_color));
1276 DB((dbg, LEVEL_3, "\n\t\t... omitting, global best is better\n"));
1277 del_waitq(good_starts);
1281 del_waitq(good_starts);
1284 /* if all nodes were recolored, bail out */
1285 if (n_succeeded == n_nodes)
1289 stat_ev_int("heur4_colors_tried", i);
1291 /* free all intermediate created chunks except best one */
1292 while (! waitq_empty(tmp_chunks)) {
1293 aff_chunk_t *tmp = waitq_get(tmp_chunks);
1294 if (tmp != best_chunk)
1295 delete_aff_chunk(env, tmp);
1297 del_waitq(tmp_chunks);
1299 /* return if coloring failed */
1302 del_waitq(best_starts);
1306 DB((dbg, LEVEL_2, "\tbest chunk #%u ", best_chunk->id));
1307 DBG_AFF_CHUNK(env, LEVEL_2, best_chunk);
1308 DB((dbg, LEVEL_2, "using color %d\n", best_color));
1310 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1311 const ir_node *irn = best_chunk->n[idx];
1312 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1315 /* bring the node to the color. */
1316 DB((dbg, LEVEL_4, "\tManifesting color %d for %+F, chunk #%u\n", best_color, node->irn, best_chunk->id));
1317 INIT_LIST_HEAD(&changed);
1319 res = change_node_color(env, node, best_color, &changed);
1320 stat_ev_tim_pop("heur4_recolor");
1322 materialize_coloring(&changed);
1325 assert(list_empty(&changed));
1328 /* remove the nodes in best chunk from original chunk */
1329 len = ARR_LEN(best_chunk->n);
1330 for (idx = 0; idx < len; ++idx) {
1331 const ir_node *irn = best_chunk->n[idx];
1332 int pos = nodes_bsearch(c->n, irn);
1337 len = ARR_LEN(c->n);
1338 for (idx = nidx = 0; idx < len; ++idx) {
1339 const ir_node *irn = c->n[idx];
1345 ARR_SHRINKLEN(c->n, nidx);
1348 /* we have to get the nodes back into the original chunk because they are scattered over temporary chunks */
1349 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1350 const ir_node *n = c->n[idx];
1351 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1355 /* fragment the remaining chunk */
1356 visited = bitset_irg_malloc(env->co->irg);
1357 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx)
1358 bitset_set(visited, get_irn_idx(best_chunk->n[idx]));
1360 for (idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
1361 const ir_node *irn = c->n[idx];
1362 if (! bitset_is_set(visited, get_irn_idx(irn))) {
1363 aff_chunk_t *new_chunk = new_aff_chunk(env);
1364 co_mst_irn_t *node = get_co_mst_irn(env, irn);
1366 expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
1367 aff_chunk_assure_weight(env, new_chunk);
1368 pqueue_put(env->chunks, new_chunk, new_chunk->weight);
1372 for (idx = 0, len = ARR_LEN(best_chunk->n); idx < len; ++idx) {
1373 const ir_node *n = best_chunk->n[idx];
1374 co_mst_irn_t *nn = get_co_mst_irn(env, n);
1378 /* clear obsolete chunks and free some memory */
1379 delete_aff_chunk(env, best_chunk);
1380 bitset_free(visited);
1382 del_waitq(best_starts);
1384 stat_ev_ctx_pop("heur4_color_chunk");
1388 * Main driver for mst safe coalescing algorithm.
/**
 * Main driver of the MST-based ("heur4") copy-minimization heuristic for
 * one register class: builds affinity chunks, colors them in order of
 * decreasing weight, then writes the chosen registers back to the nodes.
 *
 * NOTE(review): this dump is line-sampled; the declarations of i, j, k,
 * irn and mirn, several branch bodies, and the final return statement
 * are on elided lines — confirm against the full file.
 */
1390 int co_solve_heuristic_mst(copy_opt_t *co) {
1391 unsigned n_regs = co->cls->n_regs;
1392 bitset_t *ignore_regs = bitset_alloca(n_regs);
1395 co_mst_env_t mst_env;
/* Per-node co_mst_irn_t data lives in a phase; co_mst_irn_init is the
 * lazy initializer invoked on first access of a node. */
1402 phase_init(&mst_env.ph, "co_mst", co->irg, PHASE_DEFAULT_GROWTH, co_mst_irn_init, &mst_env);
/* Collect the registers that must not be allocated; presumably k is the
 * count of ignored registers, consumed on an elided line — TODO confirm. */
1404 k = be_put_ignore_regs(co->cenv->birg, co->cls, ignore_regs);
1407 mst_env.n_regs = n_regs;
1409 mst_env.chunks = new_pqueue();
1411 mst_env.ignore_regs = ignore_regs;
1412 mst_env.ifg = co->cenv->ifg;
1413 mst_env.chunkset = pset_new_ptr(512);
1414 mst_env.chunk_visited = 0;
1415 mst_env.single_cols = phase_alloc(&mst_env.ph, sizeof(*mst_env.single_cols) * n_regs);
/* Precompute one cost vector per color: all costs start at 0.0 with the
 * first slot weighted 1.0.  The elided lines (1419, 1422, 1424-1426,
 * 1428-1429) presumably assign the vec[j].col fields — verify. */
1417 for (i = 0; i < n_regs; ++i) {
1418 col_cost_t *vec = phase_alloc(&mst_env.ph, sizeof(*vec) * n_regs);
1420 mst_env.single_cols[i] = vec;
1421 for (j = 0; j < n_regs; ++j) {
1423 vec[j].cost = REAL(0.0);
1427 vec[0].cost = REAL(1.0);
1430 DBG((dbg, LEVEL_1, "==== Coloring %+F, class %s ====\n", co->irg, co->cls->name));
/* Phase 1: partition the affinity graph into chunks and queue them. */
1432 /* build affinity chunks */
1434 build_affinity_chunks(&mst_env);
1435 stat_ev_tim_pop("heur4_initial_chunk");
/* Phase 2: repeatedly color the heaviest remaining chunk; coloring may
 * re-queue fragments, so the queue can grow while draining. */
1437 /* color chunks as long as there are some */
1438 while (! pqueue_empty(mst_env.chunks)) {
1439 aff_chunk_t *chunk = pqueue_pop_front(mst_env.chunks);
1441 color_aff_chunk(&mst_env, chunk);
1442 DB((dbg, LEVEL_4, "<<<====== Coloring chunk (%u) done\n", chunk->id));
1443 delete_aff_chunk(&mst_env, chunk);
/* Phase 3: commit the computed coloring to the architecture registers,
 * skipping ignored nodes and nodes whose color did not change. */
1446 /* apply coloring */
1447 foreach_phase_irn(&mst_env.ph, irn) {
1449 const arch_register_t *reg;
1451 if (arch_irn_is(irn, ignore))
1454 mirn = get_co_mst_irn(&mst_env, irn);
1455 // assert(mirn->fixed && "Node should have fixed color");
1457 /* skip nodes where color hasn't changed */
1458 if (mirn->init_col == mirn->col)
1461 reg = arch_register_for_index(co->cls, mirn->col);
1462 arch_set_irn_register(irn, reg);
1463 DB((dbg, LEVEL_1, "%+F set color from %d to %d\n", irn, mirn->init_col, mirn->col));
1466 /* free allocated memory */
1467 del_pqueue(mst_env.chunks);
1468 phase_free(&mst_env.ph);
1469 del_pset(mst_env.chunkset);
1471 stat_ev_tim_pop("heur4_total");
/* Command-line options of the heur4 coalescer (registered below under
 * be.ra.chordal.co.heur4).  "limit" caps the recoloring recursion depth;
 * "di" scales the dislike influence used in the cost vectors.
 * NOTE(review): the LC_OPT_LAST terminator and closing brace are on
 * elided lines of this sampled dump. */
1476 static const lc_opt_table_entry_t options[] = {
1477 LC_OPT_ENT_INT ("limit", "limit recoloring", &recolor_limit),
1478 LC_OPT_ENT_DBL ("di", "dislike influence", &dislike_influence),
/**
 * Module constructor: walks the option-group hierarchy down to
 * be.ra.chordal.co.heur4, registers the option table there, and sets up
 * the "firm.be.co.heur4" debug module.
 * (The function's closing brace is on an elided line of this dump.)
 */
1483 void be_init_copyheur4(void) {
1484 lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
1485 lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
1486 lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
1487 lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
1488 lc_opt_entry_t *heur4_grp = lc_opt_get_grp(co_grp, "heur4");
1490 lc_opt_add_table(heur4_grp, options);
1491 FIRM_DBG_REGISTER(dbg, "firm.be.co.heur4");

/* Arrange for be_init_copyheur4() to run during libFirm backend
 * module initialization. */
1495 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur4);