4 * Copyright: (c) Universitaet Karlsruhe
5 * Licence: This file protected by GPL - GNU GENERAL PUBLIC LICENSE.
7 * Heuristic for minimizing copies using a queue which holds 'qnodes' not yet
8 * examined. A qnode has a 'target color', nodes out of the opt unit and
9 * a 'conflict graph'. 'Conflict graph' = "Interference graph' + 'conflict edges'
10 * A 'max indep set' is determined from these. We try to color this mis using a
11 * color-exchanging mechanism. Occurring conflicts are modeled with 'conflict edges'
12 * and the qnode is reinserted in the queue. The first qnode colored without
13 * conflicts is the best one.
28 #include "becopyopt_t.h"
29 #include "becopystat.h"
30 #include "benodesets.h"
/* Debug module handle; only present in debug builds. */
33 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
/* When defined, qnode_color_irn() may pick any free color for nodes that are
 * recolored as a side effect of a recursive call (see its #ifdef'd section). */
35 #define SEARCH_FREE_COLORS
/* Initial slot counts for the hash sets / psets created below. */
37 #define SLOTS_PINNED_GLOBAL 64
38 #define SLOTS_CONFLICTS 8
39 #define SLOTS_CHANGED_NODES 32
/* Recover the enclosing qnode_t from its embedded 'queue' list_head. */
41 #define list_entry_queue(lh) list_entry(lh, qnode_t, queue)
/* Order-independent hash of a conflict edge (XOR is symmetric in n1/n2, so the
 * hash matches regardless of endpoint order). */
42 #define HASH_CONFLICT(c) (nodeset_hash(c.n1) ^ nodeset_hash(c.n2))
/**
45 * Modeling additional conflicts between nodes. NOT live range interference
 * (those are queried live via nodes_interfere()); these edges record
 * recoloring conflicts discovered during optimization.
 */
47 typedef struct _conflict_t {
	/* Edge endpoints. NOTE(review): stored in a canonical order by
	 * qnode_add_conflict() so lookups build the same key. Closing brace /
	 * typedef name not visible in this excerpt. */
48 const ir_node *n1, *n2;
/**
52 * If an irn is changed, the changes first get stored in a node_stat_t,
53 * to allow undo of changes (=drop new data) in case of conflicts.
 *
 * NOTE(review): the field declarations (irn, new_color, pinned_local — as
 * accessed by the functions below) are not visible in this excerpt.
 */
55 typedef struct _node_stat_t {
/**
62 * Represents a node in the optimization queue.
 * Allocated by new_qnode(), released by free_qnode().
 */
64 typedef struct _qnode_t {
65 struct list_head queue; /**< chaining of unit_t->queue */
66 const unit_t *ou; /**< the opt unit this qnode belongs to */
67 int color; /**< target color */
68 set *conflicts; /**< contains conflict_t's. All internal conflicts */
69 int mis_costs; /**< costs of nodes/copies in the mis. */
70 int mis_size; /**< size of the array below */
71 ir_node **mis; /**< the nodes of unit_t->nodes[] being part of the max independent set */
72 set *changed_nodes; /**< contains node_stat_t's. */
/* Set of ir_node* that earlier optimization units fixed to their final color;
 * later units must not recolor them (checked in qnode_color_irn()). */
75 static pset *pinned_global; /**< optimized nodes should not be altered any more */
/**
 * Interference test between two nodes: consults the interference graph when
 * available, otherwise falls back to a liveness-based check.
 * NOTE(review): the condition selecting between the two returns is not
 * visible in this excerpt — presumably `if (env->ifg)`; confirm in full source.
 */
77 static INLINE int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
80 return be_ifg_connected(env->ifg, a, b);
82 return values_interfere(env->birg->lv, a, b);
85 static int set_cmp_conflict_t(const void *x, const void *y, size_t size) {
86 const conflict_t *xx = x;
87 const conflict_t *yy = y;
88 return ! (xx->n1 == yy->n1 && xx->n2 == yy->n2);
92 * If a local pinned conflict occurs, a new edge in the conflict graph is added.
93 * The next maximum independent set build, will regard it.
95 static INLINE void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
97 DBG((dbg, LEVEL_4, "\t %+F -- %+F\n", n1, n2));
99 if ((int)n1 < (int)n2) {
106 set_insert(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c));
110 * Checks if two nodes are in a conflict.
112 static INLINE int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2) {
114 /* search for live range interference */
115 if (n1!=n2 && nodes_interfere(qn->ou->co->cenv, n1, n2))
117 /* search for recoloring conflicts */
118 if ((int)n1 < (int)n2) {
125 return (int) set_find(qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c));
128 static int set_cmp_node_stat_t(const void *x, const void *y, size_t size) {
129 return ((node_stat_t *)x)->irn != ((node_stat_t *)y)->irn;
/**
133 * Finds a node status entry of a node if existent. Otherwise return NULL
 * NOTE(review): the local key — `node_stat_t find; find.irn = irn;` — is not
 * visible in this excerpt but must precede the lookup below.
 */
135 static INLINE node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn) {
138 return set_find(qn->changed_nodes, &find, sizeof(find), nodeset_hash(irn));
/**
142 * Finds a node status entry of a node if existent. Otherwise it will return
143 * an initialized new entry for this node.
 * NOTE(review): `node_stat_t find; find.irn = irn;` presumably precedes the
 * initialization below (lines not visible in this excerpt).
 */
145 static INLINE node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn) {
/* Defaults for a fresh entry: no virtual color yet, not locally pinned. */
148 find.new_color = NO_COLOR;
149 find.pinned_local = 0;
/* presumably set_insert returns the existing entry when irn is already
 * present, preserving earlier changes — confirm against the set API */
150 return set_insert(qn->changed_nodes, &find, sizeof(find), nodeset_hash(irn));
/**
154 * Returns the virtual color of a node if set before, else returns the real color.
 */
156 static INLINE int qnode_get_new_color(const qnode_t *qn, ir_node *irn) {
157 node_stat_t *found = qnode_find_node(qn, irn);
/* NOTE(review): the `if (found)` guard and `else` keyword separating the two
 * returns are not visible in this excerpt. */
159 return found->new_color;
161 return get_irn_col(qn->ou->co, irn);
165 * Sets the virtual color of a node.
167 static INLINE void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color) {
168 node_stat_t *found = qnode_find_or_insert_node(qn, irn);
169 found->new_color = color;
170 DBG((dbg, LEVEL_3, "\t col(%+F) := %d\n", irn, color));
/**
174 * Checks if a node is local pinned. A node is local pinned, iff it belongs
175 * to the same optimization unit and has been optimized before the current
 * node (sentence continues on a line not visible in this excerpt).
 */
178 static INLINE int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn) {
179 node_stat_t *found = qnode_find_node(qn, irn);
/* NOTE(review): the `if (found)` guard and the no-entry path (presumably
 * `return 0;`) are not visible in this excerpt. */
181 return found->pinned_local;
187 * Local-pins a node, so optimizations of further nodes of the same opt unit
188 * can handle situations in which a color change would undo prior optimizations.
190 static INLINE void qnode_pin_local(const qnode_t *qn, ir_node *irn) {
191 node_stat_t *found = qnode_find_or_insert_node(qn, irn);
192 found->pinned_local = 1;
193 if (found->new_color == NO_COLOR)
194 found->new_color = get_irn_col(qn->ou->co, irn);
/**
 * Possible return values of qnode_color_irn():
 *   CHANGE_SAVE        recoloring succeeded (NULL sentinel)
 *   CHANGE_IMPOSSIBLE  recoloring violates register constraints (sentinel 1)
 *   any other pointer  the first conflicting node encountered
 */
#define CHANGE_SAVE NULL
#define CHANGE_IMPOSSIBLE (ir_node *)1
/* BUGFIX: the original tested (((int)n) > 1), truncating the pointer to int.
 * On LP64 a real node whose address has low 32 bits 0 or 1 would be
 * misclassified as a sentinel. Compare against the sentinels directly. */
#define is_conflicting_node(n) ((n) != CHANGE_SAVE && (n) != CHANGE_IMPOSSIBLE)
/**
206 * Performs virtual re-coloring of node @p n to color @p col. Virtual colors of
207 * other nodes are changed too, as required to preserve correctness. Function is
208 * aware of local and global pinning. Recursive.
210 * If irn == trigger the color @p col must be used. (the first recoloring)
211 * If irn != trigger an arbitrary free color may be used. If no color is free, @p col is used.
213 * @param irn The node to set the color for
214 * @param col The color to set
215 * @param trigger The irn that caused the wish to change the color of the irn
216 * External callers must call with trigger = irn
218 * @return CHANGE_SAVE iff setting the color is possible, with all transitive effects.
219 * CHANGE_IMPOSSIBLE iff conflicts with reg-constraints occurred.
220 * Else the first conflicting ir_node encountered is returned.
 */
223 static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger) {
224 copy_opt_t *co = qn->ou->co;
225 const be_chordal_env_t *chordal_env = co->cenv;
226 const arch_register_class_t *cls = co->cls;
227 const arch_env_t *arch_env = co->aenv;
228 int irn_col = qnode_get_new_color(qn, irn);
229 ir_node *sub_res, *curr;
230 be_ifg_t *ifg = chordal_env->ifg;
231 void *iter = be_ifg_neighbours_iter_alloca(ifg);
234 DBG((dbg, LEVEL_3, "\t %+F \tcaused col(%+F) \t%2d --> %2d\n", trigger, irn, irn_col, col));
236 /* If the target color is already set do nothing */
237 if (irn_col == col) {
238 DBG((dbg, LEVEL_3, "\t %+F same color\n", irn));
/* NOTE(review): presumably `return CHANGE_SAVE;` here (line not visible) */
242 /* If the irn is pinned, changing color is impossible */
243 if (pset_find_ptr(pinned_global, irn) || qnode_is_pinned_local(qn, irn)) {
244 DBG((dbg, LEVEL_3, "\t %+F conflicting\n", irn));
/* NOTE(review): presumably `return irn;` (the conflicting node) — line not visible */
248 #ifdef SEARCH_FREE_COLORS
249 /* If we resolve conflicts (recursive calls) we can use any unused color.
250 * In case of the first call @p col must be used.
 */
252 if (irn != trigger) {
253 bitset_t *free_cols = bitset_alloca(cls->n_regs);
254 arch_register_req_t req;
/* NOTE(review): the declaration of `free_col` is not visible in this excerpt */
258 /* Get all possible colors */
259 bitset_copy(free_cols, co->cenv->ignore_colors);
260 bitset_flip_all(free_cols);
262 /* Exclude colors not assignable to the irn */
263 arch_get_register_req(arch_env, &req, irn, -1);
264 if (arch_register_req_is(&req, limited)) {
265 bitset_t *limited = bitset_alloca(cls->n_regs);
266 req.limited(req.limited_env, limited);
267 bitset_and(free_cols, limited);
270 /* Exclude the color of the irn, because it must _change_ its color */
271 bitset_clear(free_cols, irn_col);
273 /* Exclude all colors used by adjacent nodes */
274 be_ifg_foreach_neighbour(ifg, iter, irn, curr)
275 bitset_clear(free_cols, qnode_get_new_color(qn, curr));
277 free_col = bitset_next_set(free_cols, 0);
279 if (free_col != -1) {
280 qnode_set_new_color(qn, irn, free_col);
/* NOTE(review): presumably `return CHANGE_SAVE;` here (line not visible) */
284 #endif /* SEARCH_FREE_COLORS */
286 /* If target color is not allocatable changing color is impossible */
287 if (!arch_reg_is_allocatable(arch_env, irn, -1, arch_register_for_index(cls, col))) {
288 DBG((dbg, LEVEL_3, "\t %+F impossible\n", irn));
289 return CHANGE_IMPOSSIBLE;
/*
293 * If we arrive here changing color may be possible, but there may be conflicts.
294 * Try to color all conflicting nodes 'curr' with the color of the irn itself.
 */
296 be_ifg_foreach_neighbour(ifg, iter, irn, curr) {
297 DBG((dbg, LEVEL_3, "\t Confl %+F(%d)\n", curr, qnode_get_new_color(qn, curr)));
298 if (qnode_get_new_color(qn, curr) == col && curr != trigger) {
299 sub_res = qnode_color_irn(qn, curr, irn_col, irn);
300 if (sub_res != CHANGE_SAVE) {
301 be_ifg_neighbours_break(ifg, iter);
/* NOTE(review): presumably `return sub_res;` propagating the conflict (line not visible) */
/*
308 * If we arrive here, all conflicts were resolved.
309 * So it is safe to change this irn
 */
311 qnode_set_new_color(qn, irn, col);
/* NOTE(review): presumably `return CHANGE_SAVE;` closes the function (line not visible) */
/**
317 * Tries to set the colors for all members of this queue node;
318 * to the target color qn->color
319 * @returns 1 iff all members colors could be set
 */
322 static int qnode_try_color(const qnode_t *qn) {
/* NOTE(review): declaration of loop index `i` is not visible in this excerpt */
324 for (i=0; i<qn->mis_size; ++i) {
325 ir_node *test_node, *confl_node;
327 test_node = qn->mis[i];
328 DBG((dbg, LEVEL_3, "\t Testing %+F\n", test_node));
329 confl_node = qnode_color_irn(qn, test_node, qn->color, test_node);
331 if (confl_node == CHANGE_SAVE) {
332 DBG((dbg, LEVEL_3, "\t Save --> pin local\n"));
333 qnode_pin_local(qn, test_node);
334 } else if (confl_node == CHANGE_IMPOSSIBLE) {
335 DBG((dbg, LEVEL_3, "\t Impossible --> remove from qnode\n"));
/* A (node, node) self-conflict marks the node as removed from this qnode;
 * qnode_max_ind_set() / ou_insert_qnode() test for it. */
336 qnode_add_conflict(qn, test_node, test_node);
/* NOTE(review): the `} else {` opening the conflicting-node case is not
 * visible in this excerpt; confl_node is a real node below this point. */
338 if (qnode_is_pinned_local(qn, confl_node)) {
339 /* changing test_node would change back a node of current ou */
340 if (confl_node == qn->ou->nodes[0]) {
341 /* Adding a conflict edge between testnode and conflnode
342 * would introduce a root -- arg interference.
343 * So remove the arg of the qn */
344 DBG((dbg, LEVEL_3, "\t Conflicting local with phi --> remove from qnode\n"));
345 qnode_add_conflict(qn, test_node, test_node);
/* NOTE(review): `} else {` branch for a non-root local conflict (line not visible) */
347 DBG((dbg, LEVEL_3, "\t Conflicting local --> add conflict\n"));
348 qnode_add_conflict(qn, confl_node, test_node);
351 if (pset_find_ptr(pinned_global, confl_node)) {
352 /* changing test_node would change back a node of a prior ou */
353 DBG((dbg, LEVEL_3, "\t Conflicting global --> remove from qnode\n"));
354 qnode_add_conflict(qn, test_node, test_node);
/* Any unresolved conflict aborts the attempt; NOTE(review): the `return 0;`
 * and final `return 1;` are not visible in this excerpt. */
358 if (confl_node != CHANGE_SAVE)
/**
365 * Determines a maximum weighted independent set with respect to
366 * the interference and conflict edges of all nodes in a qnode.
 * Fills qn->mis / qn->mis_size / qn->mis_costs.
 */
368 static INLINE void qnode_max_ind_set(qnode_t *qn, const unit_t *ou) {
369 ir_node **safe, **unsafe;
370 int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
371 bitset_t *curr, *best;
372 int max, next, pos, curr_weight, best_weight = 0;
374 /* assign the nodes into two groups.
375 * safe: node has no interference, hence it is in every max stable set.
376 * unsafe: node has an interference
 */
/* Arrays exclude the root (index 0), hence node_count-1 entries. */
378 safe = alloca((ou->node_count-1) * sizeof(*safe));
381 unsafe = alloca((ou->node_count-1) * sizeof(*unsafe));
382 unsafe_costs = alloca((ou->node_count-1) * sizeof(*unsafe_costs));
/* NOTE(review): zero-initialization of safe_count/safe_costs/unsafe_count is
 * not visible in this excerpt. */
384 for(i=1; i<ou->node_count; ++i) {
386 for(o=1; o<ou->node_count; ++o) {
387 if (qnode_are_conflicting(qn, ou->nodes[i], ou->nodes[o])) {
/* NOTE(review): a guard excluding o == i presumably applies around here
 * (lines not visible); self-conflicts mark removed nodes, see qnode_try_color */
389 unsafe_costs[unsafe_count] = ou->costs[i];
390 unsafe[unsafe_count] = ou->nodes[i];
/* NOTE(review): unsafe_count increment and loop break not visible here */
398 safe_costs += ou->costs[i];
399 safe[safe_count++] = ou->nodes[i];
405 /* now compute the best set out of the unsafe nodes*/
406 best = bitset_alloca(unsafe_count);
408 if (unsafe_count > MIS_HEUR_TRIGGER) {
409 /* Heuristic: Greedy trial and error from index 0 to unsafe_count-1 */
410 for (i=0; i<unsafe_count; ++i) {
/* NOTE(review): `bitset_set(best, i);` presumably precedes the check below */
412 /* check if it is a stable set */
413 for (o=bitset_next_set(best, 0); o!=-1 && o<=i; o=bitset_next_set(best, o+1))
414 if (qnode_are_conflicting(qn, unsafe[i], unsafe[o])) {
415 bitset_clear(best, i); /* clear the bit and try next one */
419 /* compute the weight */
420 bitset_foreach(best, pos)
421 best_weight += unsafe_costs[pos];
423 /* Exact Algorithm: Brute force */
424 curr = bitset_alloca(unsafe_count);
425 bitset_set_all(curr);
426 while ((max = bitset_popcnt(curr)) != 0) {
427 /* check if curr is a stable set */
428 for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
429 for (o=bitset_next_set(curr, i); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to ou_max_ind_set_costs(): NOT (curr, i+1) */
430 if (qnode_are_conflicting(qn, unsafe[i], unsafe[o]))
/* NOTE(review): rejection of a non-stable candidate (goto/advance to the next
 * subset) is not visible in this excerpt. */
433 /* if we arrive here, we have a stable set */
434 /* compute the weight of the stable set*/
436 bitset_foreach(curr, pos)
437 curr_weight += unsafe_costs[pos];
/* keep track of the best set found so far */
440 if (curr_weight > best_weight) {
441 best_weight = curr_weight;
442 bitset_copy(best, curr);
450 /* transfer the best set into the qn */
451 qn->mis_size = 1+safe_count+bitset_popcnt(best);
452 qn->mis_costs = safe_costs+best_weight;
453 qn->mis[0] = ou->nodes[0]; /* the root is always in a max stable set */
/* NOTE(review): `next = 1;` presumably set before filling the array below */
455 for (i=0; i<safe_count; ++i)
456 qn->mis[next++] = safe[i];
457 bitset_foreach(best, pos)
458 qn->mis[next++] = unsafe[pos];
/**
462 * Creates a new qnode
 * The mis array is sized for the whole unit; conflicts and changed_nodes
 * start empty.
 */
464 static INLINE qnode_t *new_qnode(const unit_t *ou, int color) {
465 qnode_t *qn = xmalloc(sizeof(*qn));
/* NOTE(review): assignments of qn->ou and qn->color are not visible in this excerpt */
468 qn->mis = xmalloc(ou->node_count * sizeof(*qn->mis));
469 qn->conflicts = new_set(set_cmp_conflict_t, SLOTS_CONFLICTS);
470 qn->changed_nodes = new_set(set_cmp_node_stat_t, SLOTS_CHANGED_NODES);
/* NOTE(review): `return qn;` not visible in this excerpt */
/**
475 * Frees space used by a queue node
 */
477 static INLINE void free_qnode(qnode_t *qn) {
478 del_set(qn->conflicts);
479 del_set(qn->changed_nodes);
/* NOTE(review): the releases of qn->mis (xmalloc'd in new_qnode) and of qn
 * itself are not visible in this excerpt; confirm both happen in the full
 * source, otherwise this leaks. */
/**
485 * Inserts a qnode in the sorted queue of the optimization unit. Queue is
486 * ordered by field 'size' (the size of the mis) in decreasing order.
 * NOTE(review): the code below actually orders by mis_costs, not mis_size —
 * the comment is stale; the cost is what ou_optimize() maximizes.
 */
488 static INLINE void ou_insert_qnode(unit_t *ou, qnode_t *qn) {
489 struct list_head *lh;
/* A (root, root) self-conflict means the root was removed from this qnode,
 * so it can never be applied. NOTE(review): the early-out (presumably
 * free_qnode + return) is not visible in this excerpt. */
491 if (qnode_are_conflicting(qn, ou->nodes[0], ou->nodes[0])) {
492 /* root node is not in qnode */
/* (Re)compute the mis for the current conflict graph before inserting. */
497 qnode_max_ind_set(qn, ou);
498 /* do the insertion */
499 DBG((dbg, LEVEL_4, "\t Insert qnode color %d with cost %d\n", qn->color, qn->mis_costs));
/* NOTE(review): initialization `lh = &ou->queue;` is not visible in this excerpt */
501 while (lh->next != &ou->queue) {
502 qnode_t *curr = list_entry_queue(lh->next);
503 if (curr->mis_costs <= qn->mis_costs)
/* NOTE(review): `break;` when the insertion point is found, and the
 * `lh = lh->next;` advance, are not visible in this excerpt */
507 list_add(&qn->queue, lh);
/**
511 * Tries to re-allocate colors of nodes in this opt unit, to achieve lower
512 * costs of copy instructions placed during SSA-destruction and lowering.
513 * Works only for opt units with exactly 1 root node, which is the
514 * case for approximately 80% of all phi classes and 100% of register constrained
515 * nodes. (All other phi classes are reduced to this case.)
 */
517 static void ou_optimize(unit_t *ou) {
/* NOTE(review): declarations of `i` and of `node_stat_t *ns` (used below) are
 * not visible in this excerpt */
519 qnode_t *curr = NULL, *tmp;
520 const arch_env_t *aenv = ou->co->aenv;
521 const arch_register_class_t *cls = ou->co->cls;
522 bitset_t *pos_regs = bitset_alloca(cls->n_regs);
524 DBG((dbg, LEVEL_1, "\tOptimizing unit:\n"));
525 for (i=0; i<ou->node_count; ++i)
526 DBG((dbg, LEVEL_1, "\t %+F\n", ou->nodes[i]));
/* Build one qnode per allocatable color of the root and queue them. */
529 INIT_LIST_HEAD(&ou->queue);
531 arch_get_allocatable_regs(aenv, ou->nodes[0], -1, pos_regs);
533 /* exclude ignore colors */
534 bitset_andnot(pos_regs, ou->co->cenv->ignore_colors);
536 assert(bitset_popcnt(pos_regs) != 0 && "No register is allowed for this node !!?");
538 /* create new qnode */
539 bitset_foreach(pos_regs, i)
540 ou_insert_qnode(ou, new_qnode(ou, i));
/* Process the queue: the first qnode that colors without conflicts wins. */
543 while (!list_empty(&ou->queue)) {
544 /* get head of queue */
545 curr = list_entry_queue(ou->queue.next);
546 list_del(&curr->queue);
547 DBG((dbg, LEVEL_2, "\t Examine qnode color %d with cost %d\n", curr->color, curr->mis_costs));
/* NOTE(review): the `break;` on success is not visible in this excerpt */
550 if (qnode_try_color(curr))
553 /* no success, so re-insert */
554 del_set(curr->changed_nodes);
555 curr->changed_nodes = new_set(set_cmp_node_stat_t, SLOTS_CHANGED_NODES);
556 ou_insert_qnode(ou, curr);
559 /* apply the best found qnode */
560 if (curr->mis_size >= 2) {
562 int root_col = qnode_get_new_color(curr, ou->nodes[0]);
563 DBG((dbg, LEVEL_1, "\t Best color: %d Costs: %d << %d << %d\n", curr->color, ou->min_nodes_costs, ou->all_nodes_costs - curr->mis_costs, ou->all_nodes_costs));
564 /* globally pin root and all args which have the same color */
565 pset_insert_ptr(pinned_global, ou->nodes[0]);
566 for (i=1; i<ou->node_count; ++i) {
567 ir_node *irn = ou->nodes[i];
568 int nc = qnode_get_new_color(curr, irn);
569 if (nc != NO_COLOR && nc == root_col)
570 pset_insert_ptr(pinned_global, irn);
573 /* set color of all changed nodes */
574 for (ns = set_first(curr->changed_nodes); ns; ns = set_next(curr->changed_nodes)) {
575 /* NO_COLOR is possible, if we had an undo */
576 if (ns->new_color != NO_COLOR) {
577 DBG((dbg, LEVEL_1, "\t color(%+F) := %d\n", ns->irn, ns->new_color));
578 set_irn_col(ou->co, ns->irn, ns->new_color);
583 /* free best qnode (curr) and queue */
/* NOTE(review): `free_qnode(curr);` presumably precedes this loop, and the
 * loop body `free_qnode(curr);` is not visible in this excerpt */
585 list_for_each_entry_safe(qnode_t, curr, tmp, &ou->queue, queue)
/* Entry point: run the queue-based copy-minimization heuristic on every
 * optimization unit of @p co that has more than one node. Single-node units
 * have no copies to remove. */
589 int co_solve_heuristic(copy_opt_t *co) {
/* NOTE(review): declaration of the iterator `unit_t *curr` is not visible */
591 FIRM_DBG_REGISTER(dbg, "ir.be.copyoptheur");
595 pinned_global = pset_new_ptr(SLOTS_PINNED_GLOBAL);
596 list_for_each_entry(unit_t, curr, &co->units, units)
597 if (curr->node_count > 1)
/* NOTE(review): presumably `ou_optimize(curr);` here (line not visible) */
600 del_pset(pinned_global);
/* NOTE(review): the function's return statement is not visible in this excerpt */