2 * This file is part of libFirm.
3 * Copyright (C) 2012 University of Karlsruhe.
8 * @brief First simple copy minimization heuristics.
 * Heuristic for minimizing copies using a queue which holds 'qnodes' not yet
 * examined. A qnode has a 'target color', nodes out of the opt unit and
 * a 'conflict graph'. 'Conflict graph' = 'interference graph' + 'conflict edges'.
 * A 'max indep set' is determined from these. We try to color this MIS using a
 * color-exchanging mechanism. Occurring conflicts are modeled with 'conflict edges'
 * and the qnode is reinserted in the queue. The first qnode colored without
 * conflicts is the best one.
24 #include "raw_bitset.h"
27 #include "becopyopt_t.h"
28 #include "becopystat.h"
29 #include "beintlive_t.h"
/* Debug module handle; only present in debug builds. */
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/** Defines an invalid register index. */

/* When defined, qnode_color_irn() may pick any free color while resolving
 * conflicts recursively, instead of insisting on the triggering color. */
#define SEARCH_FREE_COLORS

/* Initial slot counts for the (p)sets allocated below. */
#define SLOTS_PINNED_GLOBAL 64
#define SLOTS_CONFLICTS 8
#define SLOTS_CHANGED_NODES 32

/* Get the qnode_t embedding the given list_head 'queue' link. */
#define list_entry_queue(lh) list_entry(lh, qnode_t, queue)
/* Hash of a conflict edge; XOR makes it independent of endpoint order. */
#define HASH_CONFLICT(c) (hash_irn(c.n1) ^ hash_irn(c.n2))
/**
 * Modeling additional conflicts between nodes. NOT live range interference
 */
typedef struct conflict_t {
	const ir_node *n1, *n2; /**< conflict endpoints; stored with get_irn_idx(n1) < get_irn_idx(n2), see qnode_add_conflict() */

/**
 * If an irn is changed, the changes first get stored in a node_stat_t,
 * to allow undo of changes (=drop new data) in case of conflicts.
 */
typedef struct node_stat_t {
/**
 * Represents a node in the optimization queue.
 */
typedef struct qnode_t {
	struct list_head queue;  /**< chaining of unit_t->queue */
	const unit_t *ou;        /**< the opt unit this node belongs to */
	int color;               /**< target color */
	set *conflicts;          /**< contains conflict_t's. All internal conflicts */
	int mis_costs;           /**< costs of nodes/copies in the mis. */
	int mis_size;            /**< size of the array below */
	ir_node **mis;           /**< the nodes of unit_t->nodes[] being part of the max independent set */
	set *changed_nodes;      /**< contains node_stat_t's. */

/* Nodes whose color was fixed by an earlier optimization unit. */
static pset *pinned_global; /**< optimized nodes should not be altered any more */
80 static int set_cmp_conflict_t(const void *x, const void *y, size_t size)
82 const conflict_t *xx = (const conflict_t*)x;
83 const conflict_t *yy = (const conflict_t*)y;
86 return xx->n1 != yy->n1 || xx->n2 != yy->n2;
/**
 * If a local pinned conflict occurs, a new edge in the conflict graph is added.
 * The next maximum independent set build will regard it.
 */
static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2)
	DBG((dbg, LEVEL_4, "\t %+F -- %+F\n", n1, n2));

	/* Normalize the edge: endpoints are stored ordered by node index, so a
	 * later lookup does not depend on argument order. */
	if (get_irn_idx(n1) < get_irn_idx(n2)) {
	(void)set_insert(conflict_t, qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c));
/**
 * Checks if two nodes are in a conflict.
 */
static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2)
	/* search for live range interference */
	be_lv_t *const lv = be_get_irg_liveness(qn->ou->co->irg);
	if (be_values_interfere(lv, n1, n2))

	/* search for recoloring conflicts: edges are stored with endpoints
	 * ordered by node index (see qnode_add_conflict) */
	if (get_irn_idx(n1) < get_irn_idx(n2)) {
	return set_find(conflict_t, qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != 0;
131 static int set_cmp_node_stat_t(const void *x, const void *y, size_t size)
134 return ((const node_stat_t*)x)->irn != ((const node_stat_t*)y)->irn;
/**
 * Finds a node status entry of a node if existent. Otherwise return NULL
 */
static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn)
	/* 'find' is a stack key; only its irn field is relevant for lookup,
	 * since set_cmp_node_stat_t compares irn only. */
	return set_find(node_stat_t, qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
148 * Finds a node status entry of a node if existent. Otherwise it will return
149 * an initialized new entry for this node.
151 static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn)
155 find.new_color = NO_COLOR;
156 find.pinned_local = 0;
157 return set_insert(node_stat_t, qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
161 * Returns the virtual color of a node if set before, else returns the real color.
163 static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn)
165 const node_stat_t *found = qnode_find_node(qn, irn);
167 return found->new_color;
169 return get_irn_col(irn);
173 * Sets the virtual color of a node.
175 static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color)
177 node_stat_t *found = qnode_find_or_insert_node(qn, irn);
178 found->new_color = color;
179 DBG((dbg, LEVEL_3, "\t col(%+F) := %d\n", irn, color));
183 * Checks if a node is local pinned. A node is local pinned, iff it belongs
184 * to the same optimization unit and has been optimized before the current
187 static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn)
189 const node_stat_t *found = qnode_find_node(qn, irn);
191 return found->pinned_local;
197 * Local-pins a node, so optimizations of further nodes of the same opt unit
198 * can handle situations in which a color change would undo prior optimizations.
200 static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn)
202 node_stat_t *found = qnode_find_or_insert_node(qn, irn);
203 found->pinned_local = 1;
204 if (found->new_color == NO_COLOR)
205 found->new_color = get_irn_col(irn);
/**
 * Possible return values of qnode_color_irn()
 */
#define CHANGE_SAVE NULL
#define CHANGE_IMPOSSIBLE (ir_node *)1

/**
 * Performs virtual re-coloring of node @p n to color @p col. Virtual colors of
 * other nodes are changed too, as required to preserve correctness. Function is
 * aware of local and global pinning. Recursive.
 *
 * If irn == trigger the color @p col must be used. (the first recoloring)
 * If irn != trigger an arbitrary free color may be used. If no color is free, @p col is used.
 *
 * @param irn     The node to set the color for
 * @param col     The color to set
 * @param trigger The irn that caused the wish to change the color of the irn
 *                External callers must call with trigger = irn
 *
 * @return CHANGE_SAVE iff setting the color is possible, with all transitive effects.
 *         CHANGE_IMPOSSIBLE iff a conflict with register constraints occurred.
 *         Else the first conflicting ir_node encountered is returned.
 */
static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger)
	copy_opt_t *co = qn->ou->co;
	const be_chordal_env_t *chordal_env = co->cenv;
	const arch_register_class_t *cls = co->cls;
	int irn_col = qnode_get_new_color(qn, irn);
	be_ifg_t *ifg = chordal_env->ifg;
	neighbours_iter_t iter;
	const arch_register_req_t *req;

	DBG((dbg, LEVEL_3, "\t %+F \tcaused col(%+F) \t%2d --> %2d\n", trigger, irn, irn_col, col));

	/* If the target color is already set do nothing */
	if (irn_col == col) {
		DBG((dbg, LEVEL_3, "\t %+F same color\n", irn));

	/* If the irn is pinned, changing color is impossible */
	if (pset_find_ptr(pinned_global, irn) || qnode_is_pinned_local(qn, irn)) {
		DBG((dbg, LEVEL_3, "\t %+F conflicting\n", irn));

	req = arch_get_irn_register_req(irn);
#ifdef SEARCH_FREE_COLORS
	/* If we resolve conflicts (recursive calls) we can use any unused color.
	 * In case of the first call @p col must be used.
	 */
	if (irn != trigger) {
		bitset_t *free_cols = bitset_alloca(cls->n_regs);

		/* Get all possible colors */
		bitset_copy(free_cols, co->cenv->allocatable_regs);

		/* Exclude colors not assignable to the irn */
		if (arch_register_req_is(req, limited)) {
			bitset_t *limited = bitset_alloca(cls->n_regs);
			rbitset_copy_to_bitset(req->limited, limited);
			bitset_and(free_cols, limited);

		/* Exclude the color of the irn, because it must _change_ its color */
		bitset_clear(free_cols, irn_col);

		/* Exclude all colors used by adjacent nodes */
		be_ifg_foreach_neighbour(ifg, &iter, irn, curr)
			bitset_clear(free_cols, qnode_get_new_color(qn, curr));

		free_col = bitset_next_set(free_cols, 0);

		if (free_col != -1) {
			qnode_set_new_color(qn, irn, free_col);
#endif /* SEARCH_FREE_COLORS */

	/* If target color is not allocatable changing color is impossible */
	if (!arch_reg_is_allocatable(req, arch_register_for_index(cls, col))) {
		DBG((dbg, LEVEL_3, "\t %+F impossible\n", irn));
		return CHANGE_IMPOSSIBLE;

	/*
	 * If we arrive here changing color may be possible, but there may be conflicts.
	 * Try to color all conflicting nodes 'curr' with the color of the irn itself.
	 */
	be_ifg_foreach_neighbour(ifg, &iter, irn, curr) {
		DBG((dbg, LEVEL_3, "\t Confl %+F(%d)\n", curr, qnode_get_new_color(qn, curr)));
		if (qnode_get_new_color(qn, curr) == col && curr != trigger) {
			/* recursively push the neighbour to this irn's old color */
			ir_node *const sub_res = qnode_color_irn(qn, curr, irn_col, irn);
			if (sub_res != CHANGE_SAVE) {
				be_ifg_neighbours_break(&iter);

	/*
	 * If we arrive here, all conflicts were resolved.
	 * So it is safe to change this irn
	 */
	qnode_set_new_color(qn, irn, col);
/**
 * Tries to set the colors for all members of this queue node;
 * to the target color qn->color
 * @returns 1 iff all members colors could be set
 */
static int qnode_try_color(const qnode_t *qn)
	for (i=0; i<qn->mis_size; ++i) {
		ir_node *test_node, *confl_node;

		test_node = qn->mis[i];
		DBG((dbg, LEVEL_3, "\t Testing %+F\n", test_node));
		confl_node = qnode_color_irn(qn, test_node, qn->color, test_node);

		if (confl_node == CHANGE_SAVE) {
			/* coloring succeeded: protect this node from later undo */
			DBG((dbg, LEVEL_3, "\t Save --> pin local\n"));
			qnode_pin_local(qn, test_node);
		} else if (confl_node == CHANGE_IMPOSSIBLE) {
			/* a self-conflict edge removes test_node from the next MIS */
			DBG((dbg, LEVEL_3, "\t Impossible --> remove from qnode\n"));
			qnode_add_conflict(qn, test_node, test_node);

			if (qnode_is_pinned_local(qn, confl_node)) {
				/* changing test_node would change back a node of current ou */
				if (confl_node == qn->ou->nodes[0]) {
					/* Adding a conflict edge between testnode and conflnode
					 * would introduce a root -- arg interference.
					 * So remove the arg of the qn */
					DBG((dbg, LEVEL_3, "\t Conflicting local with phi --> remove from qnode\n"));
					qnode_add_conflict(qn, test_node, test_node);

				DBG((dbg, LEVEL_3, "\t Conflicting local --> add conflict\n"));
				qnode_add_conflict(qn, confl_node, test_node);

			if (pset_find_ptr(pinned_global, confl_node)) {
				/* changing test_node would change back a node of a prior ou */
				DBG((dbg, LEVEL_3, "\t Conflicting global --> remove from qnode\n"));
				qnode_add_conflict(qn, test_node, test_node);
/**
 * Determines a maximum weighted independent set with respect to
 * the interference and conflict edges of all nodes in a qnode.
 */
static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou)
	ir_node **safe, **unsafe;
	int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
	bitset_t *curr, *best;
	int next, curr_weight, best_weight = 0;

	/* assign the nodes into two groups.
	 * safe: node has no interference, hence it is in every max stable set.
	 * unsafe: node has an interference
	 */
	safe = ALLOCAN(ir_node*, ou->node_count - 1);
	unsafe = ALLOCAN(ir_node*, ou->node_count - 1);
	unsafe_costs = ALLOCAN(int, ou->node_count - 1);
	/* index 0 is the root; only arguments 1..node_count-1 are classified */
	for (i=1; i<ou->node_count; ++i) {
		for (o=1; o<ou->node_count; ++o) {
			if (qnode_are_conflicting(qn, ou->nodes[i], ou->nodes[o])) {
				unsafe_costs[unsafe_count] = ou->costs[i];
				unsafe[unsafe_count] = ou->nodes[i];
		safe_costs += ou->costs[i];
		safe[safe_count++] = ou->nodes[i];

	/* now compute the best set out of the unsafe nodes*/
	best = bitset_alloca(unsafe_count);
	if (unsafe_count > MIS_HEUR_TRIGGER) {
		/* Heuristic: Greedy trial and error form index 0 to unsafe_count-1 */
		for (i=0; i<unsafe_count; ++i) {
			/* check if it is a stable set */
			for (o=bitset_next_set(best, 0); o!=-1 && o<=i; o=bitset_next_set(best, o+1))
				if (qnode_are_conflicting(qn, unsafe[i], unsafe[o])) {
					bitset_clear(best, i); /* clear the bit and try next one */

		/* compute the weight */
		bitset_foreach(best, pos)
			best_weight += unsafe_costs[pos];
		/* Exact Algorithm: Brute force */
		curr = bitset_alloca(unsafe_count);
		bitset_set_all(curr);
		while (!bitset_is_empty(curr)) {
			/* check if curr is a stable set */
			for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
				for (o=bitset_next_set(curr, i); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to ou_max_ind_set_costs(): NOT (curr, i+1) */
					if (qnode_are_conflicting(qn, unsafe[i], unsafe[o]))

			/* if we arrive here, we have a stable set */
			/* compute the weight of the stable set*/
			bitset_foreach(curr, pos)
				curr_weight += unsafe_costs[pos];

			/* keep the heaviest stable set seen so far */
			if (curr_weight > best_weight) {
				best_weight = curr_weight;
				bitset_copy(best, curr);

	/* transfer the best set into the qn */
	qn->mis_size = 1+safe_count+bitset_popcount(best);
	qn->mis_costs = safe_costs+best_weight;
	qn->mis[0] = ou->nodes[0]; /* the root is always in a max stable set */
	for (i=0; i<safe_count; ++i)
		qn->mis[next++] = safe[i];
	bitset_foreach(best, pos)
		qn->mis[next++] = unsafe[pos];
/**
 * Creates a new qnode
 */
static inline qnode_t *new_qnode(const unit_t *ou, int color)
	qnode_t *qn = XMALLOC(qnode_t);
	/* mis can hold at most all nodes of the unit */
	qn->mis = XMALLOCN(ir_node*, ou->node_count);
	qn->conflicts = new_set(set_cmp_conflict_t, SLOTS_CONFLICTS);
	qn->changed_nodes = new_set(set_cmp_node_stat_t, SLOTS_CHANGED_NODES);
/**
 * Frees space used by a queue node
 */
static inline void free_qnode(qnode_t *qn)
	del_set(qn->conflicts);
	del_set(qn->changed_nodes);
/**
 * Inserts a qnode in the sorted queue of the optimization unit. Queue is
 * ordered by field 'size' (the size of the mis) in decreasing order.
 */
static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn)
	struct list_head *lh;

	if (qnode_are_conflicting(qn, ou->nodes[0], ou->nodes[0])) {
		/* root node is not in qnode */

	qnode_max_ind_set(qn, ou);

	/* do the insertion: walk to the first entry with lower or equal costs
	 * and link the qnode in front of it, keeping decreasing cost order */
	DBG((dbg, LEVEL_4, "\t Insert qnode color %d with cost %d\n", qn->color, qn->mis_costs));
	while (lh->next != &ou->queue) {
		qnode_t *curr = list_entry_queue(lh->next);
		if (curr->mis_costs <= qn->mis_costs)
	list_add(&qn->queue, lh);
/**
 * Tries to re-allocate colors of nodes in this opt unit, to achieve lower
 * costs of copy instructions placed during SSA-destruction and lowering.
 * Works only for opt units with exactly 1 root node, which is the
 * case for approximately 80% of all phi classes and 100% of register constrained
 * nodes. (All other phi classes are reduced to this case.)
 */
static void ou_optimize(unit_t *ou)
	qnode_t *curr = NULL;
	const arch_register_req_t *req;
	bitset_t const* allocatable_regs;

	DBG((dbg, LEVEL_1, "\tOptimizing unit:\n"));
	for (i=0; i<ou->node_count; ++i)
		DBG((dbg, LEVEL_1, "\t %+F\n", ou->nodes[i]));

	/* create a qnode for each feasible color */
	INIT_LIST_HEAD(&ou->queue);
	req = arch_get_irn_register_req(ou->nodes[0]);
	allocatable_regs = ou->co->cenv->allocatable_regs;
	n_regs = req->cls->n_regs;
	if (arch_register_req_is(req, limited)) {
		unsigned const* limited = req->limited;

		/* only colors both allocatable and allowed by the constraint */
		for (idx = 0; idx != n_regs; ++idx) {
			if (!bitset_is_set(allocatable_regs, idx))
			if (!rbitset_is_set(limited, idx))

			ou_insert_qnode(ou, new_qnode(ou, idx));
		/* unconstrained root: try every allocatable color */
		for (idx = 0; idx != n_regs; ++idx) {
			if (!bitset_is_set(allocatable_regs, idx))

			ou_insert_qnode(ou, new_qnode(ou, idx));

	assert(!list_empty(&ou->queue));
	/* get head of queue */
	curr = list_entry_queue(ou->queue.next);
	list_del(&curr->queue);
	DBG((dbg, LEVEL_2, "\t Examine qnode color %d with cost %d\n", curr->color, curr->mis_costs));

	if (qnode_try_color(curr))

	/* no success, so re-insert with a fresh (empty) change set */
	del_set(curr->changed_nodes);
	curr->changed_nodes = new_set(set_cmp_node_stat_t, SLOTS_CHANGED_NODES);
	ou_insert_qnode(ou, curr);

	/* apply the best found qnode */
	if (curr->mis_size >= 2) {
		int root_col = qnode_get_new_color(curr, ou->nodes[0]);
		DBG((dbg, LEVEL_1, "\t Best color: %d Costs: %d << %d << %d\n", curr->color, ou->min_nodes_costs, ou->all_nodes_costs - curr->mis_costs, ou->all_nodes_costs));
		/* globally pin root and all args which have the same color */
		pset_insert_ptr(pinned_global, ou->nodes[0]);
		for (i=1; i<ou->node_count; ++i) {
			ir_node *irn = ou->nodes[i];
			int nc = qnode_get_new_color(curr, irn);
			if (nc != NO_COLOR && nc == root_col)
				pset_insert_ptr(pinned_global, irn);

		/* set color of all changed nodes */
		foreach_set(curr->changed_nodes, node_stat_t, ns) {
			/* NO_COLOR is possible, if we had an undo */
			if (ns->new_color != NO_COLOR) {
				DBG((dbg, LEVEL_1, "\t color(%+F) := %d\n", ns->irn, ns->new_color));
				set_irn_col(ou->co->cls, ns->irn, ns->new_color);

	/* free best qnode (curr) and queue */
	list_for_each_entry_safe(qnode_t, curr, tmp, &ou->queue, queue)
/**
 * Solves the problem using a heuristic approach
 * Uses the OU data structure
 */
int co_solve_heuristic(copy_opt_t *co)
	/* global pin set lives for the duration of one solver run */
	pinned_global = pset_new_ptr(SLOTS_PINNED_GLOBAL);
	/* single-node units contain no copies, nothing to optimize there */
	list_for_each_entry(unit_t, curr, &co->units, units)
		if (curr->node_count > 1)
	del_pset(pinned_global);
630 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur)
631 void be_init_copyheur(void)
633 static co_algo_info copyheur = {
634 co_solve_heuristic, 0
637 be_register_copyopt("heur1", ©heur);
638 FIRM_DBG_REGISTER(dbg, "ir.be.copyoptheur");