/*
 * This file is part of libFirm.
 * Copyright (C) 2012 University of Karlsruhe.
 */

/**
 * @file
 * @brief       Copy minimization driver.
 * @author      Daniel Grund
 * @date        12.04.2005
 *
 * Main file for the optimization reducing the copies needed for:
 * - Phi coalescing
 * - Register-constrained nodes
 * - Two-address code instructions
 */
#include "config.h"

#include "debug.h"
#include "error.h"
#include "execfreq_t.h"
#include "irdump_t.h"
#include "iredges_t.h"
#include "irgraph.h"
#include "irgwalk.h"
#include "irloop_t.h"
#include "irnode.h"
#include "irprintf.h"
#include "irprog.h"
#include "irtools.h"
#include "pmap.h"
#include "raw_bitset.h"
#include "util.h"
#include "xmalloc.h"

#include "bearch.h"
#include "becopyopt_t.h"
#include "becopystat.h"
#include "bedump.h"
#include "beifg.h"
#include "beinsn_t.h"
#include "beintlive_t.h"
#include "beirg.h"
#include "belive_t.h"
#include "bemodule.h"
#include "benode.h"
#include "besched.h"
#include "statev_t.h"
#include "beutil.h"

#include "lc_opts.h"
#include "lc_opts_enum.h"

#define DUMP_BEFORE 1
#define DUMP_AFTER  2
#define DUMP_APPEL  4
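/* DUMP_ALL is the bitwise OR of all dump flags above (1|2|4). */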
#define DUMP_ALL    (2 * DUMP_APPEL - 1)

#define COST_FUNC_FREQ     1
#define COST_FUNC_LOOP     2
#define COST_FUNC_ALL_ONE  3

/**
 * Flags for dumping the IFG.
 */
enum {
        CO_IFG_DUMP_COLORS = 1 << 0, /**< Dump the graph colored. */
        CO_IFG_DUMP_LABELS = 1 << 1, /**< Dump node/edge labels. */
        CO_IFG_DUMP_SHAPE  = 1 << 2, /**< Give constrained nodes special shapes. */
        CO_IFG_DUMP_CONSTR = 1 << 3, /**< Dump the node constraints in the label. */
};

static int co_get_costs_loop_depth(const ir_node *root, int pos);
static int co_get_costs_exec_freq(const ir_node *root, int pos);
static int co_get_costs_all_one(const ir_node *root, int pos);

static unsigned   dump_flags  = 0;
static unsigned   style_flags = CO_IFG_DUMP_COLORS;
static int        do_stats    = 0;
static cost_fct_t cost_func   = co_get_costs_exec_freq;
static int        improve     = 1;

static const lc_opt_enum_mask_items_t dump_items[] = {
        { "before",  DUMP_BEFORE },
        { "after",   DUMP_AFTER  },
        { "appel",   DUMP_APPEL  },
        { "all",     DUMP_ALL    },
        { NULL,      0 }
};

static const lc_opt_enum_mask_items_t style_items[] = {
        { "color",   CO_IFG_DUMP_COLORS },
        { "labels",  CO_IFG_DUMP_LABELS },
        { "constr",  CO_IFG_DUMP_CONSTR },
        { "shape",   CO_IFG_DUMP_SHAPE  },
        { "full",    2 * CO_IFG_DUMP_SHAPE - 1 },
        { NULL,      0 }
};

typedef int (*opt_funcptr)(void);
static const lc_opt_enum_func_ptr_items_t cost_func_items[] = {
        { "freq",   (opt_funcptr) co_get_costs_exec_freq },
        { "loop",   (opt_funcptr) co_get_costs_loop_depth },
        { "one",    (opt_funcptr) co_get_costs_all_one },
        { NULL,     NULL }
};

static lc_opt_enum_mask_var_t dump_var = {
        &dump_flags, dump_items
};

static lc_opt_enum_mask_var_t style_var = {
        &style_flags, style_items
};

static lc_opt_enum_func_ptr_var_t cost_func_var = {
        (opt_funcptr*) &cost_func, cost_func_items
};

static const lc_opt_table_entry_t options[] = {
        LC_OPT_ENT_ENUM_FUNC_PTR ("cost",    "select a cost function",                                  &cost_func_var),
        LC_OPT_ENT_ENUM_MASK     ("dump",    "dump ifg before or after copy optimization",              &dump_var),
        LC_OPT_ENT_ENUM_MASK     ("style",   "dump style for ifg dumping",                              &style_var),
        LC_OPT_ENT_BOOL          ("stats",   "dump statistics after each optimization",                 &do_stats),
        LC_OPT_ENT_BOOL          ("improve", "run heur1 before if algo can exploit start solutions",    &improve),
        LC_OPT_LAST
};

static be_module_list_entry_t *copyopts = NULL;
static const co_algo_info *selected_copyopt = NULL;

void be_register_copyopt(const char *name, co_algo_info *copyopt)
{
        if (selected_copyopt == NULL)
                selected_copyopt = copyopt;
        be_add_module_to_list(&copyopts, name, copyopt);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyopt)
void be_init_copyopt(void)
{
        lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
        lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
        lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
        lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");

        lc_opt_add_table(co_grp, options);
        be_add_module_list_opt(co_grp, "algo", "select copy optimization algo",
                                       &copyopts, (void**) &selected_copyopt);
}

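/**
 * Dummy algorithm for the "none" option: performs no copy minimization.
 */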
static int void_algo(copy_opt_t *co)
{
        (void) co;
        return 0;
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copynone)
void be_init_copynone(void)
{
        static co_algo_info copyheur = {
                void_algo, 0
        };

        be_register_copyopt("none", &copyheur);
}

#undef QUICK_AND_DIRTY_HACK

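/**
 * Checks whether a and b interfere, using the interference graph if it has
 * already been built and falling back to liveness information otherwise.
 */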
static int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
{
        if (env->ifg)
                return be_ifg_connected(env->ifg, a, b);
        else {
                be_lv_t *lv = be_get_irg_liveness(env->irg);
                return be_values_interfere(lv, a, b);
        }
}


/******************************************************************************
    _____                           _
   / ____|                         | |
  | |  __  ___ _ __   ___ _ __ __ _| |
  | | |_ |/ _ \ '_ \ / _ \ '__/ _` | |
  | |__| |  __/ | | |  __/ | | (_| | |
   \_____|\___|_| |_|\___|_|  \__,_|_|

 ******************************************************************************/

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)


copy_opt_t *new_copy_opt(be_chordal_env_t *chordal_env, cost_fct_t get_costs)
{
        const char *s1, *s2, *s3;
        size_t len;
        copy_opt_t *co;

        FIRM_DBG_REGISTER(dbg, "ir.be.copyopt");

        co = XMALLOCZ(copy_opt_t);
        co->cenv      = chordal_env;
        co->irg       = chordal_env->irg;
        co->cls       = chordal_env->cls;
        co->get_costs = get_costs;

        s1 = get_irp_name();
        s2 = get_entity_name(get_irg_entity(co->irg));
        s3 = chordal_env->cls->name;
        len = strlen(s1) + strlen(s2) + strlen(s3) + 5;
        co->name = XMALLOCN(char, len);
        snprintf(co->name, len, "%s__%s__%s", s1, s2, s3);

        return co;
}

void free_copy_opt(copy_opt_t *co)
{
        xfree(co->name);
        free(co);
}

/**
 * Checks whether a node is optimizable, i.e. whether it is relevant for
 * coalescing (a register Phi, a Perm Proj or a should_be_same node).
 * @param irn  The irn to check
 */
static int co_is_optimizable_root(ir_node *irn)
{
        arch_register_req_t const *const req = arch_get_irn_register_req(irn);
        if (arch_register_req_is(req, ignore))
                return 0;

        if (is_Reg_Phi(irn) || is_Perm_Proj(irn))
                return 1;

        if (arch_register_req_is(req, should_be_same))
                return 1;

        return 0;
}

/**
 * Computes the costs of a copy according to loop depth.
 * @param root the root node of the copy (for Phis the copy is placed in the
 *             predecessor block selected by pos)
 * @param pos  the argument position in root's arguments
 * @return     Must be >= 0 in all cases.
 */
static int co_get_costs_loop_depth(const ir_node *root, int pos)
{
        ir_node *block = get_nodes_block(root);
        ir_loop *loop;
        int      cost;

        if (is_Phi(root)) {
                block = get_Block_cfgpred_block(block, pos);
        }
        loop = get_irn_loop(block);
        if (loop) {
                int d = get_loop_depth(loop);
                cost = d*d;
        } else {
                cost = 0;
        }
        return 1+cost;
}

static ir_execfreq_int_factors factors;

/**
 * Computes the costs of a copy according to execution frequency.
 * @param root the root node of the copy
 * @param pos  the argument position in root's arguments
 * @return Must be >= 0 in all cases.
 */
static int co_get_costs_exec_freq(const ir_node *root, int pos)
{
        ir_node *root_bl = get_nodes_block(root);
        ir_node *copy_bl
                = is_Phi(root) ? get_Block_cfgpred_block(root_bl, pos) : root_bl;
        int      res     = get_block_execfreq_int(&factors, copy_bl);

        /* don't allow values smaller than one. */
        return res < 1 ? 1 : res;
}

/**
 * All costs equal 1. Using this will reduce the _number_ of copies.
 * @param root the root node of the copy (unused)
 * @param pos  the argument position in root's arguments (unused)
 * @return Must be >= 0 in all cases.
 */
static int co_get_costs_all_one(const ir_node *root, int pos)
{
        (void) root;
        (void) pos;
        return 1;
}

/******************************************************************************
   ____        _   _    _       _ _          _____ _
  / __ \      | | | |  | |     (_) |        / ____| |
 | |  | |_ __ | |_| |  | |_ __  _| |_ ___  | (___ | |_ ___  _ __ __ _  __ _  ___
 | |  | | '_ \| __| |  | | '_ \| | __/ __|  \___ \| __/ _ \| '__/ _` |/ _` |/ _ \
 | |__| | |_) | |_| |__| | | | | | |_\__ \  ____) | || (_) | | | (_| | (_| |  __/
  \____/| .__/ \__|\____/|_| |_|_|\__|___/ |_____/ \__\___/|_|  \__,_|\__, |\___|
        | |                                                            __/ |
        |_|                                                           |___/
 ******************************************************************************/

/**
 * Determines the costs of a maximum weighted independent set with respect to
 * the interference edges of all nodes in an optimization unit.
 */
static int ou_max_ind_set_costs(unit_t *ou)
{
        be_chordal_env_t *chordal_env = ou->co->cenv;
        ir_node **safe, **unsafe;
        int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
        bitset_t *curr;
        int curr_weight, best_weight = 0;

        /* assign the nodes into two groups.
         * safe: node has no interference, hence it is in every max stable set.
         * unsafe: node has an interference
         */
        safe         = ALLOCAN(ir_node*, ou->node_count - 1);
        safe_costs   = 0;
        safe_count   = 0;
        unsafe       = ALLOCAN(ir_node*, ou->node_count - 1);
        unsafe_costs = ALLOCAN(int,      ou->node_count - 1);
        unsafe_count = 0;
        for (i=1; i<ou->node_count; ++i) {
                int is_safe = 1;
                for (o=1; o<ou->node_count; ++o) {
                        if (i==o)
                                continue;
                        if (nodes_interfere(chordal_env, ou->nodes[i], ou->nodes[o])) {
                                unsafe_costs[unsafe_count] = ou->costs[i];
                                unsafe[unsafe_count] = ou->nodes[i];
                                ++unsafe_count;
                                is_safe = 0;
                                break;
                        }
                }
                if (is_safe) {
                        safe_costs += ou->costs[i];
                        safe[safe_count++] = ou->nodes[i];
                }
        }


        /* now compute the best set out of the unsafe nodes */
        if (unsafe_count > MIS_HEUR_TRIGGER) {
                bitset_t *best = bitset_alloca(unsafe_count);
                /* Heuristic: greedy trial and error from index 0 to unsafe_count-1 */
                for (i=0; i<unsafe_count; ++i) {
                        bitset_set(best, i);
                        /* check if it is a stable set */
                        for (o=bitset_next_set(best, 0); o!=-1 && o<i; o=bitset_next_set(best, o+1))
                                if (nodes_interfere(chordal_env, unsafe[i], unsafe[o])) {
                                        bitset_clear(best, i); /* clear the bit and try next one */
                                        break;
                                }
                }
                /* compute the weight */
                bitset_foreach(best, pos)
                        best_weight += unsafe_costs[pos];
        } else {
                /* Exact Algorithm: Brute force */
                curr = bitset_alloca(unsafe_count);
                bitset_set_all(curr);
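                /* Enumerate all non-empty subsets of the unsafe nodes: bitset_minus1()
                 * decrements curr interpreted as a binary number, so every subset is
                 * visited exactly once (exponential in unsafe_count). */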
                while (bitset_popcount(curr) != 0) {
                        /* check if curr is a stable set */
                        for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
                                for (o=bitset_next_set(curr, i+1); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to qnode_max_ind_set(): NOT (curr, i) */
                                        if (nodes_interfere(chordal_env, unsafe[i], unsafe[o]))
                                                goto no_stable_set;

                        /* if we arrive here, we have a stable set */
                        /* compute the weight of the stable set */
                        curr_weight = 0;
                        bitset_foreach(curr, pos)
                                curr_weight += unsafe_costs[pos];

                        /* any better ? */
                        if (curr_weight > best_weight) {
                                best_weight = curr_weight;
                        }

no_stable_set:
                        bitset_minus1(curr);
                }
        }

        return safe_costs+best_weight;
}

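/**
 * Walker: builds an optimization unit for every optimizable root node of co's
 * register class, i.e. a register Phi with its arguments, a Perm Proj with its
 * source, or a should_be_same node with its tied operands.
 */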
static void co_collect_units(ir_node *irn, void *env)
{
        const arch_register_req_t *req;
        copy_opt_t                *co  = (copy_opt_t*)env;
        unit_t *unit;

        if (get_irn_mode(irn) == mode_T)
                return;
        req = arch_get_irn_register_req(irn);
        if (req->cls != co->cls)
                return;
        if (!co_is_optimizable_root(irn))
                return;

        /* Init a new unit */
        unit = XMALLOCZ(unit_t);
        unit->co = co;
        unit->node_count = 1;
        INIT_LIST_HEAD(&unit->queue);

        /* Phi with some/all of its arguments */
        if (is_Reg_Phi(irn)) {
                int i, arity;

                /* init */
                arity = get_irn_arity(irn);
                unit->nodes = XMALLOCN(ir_node*, arity + 1);
                unit->costs = XMALLOCN(int,      arity + 1);
                unit->nodes[0] = irn;

                /* fill */
                for (i=0; i<arity; ++i) {
                        int o, arg_pos;
                        ir_node *arg = get_irn_n(irn, i);

                        assert(arch_get_irn_reg_class(arg) == co->cls && "Argument not in same register class.");
                        if (arg == irn)
                                continue;
                        if (nodes_interfere(co->cenv, irn, arg)) {
                                unit->inevitable_costs += co->get_costs(irn, i);
                                continue;
                        }

                        /* Otherwise insert the argument of the Phi into the members of this ou */
                        DBG((dbg, LEVEL_1, "\t   Member: %+F\n", arg));

                        if (arch_irn_is_ignore(arg))
                                continue;

                        /* Check if arg has occurred at a prior position in the arg list */
                        arg_pos = 0;
                        for (o=1; o<unit->node_count; ++o) {
                                if (unit->nodes[o] == arg) {
                                        arg_pos = o;
                                        break;
                                }
                        }

                        if (!arg_pos) { /* a new argument */
                                /* insert node, set costs */
                                unit->nodes[unit->node_count] = arg;
                                unit->costs[unit->node_count] = co->get_costs(irn, i);
                                unit->node_count++;
                        } else { /* arg has occurred before in same phi */
                                /* increase costs for existing arg */
                                unit->costs[arg_pos] += co->get_costs(irn, i);
                        }
                }
                unit->nodes = XREALLOC(unit->nodes, ir_node*, unit->node_count);
                unit->costs = XREALLOC(unit->costs, int,      unit->node_count);
        } else if (is_Perm_Proj(irn)) {
                /* Proj of a perm with corresponding arg */
                assert(!nodes_interfere(co->cenv, irn, get_Perm_src(irn)));
                unit->nodes = XMALLOCN(ir_node*, 2);
                unit->costs = XMALLOCN(int,      2);
                unit->node_count = 2;
                unit->nodes[0] = irn;
                unit->nodes[1] = get_Perm_src(irn);
                unit->costs[1] = co->get_costs(irn, -1);
        } else if (arch_register_req_is(req, should_be_same)) {
                /* Src == Tgt of a 2-addr-code instruction */
                const unsigned other = req->other_same;
                int            count = 0;
                int            i;

                for (i = 0; (1U << i) <= other; ++i) {
                        if (other & (1U << i)) {
                                ir_node *o  = get_irn_n(skip_Proj(irn), i);
                                if (arch_irn_is_ignore(o))
                                        continue;
                                if (nodes_interfere(co->cenv, irn, o))
                                        continue;
                                ++count;
                        }
                }

                if (count != 0) {
                        int k = 0;
                        ++count;
                        unit->nodes = XMALLOCN(ir_node*, count);
                        unit->costs = XMALLOCN(int,      count);
                        unit->node_count = count;
                        unit->nodes[k++] = irn;

                        for (i = 0; 1U << i <= other; ++i) {
                                if (other & (1U << i)) {
                                        ir_node *o  = get_irn_n(skip_Proj(irn), i);
                                        if (!arch_irn_is_ignore(o) &&
                                                        !nodes_interfere(co->cenv, irn, o)) {
                                                unit->nodes[k] = o;
                                                unit->costs[k] = co->get_costs(irn, -1);
                                                ++k;
                                        }
                                }
                        }
                }
        } else {
                assert(0 && "This is not an optimizable node!");
        }

        /* Insert the new unit at a position according to its costs */
        if (unit->node_count > 1) {
                int i;
                struct list_head *tmp;

                /* Determine the maximum costs this unit can cause: all_nodes_costs */
                for (i=1; i<unit->node_count; ++i) {
                        unit->sort_key = MAX(unit->sort_key, unit->costs[i]);
                        unit->all_nodes_costs += unit->costs[i];
                }

                /* Determine the minimal costs this unit will cause: min_nodes_costs */
                unit->min_nodes_costs += unit->all_nodes_costs - ou_max_ind_set_costs(unit);
                /* Insert the new ou according to its sort_key */
                tmp = &co->units;
                while (tmp->next != &co->units && list_entry_units(tmp->next)->sort_key > unit->sort_key)
                        tmp = tmp->next;
                list_add(&unit->units, tmp);
        } else {
                free(unit);
        }
}

#ifdef QUICK_AND_DIRTY_HACK

static int compare_ous(const void *k1, const void *k2)
{
        const unit_t *u1 = *((const unit_t **) k1);
        const unit_t *u2 = *((const unit_t **) k2);
        int i, u1_has_constr, u2_has_constr;

        /* Units with constraints come first */
        u1_has_constr = 0;
        for (i=0; i<u1->node_count; ++i) {
                const arch_register_req_t *req = arch_get_irn_register_req(u1->nodes[i]);
                if (arch_register_req_is(req, limited)) {
                        u1_has_constr = 1;
                        break;
                }
        }

        u2_has_constr = 0;
        for (i=0; i<u2->node_count; ++i) {
                const arch_register_req_t *req = arch_get_irn_register_req(u2->nodes[i]);
                if (arch_register_req_is(req, limited)) {
                        u2_has_constr = 1;
                        break;
                }
        }

        if (u1_has_constr != u2_has_constr)
                return u2_has_constr - u1_has_constr;

        /* Otherwise the sort key decides. Greater keys come first. */
        return u2->sort_key - u1->sort_key;
}

/**
 * Sorts the ous according to constraints and their sort_key.
 */
static void co_sort_units(copy_opt_t *co)
{
        int i, count = 0, costs;
        unit_t **ous;

        /* get the number of ous, remove them from the list and fill the array */
        list_for_each_entry(unit_t, ou, &co->units, units)
                count++;
        ous = ALLOCAN(unit_t*, count);

        costs = co_get_max_copy_costs(co);

        i = 0;
        list_for_each_entry(unit_t, ou, &co->units, units)
                ous[i++] = ou;

        INIT_LIST_HEAD(&co->units);

        assert(count == i && list_empty(&co->units));

        for (i=0; i<count; ++i)
                ir_printf("%+F\n", ous[i]->nodes[0]);

        qsort(ous, count, sizeof(*ous), compare_ous);

        ir_printf("\n\n");
        for (i=0; i<count; ++i)
                ir_printf("%+F\n", ous[i]->nodes[0]);

        /* reinsert into list in correct order */
        for (i=0; i<count; ++i)
                list_add_tail(&ous[i]->units, &co->units);

        assert(costs == co_get_max_copy_costs(co));
}
#endif

void co_build_ou_structure(copy_opt_t *co)
{
        DBG((dbg, LEVEL_1, "\tCollecting optimization units\n"));
        INIT_LIST_HEAD(&co->units);
        irg_walk_graph(co->irg, co_collect_units, NULL, co);
#ifdef QUICK_AND_DIRTY_HACK
        co_sort_units(co);
#endif
}

void co_free_ou_structure(copy_opt_t *co)
{
        ASSERT_OU_AVAIL(co);
        list_for_each_entry_safe(unit_t, curr, tmp, &co->units, units) {
                xfree(curr->nodes);
                xfree(curr->costs);
                xfree(curr);
        }
        co->units.next = NULL;
}

/* co_solve_heuristic() is implemented in becopyheur.c */

int co_get_max_copy_costs(const copy_opt_t *co)
{
        int i, res = 0;

        ASSERT_OU_AVAIL(co);

        list_for_each_entry(unit_t, curr, &co->units, units) {
                res += curr->inevitable_costs;
                for (i=1; i<curr->node_count; ++i)
                        res += curr->costs[i];
        }
        return res;
}

int co_get_inevit_copy_costs(const copy_opt_t *co)
{
        int res = 0;

        ASSERT_OU_AVAIL(co);

        list_for_each_entry(unit_t, curr, &co->units, units)
                res += curr->inevitable_costs;
        return res;
}

int co_get_copy_costs(const copy_opt_t *co)
{
        int i, res = 0;

        ASSERT_OU_AVAIL(co);

        list_for_each_entry(unit_t, curr, &co->units, units) {
                int root_col = get_irn_col(curr->nodes[0]);
                DBG((dbg, LEVEL_1, "  %3d costs for root %+F color %d\n", curr->inevitable_costs, curr->nodes[0], root_col));
                res += curr->inevitable_costs;
                for (i=1; i<curr->node_count; ++i) {
                        int arg_col = get_irn_col(curr->nodes[i]);
                        if (root_col != arg_col) {
                                DBG((dbg, LEVEL_1, "  %3d for arg %+F color %d\n", curr->costs[i], curr->nodes[i], arg_col));
                                res += curr->costs[i];
                        }
                }
        }
        return res;
}

int co_get_lower_bound(const copy_opt_t *co)
{
        int res = 0;

        ASSERT_OU_AVAIL(co);

        list_for_each_entry(unit_t, curr, &co->units, units)
                res += curr->inevitable_costs + curr->min_nodes_costs;
        return res;
}

void co_complete_stats(const copy_opt_t *co, co_complete_stats_t *stat)
{
        bitset_t *seen = bitset_malloc(get_irg_last_idx(co->irg));

        memset(stat, 0, sizeof(stat[0]));

        /* count affinity edges. */
        co_gs_foreach_aff_node(co, an) {
                stat->aff_nodes += 1;
                bitset_set(seen, get_irn_idx(an->irn));
                co_gs_foreach_neighb(an, neigh) {
                        if (!bitset_is_set(seen, get_irn_idx(neigh->irn))) {
                                stat->aff_edges += 1;
                                stat->max_costs += neigh->costs;

                                if (get_irn_col(an->irn) != get_irn_col(neigh->irn)) {
                                        stat->costs += neigh->costs;
                                        stat->unsatisfied_edges += 1;
                                }

                                if (nodes_interfere(co->cenv, an->irn, neigh->irn)) {
                                        stat->aff_int += 1;
                                        stat->inevit_costs += neigh->costs;
                                }
                        }
                }
        }

        bitset_free(seen);
}

/******************************************************************************
   _____                 _        _____ _
  / ____|               | |      / ____| |
 | |  __ _ __ __ _ _ __ | |__   | (___ | |_ ___  _ __ __ _  __ _  ___
 | | |_ | '__/ _` | '_ \| '_ \   \___ \| __/ _ \| '__/ _` |/ _` |/ _ \
 | |__| | | | (_| | |_) | | | |  ____) | || (_) | | | (_| | (_| |  __/
  \_____|_|  \__,_| .__/|_| |_| |_____/ \__\___/|_|  \__,_|\__, |\___|
                  | |                                       __/ |
                  |_|                                      |___/
 ******************************************************************************/

static int compare_affinity_node_t(const void *k1, const void *k2, size_t size)
{
        const affinity_node_t *n1 = (const affinity_node_t*)k1;
        const affinity_node_t *n2 = (const affinity_node_t*)k2;
        (void) size;

        return (n1->irn != n2->irn);
}

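/**
 * Adds the directed affinity edge (n1 -> n2) with the given costs to n1's
 * neighbour list; if the edge already exists, only the costs are accumulated.
 */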
static void add_edge(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs)
{
        affinity_node_t new_node, *node;
        neighb_t        *nbr;
        int             allocnew = 1;

        new_node.irn        = n1;
        new_node.degree     = 0;
        new_node.neighbours = NULL;
        node = set_insert(affinity_node_t, co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));

        for (nbr = node->neighbours; nbr; nbr = nbr->next)
                if (nbr->irn == n2) {
                        allocnew = 0;
                        break;
                }

        /* if we did not find n2 in n1's neighbourhood insert it */
        if (allocnew) {
                nbr        = OALLOC(&co->obst, neighb_t);
                nbr->irn   = n2;
                nbr->costs = 0;
                nbr->next  = node->neighbours;

                node->neighbours = nbr;
                node->degree++;
        }

        /* now nbr points to n1's neighbour-entry of n2 */
        nbr->costs += costs;
}

static inline void add_edges(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs)
{
        if (! be_ifg_connected(co->cenv->ifg, n1, n2)) {
                add_edge(co, n1, n2, costs);
                add_edge(co, n2, n1, costs);
        }
}

static void build_graph_walker(ir_node *irn, void *env)
{
        const arch_register_req_t *req;
        copy_opt_t                *co  = (copy_opt_t*)env;
        int pos, max;

        if (get_irn_mode(irn) == mode_T)
                return;
        req = arch_get_irn_register_req(irn);
        if (req->cls != co->cls || arch_register_req_is(req, ignore))
                return;

        if (is_Reg_Phi(irn)) { /* Phis */
                for (pos=0, max=get_irn_arity(irn); pos<max; ++pos) {
                        ir_node *arg = get_irn_n(irn, pos);
                        add_edges(co, irn, arg, co->get_costs(irn, pos));
                }
        } else if (is_Perm_Proj(irn)) { /* Perms */
                ir_node *arg = get_Perm_src(irn);
                add_edges(co, irn, arg, co->get_costs(irn, -1));
        } else if (arch_register_req_is(req, should_be_same)) {
                const unsigned other = req->other_same;
                int i;

                for (i = 0; 1U << i <= other; ++i) {
                        if (other & (1U << i)) {
                                ir_node *o = get_irn_n(skip_Proj(irn), i);
                                if (!arch_irn_is_ignore(o))
                                        add_edges(co, irn, o, co->get_costs(irn, -1));
                        }
                }
        }
}

void co_build_graph_structure(copy_opt_t *co)
{
        obstack_init(&co->obst);
        co->nodes = new_set(compare_affinity_node_t, 32);

        irg_walk_graph(co->irg, build_graph_walker, NULL, co);
}

void co_free_graph_structure(copy_opt_t *co)
{
        ASSERT_GS_AVAIL(co);

        del_set(co->nodes);
        obstack_free(&co->obst, NULL);
        co->nodes = NULL;
}

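/**
 * Returns non-zero iff irn has at least one affinity neighbour in the graph
 * structure.
 */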
int co_gs_is_optimizable(copy_opt_t *co, ir_node *irn)
{
        affinity_node_t new_node, *n;

        ASSERT_GS_AVAIL(co);

        new_node.irn = irn;
        n = set_find(affinity_node_t, co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
        if (n)
                return n->degree > 0;
        else
                return 0;
}

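/**
 * Returns non-zero iff the admissible register sets of a and b are disjoint,
 * i.e. the two nodes can never end up in the same register anyway.
 */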
static int co_dump_appel_disjoint_constraints(const copy_opt_t *co, ir_node *a, ir_node *b)
{
        ir_node *nodes[]  = { a, b };
        bitset_t *constr[] = { NULL, NULL };
        int j;

        constr[0] = bitset_alloca(co->cls->n_regs);
        constr[1] = bitset_alloca(co->cls->n_regs);

        for (j = 0; j < 2; ++j) {
                const arch_register_req_t *req = arch_get_irn_register_req(nodes[j]);
                if (arch_register_req_is(req, limited))
                        rbitset_copy_to_bitset(req->limited, constr[j]);
                else
                        bitset_set_all(constr[j]);
        }

        return !bitset_intersect(constr[0], constr[1]);
}

/**
 * Dump the interference graph according to the Appel/George coalescing contest file format.
 * See: http://www.cs.princeton.edu/~appel/coalesce/format.html
 * @note Requires graph structure.
 * @param co The copy opt object.
 * @param f  A file to dump to.
 */
static void co_dump_appel_graph(const copy_opt_t *co, FILE *f)
{
        be_ifg_t *ifg       = co->cenv->ifg;
        int      *color_map = ALLOCAN(int, co->cls->n_regs);
        int      *node_map  = XMALLOCN(int, get_irg_last_idx(co->irg) + 1);
        ir_graph *irg       = co->irg;
        be_irg_t *birg      = be_birg_from_irg(irg);

        nodes_iter_t it;
        neighbours_iter_t nit;
        int n, n_regs;
        unsigned i;

        n_regs = 0;
        for (i = 0; i < co->cls->n_regs; ++i) {
                const arch_register_t *reg = &co->cls->regs[i];
                if (rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
                        color_map[i] = n_regs++;
                } else {
                        color_map[i] = -1;
                }
        }

        /*
         * n contains the first node number.
         * the values below n are the pre-colored register nodes
         */

        n = n_regs;
        be_ifg_foreach_node(ifg, &it, irn) {
                if (arch_irn_is_ignore(irn))
                        continue;
                node_map[get_irn_idx(irn)] = n++;
        }

        fprintf(f, "%d %d\n", n, n_regs);

        be_ifg_foreach_node(ifg, &it, irn) {
                arch_register_req_t const *const req = arch_get_irn_register_req(irn);
                if (arch_register_req_is(req, ignore))
                        continue;

                int              idx = node_map[get_irn_idx(irn)];
                affinity_node_t *a   = get_affinity_info(co, irn);

                if (arch_register_req_is(req, limited)) {
                        for (i = 0; i < co->cls->n_regs; ++i) {
                                if (!rbitset_is_set(req->limited, i) && color_map[i] >= 0)
                                        fprintf(f, "%d %d -1\n", color_map[i], idx);
                        }
                }

                be_ifg_foreach_neighbour(ifg, &nit, irn, adj) {
                        if (!arch_irn_is_ignore(adj) &&
                                        !co_dump_appel_disjoint_constraints(co, irn, adj)) {
                                int adj_idx = node_map[get_irn_idx(adj)];
                                if (idx < adj_idx)
                                        fprintf(f, "%d %d -1\n", idx, adj_idx);
                        }
                }

                if (a) {
                        co_gs_foreach_neighb(a, neigh) {
                                if (!arch_irn_is_ignore(neigh->irn)) {
                                        int n_idx = node_map[get_irn_idx(neigh->irn)];
                                        if (idx < n_idx)
                                                fprintf(f, "%d %d %d\n", idx, n_idx, (int) neigh->costs);
                                }
                        }
                }
        }

        xfree(node_map);
}

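/**
 * Opens a dump file whose name is built from the compilation unit name (with
 * '.' replaced by '_'), the given prefix and suffix, the current irg and the
 * register class.
 */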
static FILE *my_open(const be_chordal_env_t *env, const char *prefix,
                     const char *suffix)
{
        FILE *result;
        char buf[1024];
        size_t i, n;
        char *tu_name;
        const char *cup_name = be_get_irg_main_env(env->irg)->cup_name;

        n = strlen(cup_name);
        tu_name = XMALLOCN(char, n + 1);
        strcpy(tu_name, cup_name);
        for (i = 0; i < n; ++i)
                if (tu_name[i] == '.')
                        tu_name[i] = '_';

        ir_snprintf(buf, sizeof(buf), "%s%s_%F_%s%s", prefix, tu_name, env->irg, env->cls->name, suffix);
        xfree(tu_name);
        result = fopen(buf, "wt");
        if (result == NULL) {
                panic("Couldn't open '%s' for writing.", buf);
        }

        return result;
}

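/**
 * Main driver of the copy minimization: builds the optimization-unit and
 * affinity-graph structures, runs the selected algorithm and emits the
 * requested statistics and dumps.
 */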
void co_driver(be_chordal_env_t *cenv)
{
        ir_timer_t          *timer = ir_timer_new();
        co_complete_stats_t before, after;
        copy_opt_t          *co;
        int                 was_optimal = 0;

        assert(selected_copyopt);

        /* skip copymin if algo is 'none' */
        if (selected_copyopt->copyopt == void_algo)
                return;

        be_assure_live_chk(cenv->irg);

        co = new_copy_opt(cenv, cost_func);
        co_build_ou_structure(co);
        co_build_graph_structure(co);

        co_complete_stats(co, &before);

        stat_ev_ull("co_aff_nodes",    before.aff_nodes);
        stat_ev_ull("co_aff_edges",    before.aff_edges);
        stat_ev_ull("co_max_costs",    before.max_costs);
        stat_ev_ull("co_inevit_costs", before.inevit_costs);
        stat_ev_ull("co_aff_int",      before.aff_int);

        stat_ev_ull("co_init_costs",   before.costs);
        stat_ev_ull("co_init_unsat",   before.unsatisfied_edges);

        if (dump_flags & DUMP_BEFORE) {
                FILE *f = my_open(cenv, "", "-before.vcg");
                be_dump_ifg_co(f, co, style_flags & CO_IFG_DUMP_LABELS, style_flags & CO_IFG_DUMP_COLORS);
                fclose(f);
        }

        /* if the algo can improve results, provide an initial solution with heur1 */
        if (improve && selected_copyopt->can_improve_existing) {
                co_complete_stats_t stats;

                /* produce a heuristic solution */
                co_solve_heuristic(co);

                /* do the stats and provide the current costs */
                co_complete_stats(co, &stats);
                stat_ev_ull("co_prepare_costs", stats.costs);
        }

        /* perform actual copy minimization */
        ir_timer_reset_and_start(timer);
        was_optimal = selected_copyopt->copyopt(co);
        ir_timer_stop(timer);

        stat_ev_dbl("co_time", ir_timer_elapsed_msec(timer));
        stat_ev_ull("co_optimal", was_optimal);
        ir_timer_free(timer);

        if (dump_flags & DUMP_AFTER) {
                FILE *f = my_open(cenv, "", "-after.vcg");
                be_dump_ifg_co(f, co, style_flags & CO_IFG_DUMP_LABELS, style_flags & CO_IFG_DUMP_COLORS);
                fclose(f);
        }

        co_complete_stats(co, &after);

        if (do_stats) {
                unsigned long long optimizable_costs = after.max_costs - after.inevit_costs;
                unsigned long long evitable          = after.costs     - after.inevit_costs;

                ir_printf("%30F ", cenv->irg);
                printf("%10s %10llu%10llu%10llu", cenv->cls->name, after.max_costs, before.costs, after.inevit_costs);

                if (optimizable_costs > 0)
                        printf("%10llu %5.2f\n", after.costs, (evitable * 100.0) / optimizable_costs);
                else
                        printf("%10llu %5s\n", after.costs, "-");
        }

        /* Dump the interference graph in Appel's format. */
        if (dump_flags & DUMP_APPEL) {
                FILE *f = my_open(cenv, "", ".apl");
                fprintf(f, "# %llu %llu\n", after.costs, after.unsatisfied_edges);
                co_dump_appel_graph(co, f);
                fclose(f);
        }

        stat_ev_ull("co_after_costs", after.costs);
        stat_ev_ull("co_after_unsat", after.unsatisfied_edges);

        co_free_graph_structure(co);
        co_free_ou_structure(co);
        free_copy_opt(co);
}