e080dcf9165228c2de42927cf957551052c9de9b
[libfirm] / ir / be / becopyopt.c
1 /*
2  * This file is part of libFirm.
3  * Copyright (C) 2012 University of Karlsruhe.
4  */
5
6 /**
7  * @file
8  * @brief       Copy minimization driver.
9  * @author      Daniel Grund
10  * @date        12.04.2005
11  *
12  * Main file for the optimization reducing the copies needed for:
13  * - Phi coalescing
14  * - Register-constrained nodes
15  * - Two-address code instructions
16  */
17 #include "config.h"
18
19 #include "debug.h"
20 #include "error.h"
21 #include "execfreq_t.h"
22 #include "irdump_t.h"
23 #include "iredges_t.h"
24 #include "irgraph.h"
25 #include "irgwalk.h"
26 #include "irloop_t.h"
27 #include "irnode.h"
28 #include "irprintf.h"
29 #include "irprog.h"
30 #include "irtools.h"
31 #include "pmap.h"
32 #include "raw_bitset.h"
33 #include "util.h"
34 #include "xmalloc.h"
35
36 #include "bearch.h"
37 #include "becopyopt_t.h"
38 #include "becopystat.h"
39 #include "bedump.h"
40 #include "beifg.h"
41 #include "beinsn_t.h"
42 #include "beintlive_t.h"
43 #include "beirg.h"
44 #include "belive_t.h"
45 #include "bemodule.h"
46 #include "benode.h"
47 #include "besched.h"
48 #include "statev_t.h"
49 #include "beutil.h"
50
51 #include "lc_opts.h"
52 #include "lc_opts_enum.h"
53
54 #define DUMP_BEFORE 1
55 #define DUMP_AFTER  2
56 #define DUMP_APPEL  4
57 #define DUMP_ALL    2 * DUMP_APPEL - 1
58
59 #define COST_FUNC_FREQ     1
60 #define COST_FUNC_LOOP     2
61 #define COST_FUNC_ALL_ONE  3
62
63 /**
64  * Flags for dumping the IFG.
65  */
66 enum {
67         CO_IFG_DUMP_COLORS = 1 << 0, /**< Dump the graph colored. */
68         CO_IFG_DUMP_LABELS = 1 << 1, /**< Dump node/edge labels. */
69         CO_IFG_DUMP_SHAPE  = 1 << 2, /**< Give constrained nodes special shapes. */
70         CO_IFG_DUMP_CONSTR = 1 << 3, /**< Dump the node constraints in the label. */
71 };
72
73 static int co_get_costs_loop_depth(const ir_node *root, int pos);
74 static int co_get_costs_exec_freq(const ir_node *root, int pos);
75 static int co_get_costs_all_one(const ir_node *root, int pos);
76
77 static unsigned   dump_flags  = 0;
78 static unsigned   style_flags = CO_IFG_DUMP_COLORS;
79 static int        do_stats    = 0;
80 static cost_fct_t cost_func   = co_get_costs_exec_freq;
81 static int        improve     = 1;
82
83 static const lc_opt_enum_mask_items_t dump_items[] = {
84         { "before",  DUMP_BEFORE },
85         { "after",   DUMP_AFTER  },
86         { "appel",   DUMP_APPEL  },
87         { "all",     DUMP_ALL    },
88         { NULL,      0 }
89 };
90
91 static const lc_opt_enum_mask_items_t style_items[] = {
92         { "color",   CO_IFG_DUMP_COLORS },
93         { "labels",  CO_IFG_DUMP_LABELS },
94         { "constr",  CO_IFG_DUMP_CONSTR },
95         { "shape",   CO_IFG_DUMP_SHAPE  },
96         { "full",    2 * CO_IFG_DUMP_SHAPE - 1 },
97         { NULL,      0 }
98 };
99
100 typedef int (*opt_funcptr)(void);
101 static const lc_opt_enum_func_ptr_items_t cost_func_items[] = {
102         { "freq",   (opt_funcptr) co_get_costs_exec_freq },
103         { "loop",   (opt_funcptr) co_get_costs_loop_depth },
104         { "one",    (opt_funcptr) co_get_costs_all_one },
105         { NULL,     NULL }
106 };
107
108 static lc_opt_enum_mask_var_t dump_var = {
109         &dump_flags, dump_items
110 };
111
112 static lc_opt_enum_mask_var_t style_var = {
113         &style_flags, style_items
114 };
115
116 static lc_opt_enum_func_ptr_var_t cost_func_var = {
117         (opt_funcptr*) &cost_func, cost_func_items
118 };
119
120 static const lc_opt_table_entry_t options[] = {
121         LC_OPT_ENT_ENUM_FUNC_PTR ("cost",    "select a cost function",                                  &cost_func_var),
122         LC_OPT_ENT_ENUM_MASK     ("dump",    "dump ifg before or after copy optimization",              &dump_var),
123         LC_OPT_ENT_ENUM_MASK     ("style",   "dump style for ifg dumping",                              &style_var),
124         LC_OPT_ENT_BOOL          ("stats",   "dump statistics after each optimization",                 &do_stats),
125         LC_OPT_ENT_BOOL          ("improve", "run heur1 before if algo can exploit start solutions",    &improve),
126         LC_OPT_LAST
127 };
128
129 static be_module_list_entry_t *copyopts = NULL;
130 static const co_algo_info *selected_copyopt = NULL;
131
132 void be_register_copyopt(const char *name, co_algo_info *copyopt)
133 {
134         if (selected_copyopt == NULL)
135                 selected_copyopt = copyopt;
136         be_add_module_to_list(&copyopts, name, copyopt);
137 }
138
139 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyopt)
140 void be_init_copyopt(void)
141 {
142         lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
143         lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
144         lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
145         lc_opt_entry_t *co_grp = lc_opt_get_grp(chordal_grp, "co");
146
147         lc_opt_add_table(co_grp, options);
148         be_add_module_list_opt(co_grp, "algo", "select copy optimization algo",
149                                        &copyopts, (void**) &selected_copyopt);
150 }
151
/**
 * The "none" algorithm: performs no copy optimization at all.
 * @param co  The copy opt object (unused).
 * @return Always 0.
 */
static int void_algo(copy_opt_t *co)
{
	(void) co;
	return 0;
}
157
/** Registers the do-nothing copy optimization under the name "none". */
BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copynone)
void be_init_copynone(void)
{
	/* static: the descriptor must outlive this constructor */
	static co_algo_info copyheur = {
		void_algo, 0
	};

	be_register_copyopt("none", &copyheur);
}
167
168 #undef QUICK_AND_DIRTY_HACK
169
170 /******************************************************************************
171     _____                           _
172    / ____|                         | |
173   | |  __  ___ _ __   ___ _ __ __ _| |
174   | | |_ |/ _ \ '_ \ / _ \ '__/ _` | |
175   | |__| |  __/ | | |  __/ | | (_| | |
176    \_____|\___|_| |_|\___|_|  \__,_|_|
177
178  ******************************************************************************/
179
180 DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
181
182
183 copy_opt_t *new_copy_opt(be_chordal_env_t *chordal_env, cost_fct_t get_costs)
184 {
185         FIRM_DBG_REGISTER(dbg, "ir.be.copyopt");
186
187         copy_opt_t *const co = XMALLOCZ(copy_opt_t);
188         co->cenv      = chordal_env;
189         co->irg       = chordal_env->irg;
190         co->cls       = chordal_env->cls;
191         co->get_costs = get_costs;
192         return co;
193 }
194
/**
 * Frees a copy optimization object created by new_copy_opt().
 * Note: the OU/graph structures must be freed separately beforehand
 * (co_free_ou_structure() / co_free_graph_structure()).
 */
void free_copy_opt(copy_opt_t *co)
{
	free(co);
}
199
200 /**
201  * Checks if a node is optimizable, viz. has something to do with coalescing
202  * @param irn  The irn to check
203  */
204 static int co_is_optimizable_root(ir_node *irn)
205 {
206         arch_register_req_t const *const req = arch_get_irn_register_req(irn);
207         if (arch_register_req_is(req, ignore))
208                 return 0;
209
210         if (is_Reg_Phi(irn) || is_Perm_Proj(irn))
211                 return 1;
212
213         if (arch_register_req_is(req, should_be_same))
214                 return 1;
215
216         return 0;
217 }
218
219 /**
220  * Computes the costs of a copy according to loop depth
221  * @param pos  the argument position of arg in the root arguments
222  * @return     Must be >= 0 in all cases.
223  */
224 static int co_get_costs_loop_depth(const ir_node *root, int pos)
225 {
226         ir_node *block = get_nodes_block(root);
227         ir_loop *loop;
228         int      cost;
229
230         if (is_Phi(root)) {
231                 block = get_Block_cfgpred_block(block, pos);
232         }
233         loop = get_irn_loop(block);
234         if (loop) {
235                 int d = get_loop_depth(loop);
236                 cost = d*d;
237         } else {
238                 cost = 0;
239         }
240         return 1+cost;
241 }
242
243 static ir_execfreq_int_factors factors;
244
245 /**
246  * Computes the costs of a copy according to execution frequency
247  * @param pos  the argument position of arg in the root arguments
248  * @return Must be >= 0 in all cases.
249  */
250 static int co_get_costs_exec_freq(const ir_node *root, int pos)
251 {
252         ir_node *root_bl = get_nodes_block(root);
253         ir_node *copy_bl
254                 = is_Phi(root) ? get_Block_cfgpred_block(root_bl, pos) : root_bl;
255         int      res     = get_block_execfreq_int(&factors, copy_bl);
256
257         /* don't allow values smaller than one. */
258         return res < 1 ? 1 : res;
259 }
260
261 /**
262  * All costs equal 1. Using this will reduce the _number_ of copies.
263  * @param co   The copy opt object.
264  * @return Must be >= 0 in all cases.
265  */
266 static int co_get_costs_all_one(const ir_node *root, int pos)
267 {
268         (void) root;
269         (void) pos;
270         return 1;
271 }
272
273 /******************************************************************************
274    ____        _   _    _       _ _          _____ _
275   / __ \      | | | |  | |     (_) |        / ____| |
276  | |  | |_ __ | |_| |  | |_ __  _| |_ ___  | (___ | |_ ___  _ __ __ _  __ _  ___
277  | |  | | '_ \| __| |  | | '_ \| | __/ __|  \___ \| __/ _ \| '__/ _` |/ _` |/ _ \
278  | |__| | |_) | |_| |__| | | | | | |_\__ \  ____) | || (_) | | | (_| | (_| |  __/
279   \____/| .__/ \__|\____/|_| |_|_|\__|___/ |_____/ \__\___/|_|  \__,_|\__, |\___|
280         | |                                                            __/ |
281         |_|                                                           |___/
282  ******************************************************************************/
283
284 /**
285  * Determines a maximum weighted independent set with respect to
286  * the interference and conflict edges of all nodes in a qnode.
287  */
288 static int ou_max_ind_set_costs(unit_t *const ou, be_lv_t const *const lv)
289 {
290         ir_node **safe, **unsafe;
291         int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
292         bitset_t *curr;
293         int curr_weight, best_weight = 0;
294
295         /* assign the nodes into two groups.
296          * safe: node has no interference, hence it is in every max stable set.
297          * unsafe: node has an interference
298          */
299         safe         = ALLOCAN(ir_node*, ou->node_count - 1);
300         safe_costs   = 0;
301         safe_count   = 0;
302         unsafe       = ALLOCAN(ir_node*, ou->node_count - 1);
303         unsafe_costs = ALLOCAN(int,      ou->node_count - 1);
304         unsafe_count = 0;
305         for (i=1; i<ou->node_count; ++i) {
306                 int is_safe = 1;
307                 for (o=1; o<ou->node_count; ++o) {
308                         if (i==o)
309                                 continue;
310                         if (be_values_interfere(lv, ou->nodes[i], ou->nodes[o])) {
311                                 unsafe_costs[unsafe_count] = ou->costs[i];
312                                 unsafe[unsafe_count] = ou->nodes[i];
313                                 ++unsafe_count;
314                                 is_safe = 0;
315                                 break;
316                         }
317                 }
318                 if (is_safe) {
319                         safe_costs += ou->costs[i];
320                         safe[safe_count++] = ou->nodes[i];
321                 }
322         }
323
324
325         /* now compute the best set out of the unsafe nodes*/
326         if (unsafe_count > MIS_HEUR_TRIGGER) {
327                 bitset_t *best = bitset_alloca(unsafe_count);
328                 /* Heuristic: Greedy trial and error form index 0 to unsafe_count-1 */
329                 for (i=0; i<unsafe_count; ++i) {
330                         bitset_set(best, i);
331                         /* check if it is a stable set */
332                         for (o=bitset_next_set(best, 0); o!=-1 && o<i; o=bitset_next_set(best, o+1))
333                                 if (be_values_interfere(lv, unsafe[i], unsafe[o])) {
334                                         bitset_clear(best, i); /* clear the bit and try next one */
335                                         break;
336                                 }
337                 }
338                 /* compute the weight */
339                 bitset_foreach(best, pos)
340                         best_weight += unsafe_costs[pos];
341         } else {
342                 /* Exact Algorithm: Brute force */
343                 curr = bitset_alloca(unsafe_count);
344                 bitset_set_all(curr);
345                 while (bitset_popcount(curr) != 0) {
346                         /* check if curr is a stable set */
347                         for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
348                                 for (o=bitset_next_set(curr, i+1); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to qnode_max_ind_set(): NOT (curr, i) */
349                                                 if (be_values_interfere(lv, unsafe[i], unsafe[o]))
350                                                         goto no_stable_set;
351
352                         /* if we arrive here, we have a stable set */
353                         /* compute the weight of the stable set*/
354                         curr_weight = 0;
355                         bitset_foreach(curr, pos)
356                                 curr_weight += unsafe_costs[pos];
357
358                         /* any better ? */
359                         if (curr_weight > best_weight) {
360                                 best_weight = curr_weight;
361                         }
362
363         no_stable_set:
364                         bitset_minus1(curr);
365                 }
366         }
367
368         return safe_costs+best_weight;
369 }
370
/**
 * Graph walker: builds one optimization unit for every optimizable root
 * node (see co_is_optimizable_root()) of the register class under
 * consideration and inserts it into co->units, sorted by descending
 * sort_key.
 *
 * @param irn  The currently visited node.
 * @param env  The copy_opt_t object (passed as void* by the walker).
 */
static void co_collect_units(ir_node *irn, void *env)
{
	const arch_register_req_t *req;
	copy_opt_t                *co  = (copy_opt_t*)env;
	unit_t *unit;

	/* mode_T nodes are tuples; their Proj nodes are visited separately */
	if (get_irn_mode(irn) == mode_T)
		return;
	req = arch_get_irn_register_req(irn);
	if (req->cls != co->cls)
		return;
	if (!co_is_optimizable_root(irn))
		return;

	/* Init a new unit */
	unit = XMALLOCZ(unit_t);
	unit->node_count = 1;
	INIT_LIST_HEAD(&unit->queue);

	be_lv_t *const lv = be_get_irg_liveness(co->irg);
	/* Phi with some/all of its arguments */
	if (is_Reg_Phi(irn)) {
		int i, arity;

		/* init */
		arity = get_irn_arity(irn);
		unit->nodes = XMALLOCN(ir_node*, arity + 1);
		unit->costs = XMALLOCN(int,      arity + 1);
		unit->nodes[0] = irn;

		/* fill */
		for (i=0; i<arity; ++i) {
			int o, arg_pos;
			ir_node *arg = get_irn_n(irn, i);

			assert(arch_get_irn_reg_class(arg) == co->cls && "Argument not in same register class.");
			if (arg == irn)
				continue;
			/* interfering argument: the copy can never be avoided */
			if (be_values_interfere(lv, irn, arg)) {
				unit->inevitable_costs += co->get_costs(irn, i);
				continue;
			}

			/* Else insert the argument of the phi to the members of this ou */
			DBG((dbg, LEVEL_1, "\t   Member: %+F\n", arg));

			if (arch_irn_is_ignore(arg))
				continue;

			/* Check if arg has occurred at a prior position in the arg/list */
			arg_pos = 0;
			for (o=1; o<unit->node_count; ++o) {
				if (unit->nodes[o] == arg) {
					arg_pos = o;
					break;
				}
			}

			if (!arg_pos) { /* a new argument */
				/* insert node, set costs */
				unit->nodes[unit->node_count] = arg;
				unit->costs[unit->node_count] = co->get_costs(irn, i);
				unit->node_count++;
			} else { /* arg has occurred before in same phi */
				/* increase costs for existing arg */
				unit->costs[arg_pos] += co->get_costs(irn, i);
			}
		}
		/* shrink the arrays to the number of distinct members found */
		unit->nodes = XREALLOC(unit->nodes, ir_node*, unit->node_count);
		unit->costs = XREALLOC(unit->costs, int,      unit->node_count);
	} else if (is_Perm_Proj(irn)) {
		/* Proj of a perm with corresponding arg */
		assert(!be_values_interfere(lv, irn, get_Perm_src(irn)));
		unit->nodes = XMALLOCN(ir_node*, 2);
		unit->costs = XMALLOCN(int,      2);
		unit->node_count = 2;
		unit->nodes[0] = irn;
		unit->nodes[1] = get_Perm_src(irn);
		unit->costs[1] = co->get_costs(irn, -1);
	} else if (arch_register_req_is(req, should_be_same)) {
		/* Src == Tgt of a 2-addr-code instruction */
		const unsigned other = req->other_same;
		int            count = 0;
		int            i;

		/* first pass: count non-ignored, non-interfering operands */
		for (i = 0; (1U << i) <= other; ++i) {
			if (other & (1U << i)) {
				ir_node *o  = get_irn_n(skip_Proj(irn), i);
				if (arch_irn_is_ignore(o))
					continue;
				if (be_values_interfere(lv, irn, o))
					continue;
				++count;
			}
		}

		if (count != 0) {
			int k = 0;
			++count; /* + 1 for the root node itself */
			unit->nodes = XMALLOCN(ir_node*, count);
			unit->costs = XMALLOCN(int,      count);
			unit->node_count = count;
			unit->nodes[k++] = irn;

			/* second pass: collect exactly those operands */
			for (i = 0; 1U << i <= other; ++i) {
				if (other & (1U << i)) {
					ir_node *o  = get_irn_n(skip_Proj(irn), i);
					if (!arch_irn_is_ignore(o) &&
							!be_values_interfere(lv, irn, o)) {
						unit->nodes[k] = o;
						unit->costs[k] = co->get_costs(irn, -1);
						++k;
					}
				}
			}
		}
	} else {
		assert(0 && "This is not an optimizable node!");
	}

	/* Insert the new unit at a position according to its costs */
	if (unit->node_count > 1) {
		int i;
		struct list_head *tmp;

		/* Determine the maximum costs this unit can cause: all_nodes_cost */
		for (i=1; i<unit->node_count; ++i) {
			unit->sort_key = MAX(unit->sort_key, unit->costs[i]);
			unit->all_nodes_costs += unit->costs[i];
		}

		/* Determine the minimal costs this unit will cause: min_nodes_costs */
		unit->min_nodes_costs += unit->all_nodes_costs - ou_max_ind_set_costs(unit, lv);
		/* Insert the new ou according to its sort_key */
		tmp = &co->units;
		while (tmp->next != &co->units && list_entry_units(tmp->next)->sort_key > unit->sort_key)
			tmp = tmp->next;
		list_add(&unit->units, tmp);
	} else {
		/* only the root itself: nothing to coalesce, discard the unit */
		free(unit);
	}
}
513
514 #ifdef QUICK_AND_DIRTY_HACK
515
516 static int compare_ous(const void *k1, const void *k2)
517 {
518         const unit_t *u1 = *((const unit_t **) k1);
519         const unit_t *u2 = *((const unit_t **) k2);
520         int i, o, u1_has_constr, u2_has_constr;
521         arch_register_req_t req;
522
523         /* Units with constraints come first */
524         u1_has_constr = 0;
525         for (i=0; i<u1->node_count; ++i) {
526                 arch_get_irn_register_req(&req, u1->nodes[i]);
527                 if (arch_register_req_is(&req, limited)) {
528                         u1_has_constr = 1;
529                         break;
530                 }
531         }
532
533         u2_has_constr = 0;
534         for (i=0; i<u2->node_count; ++i) {
535                 arch_get_irn_register_req(&req, u2->nodes[i]);
536                 if (arch_register_req_is(&req, limited)) {
537                         u2_has_constr = 1;
538                         break;
539                 }
540         }
541
542         if (u1_has_constr != u2_has_constr)
543                 return u2_has_constr - u1_has_constr;
544
545         /* After all, the sort key decides. Greater keys come first. */
546         return u2->sort_key - u1->sort_key;
547
548 }
549
550 /**
551  * Sort the ou's according to constraints and their sort_key
552  */
553 static void co_sort_units(copy_opt_t *co)
554 {
555         int i, count = 0, costs;
556         unit_t **ous;
557
558         /* get the number of ous, remove them form the list and fill the array */
559         list_for_each_entry(unit_t, ou, &co->units, units)
560                 count++;
561         ous = ALLOCAN(unit_t, count);
562
563         costs = co_get_max_copy_costs(co);
564
565         i = 0;
566         list_for_each_entry(unit_t, ou, &co->units, units)
567                 ous[i++] = ou;
568
569         INIT_LIST_HEAD(&co->units);
570
571         assert(count == i && list_empty(&co->units));
572
573         for (i=0; i<count; ++i)
574                 ir_printf("%+F\n", ous[i]->nodes[0]);
575
576         qsort(ous, count, sizeof(*ous), compare_ous);
577
578         ir_printf("\n\n");
579         for (i=0; i<count; ++i)
580                 ir_printf("%+F\n", ous[i]->nodes[0]);
581
582         /* reinsert into list in correct order */
583         for (i=0; i<count; ++i)
584                 list_add_tail(&ous[i]->units, &co->units);
585
586         assert(costs == co_get_max_copy_costs(co));
587 }
588 #endif
589
/**
 * Builds the optimization-unit structure of the copy opt object by walking
 * the graph with co_collect_units(). Must be freed again with
 * co_free_ou_structure().
 */
void co_build_ou_structure(copy_opt_t *co)
{
	DBG((dbg, LEVEL_1, "\tCollecting optimization units\n"));
	INIT_LIST_HEAD(&co->units);
	irg_walk_graph(co->irg, co_collect_units, NULL, co);
#ifdef QUICK_AND_DIRTY_HACK
	co_sort_units(co);
#endif
}
599
/**
 * Frees all optimization units and marks the OU structure as unavailable
 * (co->units.next == NULL serves as the "freed" marker checked by
 * ASSERT_OU_AVAIL).
 */
void co_free_ou_structure(copy_opt_t *co)
{
	ASSERT_OU_AVAIL(co);
	list_for_each_entry_safe(unit_t, curr, tmp, &co->units, units) {
		xfree(curr->nodes);
		xfree(curr->costs);
		xfree(curr);
	}
	co->units.next = NULL;
}
610
611 /* co_solve_heuristic() is implemented in becopyheur.c */
612
613 int co_get_max_copy_costs(const copy_opt_t *co)
614 {
615         int i, res = 0;
616
617         ASSERT_OU_AVAIL(co);
618
619         list_for_each_entry(unit_t, curr, &co->units, units) {
620                 res += curr->inevitable_costs;
621                 for (i=1; i<curr->node_count; ++i)
622                         res += curr->costs[i];
623         }
624         return res;
625 }
626
627 int co_get_inevit_copy_costs(const copy_opt_t *co)
628 {
629         int res = 0;
630
631         ASSERT_OU_AVAIL(co);
632
633         list_for_each_entry(unit_t, curr, &co->units, units)
634                 res += curr->inevitable_costs;
635         return res;
636 }
637
638 int co_get_copy_costs(const copy_opt_t *co)
639 {
640         int i, res = 0;
641
642         ASSERT_OU_AVAIL(co);
643
644         list_for_each_entry(unit_t, curr, &co->units, units) {
645                 int root_col = get_irn_col(curr->nodes[0]);
646                 DBG((dbg, LEVEL_1, "  %3d costs for root %+F color %d\n", curr->inevitable_costs, curr->nodes[0], root_col));
647                 res += curr->inevitable_costs;
648                 for (i=1; i<curr->node_count; ++i) {
649                         int arg_col = get_irn_col(curr->nodes[i]);
650                         if (root_col != arg_col) {
651                                 DBG((dbg, LEVEL_1, "  %3d for arg %+F color %d\n", curr->costs[i], curr->nodes[i], arg_col));
652                                 res += curr->costs[i];
653                         }
654                 }
655         }
656         return res;
657 }
658
659 int co_get_lower_bound(const copy_opt_t *co)
660 {
661         int res = 0;
662
663         ASSERT_OU_AVAIL(co);
664
665         list_for_each_entry(unit_t, curr, &co->units, units)
666                 res += curr->inevitable_costs + curr->min_nodes_costs;
667         return res;
668 }
669
/**
 * Collects statistics over the affinity graph: number of affinity nodes
 * and edges, maximal/actual/inevitable costs and the number of
 * unsatisfied and interfering affinity edges.
 * @note Requires the graph structure (co->nodes).
 *
 * @param co    The copy opt object.
 * @param stat  Output statistics, fully overwritten.
 */
void co_complete_stats(const copy_opt_t *co, co_complete_stats_t *stat)
{
	/* 'seen' marks visited nodes so each undirected edge is counted once */
	bitset_t *seen = bitset_malloc(get_irg_last_idx(co->irg));

	memset(stat, 0, sizeof(stat[0]));

	/* count affinity edges. */
	be_lv_t *const lv = be_get_irg_liveness(co->irg);
	co_gs_foreach_aff_node(co, an) {
		stat->aff_nodes += 1;
		bitset_set(seen, get_irn_idx(an->irn));
		co_gs_foreach_neighb(an, neigh) {
			if (!bitset_is_set(seen, get_irn_idx(neigh->irn))) {
				stat->aff_edges += 1;
				stat->max_costs += neigh->costs;

				/* differing colors: this affinity is not satisfied */
				if (get_irn_col(an->irn) != get_irn_col(neigh->irn)) {
					stat->costs += neigh->costs;
					stat->unsatisfied_edges += 1;
				}

				/* interfering values can never be coalesced */
				if (be_values_interfere(lv, an->irn, neigh->irn)) {
					stat->aff_int += 1;
					stat->inevit_costs += neigh->costs;
				}

			}
		}
	}

	bitset_free(seen);
}
702
703 /******************************************************************************
704    _____                 _        _____ _
705   / ____|               | |      / ____| |
706  | |  __ _ __ __ _ _ __ | |__   | (___ | |_ ___  _ __ __ _  __ _  ___
707  | | |_ | '__/ _` | '_ \| '_ \   \___ \| __/ _ \| '__/ _` |/ _` |/ _ \
708  | |__| | | | (_| | |_) | | | |  ____) | || (_) | | | (_| | (_| |  __/
709   \_____|_|  \__,_| .__/|_| |_| |_____/ \__\___/|_|  \__,_|\__, |\___|
710                   | |                                       __/ |
711                   |_|                                      |___/
712  ******************************************************************************/
713
714 static int compare_affinity_node_t(const void *k1, const void *k2, size_t size)
715 {
716         const affinity_node_t *n1 = (const affinity_node_t*)k1;
717         const affinity_node_t *n2 = (const affinity_node_t*)k2;
718         (void) size;
719
720         return (n1->irn != n2->irn);
721 }
722
723 static void add_edge(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs)
724 {
725         affinity_node_t new_node, *node;
726         neighb_t        *nbr;
727         int             allocnew = 1;
728
729         new_node.irn        = n1;
730         new_node.neighbours = NULL;
731         node = set_insert(affinity_node_t, co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
732
733         for (nbr = node->neighbours; nbr; nbr = nbr->next)
734                 if (nbr->irn == n2) {
735                         allocnew = 0;
736                         break;
737                 }
738
739         /* if we did not find n2 in n1's neighbourhood insert it */
740         if (allocnew) {
741                 nbr        = OALLOC(&co->obst, neighb_t);
742                 nbr->irn   = n2;
743                 nbr->costs = 0;
744                 nbr->next  = node->neighbours;
745
746                 node->neighbours = nbr;
747         }
748
749         /* now nbr points to n1's neighbour-entry of n2 */
750         nbr->costs += costs;
751 }
752
753 static inline void add_edges(copy_opt_t *co, ir_node *n1, ir_node *n2, int costs)
754 {
755         be_lv_t *const lv = be_get_irg_liveness(co->irg);
756         if (!be_values_interfere(lv, n1, n2)) {
757                 add_edge(co, n1, n2, costs);
758                 add_edge(co, n2, n1, costs);
759         }
760 }
761
762 static void build_graph_walker(ir_node *irn, void *env)
763 {
764         const arch_register_req_t *req;
765         copy_opt_t                *co  = (copy_opt_t*)env;
766         int pos, max;
767
768         if (get_irn_mode(irn) == mode_T)
769                 return;
770         req = arch_get_irn_register_req(irn);
771         if (req->cls != co->cls || arch_register_req_is(req, ignore))
772                 return;
773
774         if (is_Reg_Phi(irn)) { /* Phis */
775                 for (pos=0, max=get_irn_arity(irn); pos<max; ++pos) {
776                         ir_node *arg = get_irn_n(irn, pos);
777                         add_edges(co, irn, arg, co->get_costs(irn, pos));
778                 }
779         } else if (is_Perm_Proj(irn)) { /* Perms */
780                 ir_node *arg = get_Perm_src(irn);
781                 add_edges(co, irn, arg, co->get_costs(irn, -1));
782         } else if (arch_register_req_is(req, should_be_same)) {
783                 const unsigned other = req->other_same;
784                 int i;
785
786                 for (i = 0; 1U << i <= other; ++i) {
787                         if (other & (1U << i)) {
788                                 ir_node *other = get_irn_n(skip_Proj(irn), i);
789                                 if (!arch_irn_is_ignore(other))
790                                         add_edges(co, irn, other, co->get_costs(irn, -1));
791                         }
792                 }
793         }
794 }
795
796 void co_build_graph_structure(copy_opt_t *co)
797 {
798         obstack_init(&co->obst);
799         co->nodes = new_set(compare_affinity_node_t, 32);
800
801         irg_walk_graph(co->irg, build_graph_walker, NULL, co);
802 }
803
804 void co_free_graph_structure(copy_opt_t *co)
805 {
806         ASSERT_GS_AVAIL(co);
807
808         del_set(co->nodes);
809         obstack_free(&co->obst, NULL);
810         co->nodes = NULL;
811 }
812
813 int co_gs_is_optimizable(copy_opt_t const *const co, ir_node *const irn)
814 {
815         affinity_node_t new_node, *n;
816
817         ASSERT_GS_AVAIL(co);
818
819         new_node.irn = irn;
820         n = set_find(affinity_node_t, co->nodes, &new_node, sizeof(new_node), hash_irn(new_node.irn));
821         return n && n->neighbours;
822 }
823
824 static int co_dump_appel_disjoint_constraints(const copy_opt_t *co, ir_node *a, ir_node *b)
825 {
826         ir_node *nodes[]  = { a, b };
827         bitset_t *constr[] = { NULL, NULL };
828         int j;
829
830         constr[0] = bitset_alloca(co->cls->n_regs);
831         constr[1] = bitset_alloca(co->cls->n_regs);
832
833         for (j = 0; j < 2; ++j) {
834                 const arch_register_req_t *req = arch_get_irn_register_req(nodes[j]);
835                 if (arch_register_req_is(req, limited))
836                         rbitset_copy_to_bitset(req->limited, constr[j]);
837                 else
838                         bitset_set_all(constr[j]);
839
840         }
841
842         return !bitset_intersect(constr[0], constr[1]);
843 }
844
845 /**
846  * Dump the interference graph according to the Appel/George coalescing contest file format.
847  * See: http://www.cs.princeton.edu/~appel/coalesce/format.html
848  * @note Requires graph structure.
849  * @param co The copy opt object.
850  * @param f  A file to dump to.
851  */
852 static void co_dump_appel_graph(const copy_opt_t *co, FILE *f)
853 {
854         be_ifg_t *ifg       = co->cenv->ifg;
855         int      *color_map = ALLOCAN(int, co->cls->n_regs);
856         int      *node_map  = XMALLOCN(int, get_irg_last_idx(co->irg) + 1);
857         ir_graph *irg       = co->irg;
858         be_irg_t *birg      = be_birg_from_irg(irg);
859
860         neighbours_iter_t nit;
861         int n, n_regs;
862         unsigned i;
863
864         n_regs = 0;
865         for (i = 0; i < co->cls->n_regs; ++i) {
866                 const arch_register_t *reg = &co->cls->regs[i];
867                 if (rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
868                         color_map[i] = n_regs++;
869                 } else {
870                         color_map[i] = -1;
871                 }
872         }
873
874         /*
875          * n contains the first node number.
876          * the values below n are the pre-colored register nodes
877          */
878
879         n = n_regs;
880         be_ifg_foreach_node(ifg, irn) {
881                 if (arch_irn_is_ignore(irn))
882                         continue;
883                 node_map[get_irn_idx(irn)] = n++;
884         }
885
886         fprintf(f, "%d %d\n", n, n_regs);
887
888         be_ifg_foreach_node(ifg, irn) {
889                 arch_register_req_t const *const req = arch_get_irn_register_req(irn);
890                 if (arch_register_req_is(req, ignore))
891                         continue;
892
893                 int              idx = node_map[get_irn_idx(irn)];
894                 affinity_node_t *a   = get_affinity_info(co, irn);
895
896                 if (arch_register_req_is(req, limited)) {
897                         for (i = 0; i < co->cls->n_regs; ++i) {
898                                 if (!rbitset_is_set(req->limited, i) && color_map[i] >= 0)
899                                         fprintf(f, "%d %d -1\n", color_map[i], idx);
900                         }
901                 }
902
903                 be_ifg_foreach_neighbour(ifg, &nit, irn, adj) {
904                         if (!arch_irn_is_ignore(adj) &&
905                                         !co_dump_appel_disjoint_constraints(co, irn, adj)) {
906                                 int adj_idx = node_map[get_irn_idx(adj)];
907                                 if (idx < adj_idx)
908                                         fprintf(f, "%d %d -1\n", idx, adj_idx);
909                         }
910                 }
911
912                 if (a) {
913                         co_gs_foreach_neighb(a, n) {
914                                 if (!arch_irn_is_ignore(n->irn)) {
915                                         int n_idx = node_map[get_irn_idx(n->irn)];
916                                         if (idx < n_idx)
917                                                 fprintf(f, "%d %d %d\n", idx, n_idx, (int) n->costs);
918                                 }
919                         }
920                 }
921         }
922
923         xfree(node_map);
924 }
925
926 static FILE *my_open(const be_chordal_env_t *env, const char *prefix,
927                      const char *suffix)
928 {
929         FILE *result;
930         char buf[1024];
931         size_t i, n;
932         char *tu_name;
933         const char *cup_name = be_get_irg_main_env(env->irg)->cup_name;
934
935         n = strlen(cup_name);
936         tu_name = XMALLOCN(char, n + 1);
937         strcpy(tu_name, cup_name);
938         for (i = 0; i < n; ++i)
939                 if (tu_name[i] == '.')
940                         tu_name[i] = '_';
941
942
943         ir_snprintf(buf, sizeof(buf), "%s%s_%F_%s%s", prefix, tu_name, env->irg, env->cls->name, suffix);
944         xfree(tu_name);
945         result = fopen(buf, "wt");
946         if (result == NULL) {
947                 panic("Couldn't open '%s' for writing.", buf);
948         }
949
950         return result;
951 }
952
/**
 * Main copy-minimization driver: runs the selected coalescing algorithm on
 * the given chordal environment, records before/after statistics and,
 * depending on the dump flags, writes the interference graph as VCG and/or
 * in Appel's contest format.
 */
void co_driver(be_chordal_env_t *cenv)
{
	ir_timer_t          *timer = ir_timer_new();
	co_complete_stats_t before, after;
	copy_opt_t          *co;
	int                 was_optimal = 0;

	assert(selected_copyopt);

	/* skip copymin if algo is 'none' */
	if (selected_copyopt->copyopt == void_algo)
		return;

	be_assure_live_chk(cenv->irg);

	/* Both auxiliary structures are needed: optimization units for the
	 * heuristics, the affinity graph for stats and dumping. */
	co = new_copy_opt(cenv, cost_func);
	co_build_ou_structure(co);
	co_build_graph_structure(co);

	/* Snapshot the costs before any optimization for the stat events and
	 * the evitable-cost ratio below. */
	co_complete_stats(co, &before);

	stat_ev_ull("co_aff_nodes",    before.aff_nodes);
	stat_ev_ull("co_aff_edges",    before.aff_edges);
	stat_ev_ull("co_max_costs",    before.max_costs);
	stat_ev_ull("co_inevit_costs", before.inevit_costs);
	stat_ev_ull("co_aff_int",      before.aff_int);

	stat_ev_ull("co_init_costs",   before.costs);
	stat_ev_ull("co_init_unsat",   before.unsatisfied_edges);

	if (dump_flags & DUMP_BEFORE) {
		FILE *f = my_open(cenv, "", "-before.vcg");
		be_dump_ifg_co(f, co, style_flags & CO_IFG_DUMP_LABELS, style_flags & CO_IFG_DUMP_COLORS);
		fclose(f);
	}

	/* if the algo can improve results, provide an initial solution with heur1 */
	if (improve && selected_copyopt->can_improve_existing) {
		co_complete_stats_t stats;

		/* produce a heuristic solution */
		co_solve_heuristic(co);

		/* do the stats and provide the current costs */
		co_complete_stats(co, &stats);
		stat_ev_ull("co_prepare_costs", stats.costs);
	}

	/* perform actual copy minimization; only the algorithm itself is timed */
	ir_timer_reset_and_start(timer);
	was_optimal = selected_copyopt->copyopt(co);
	ir_timer_stop(timer);

	stat_ev_dbl("co_time", ir_timer_elapsed_msec(timer));
	stat_ev_ull("co_optimal", was_optimal);
	ir_timer_free(timer);

	if (dump_flags & DUMP_AFTER) {
		FILE *f = my_open(cenv, "", "-after.vcg");
		be_dump_ifg_co(f, co, style_flags & CO_IFG_DUMP_LABELS, style_flags & CO_IFG_DUMP_COLORS);
		fclose(f);
	}

	co_complete_stats(co, &after);

	if (do_stats) {
		/* How much of the avoidable copy cost is still being paid. */
		unsigned long long optimizable_costs = after.max_costs - after.inevit_costs;
		unsigned long long evitable          = after.costs     - after.inevit_costs;

		ir_printf("%30F ", cenv->irg);
		printf("%10s %10llu%10llu%10llu", cenv->cls->name, after.max_costs, before.costs, after.inevit_costs);

		if (optimizable_costs > 0)
			printf("%10llu %5.2f\n", after.costs, (evitable * 100.0) / optimizable_costs);
		else
			printf("%10llu %5s\n", after.costs, "-");
	}

	/* Dump the interference graph in Appel's format. */
	if (dump_flags & DUMP_APPEL) {
		FILE *f = my_open(cenv, "", ".apl");
		fprintf(f, "# %llu %llu\n", after.costs, after.unsatisfied_edges);
		co_dump_appel_graph(co, f);
		fclose(f);
	}

	stat_ev_ull("co_after_costs", after.costs);
	stat_ev_ull("co_after_unsat", after.unsatisfied_edges);

	/* Tear down in reverse order of construction. */
	co_free_graph_structure(co);
	co_free_ou_structure(co);
	free_copy_opt(co);
}