improve/fix some statistics in bechordal
[libfirm] ir/be/bechordal_main.c
/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Driver for the chordal register allocator.
 * @author      Sebastian Hack
 * @date        29.11.2005
 * @version     $Id$
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <time.h>

#include "obst.h"
#include "pset.h"
#include "list.h"
#include "bitset.h"
#include "iterator.h"
#include "firm_config.h"

#include "lc_opts.h"
#include "lc_opts_enum.h"

#include "ircons_t.h"
#include "irmode_t.h"
#include "irgraph_t.h"
#include "irprintf_t.h"
#include "irgwalk.h"
#include "ircons.h"
#include "irdump.h"
#include "irdom.h"
#include "irbitset.h"
#include "irnode.h"
#include "debug.h"
#include "xmalloc.h"
#include "execfreq.h"
#include "iredges_t.h"

#include "bechordal_t.h"
#include "beabi.h"
#include "bejavacoal.h"
#include "beutil.h"
#include "besched.h"
#include "besched_t.h"
#include "belive_t.h"
#include "bearch_t.h"
#include "beifg_t.h"
#include "beifg_impl.h"
#include "benode_t.h"
#include "bestatevent.h"
#include "bestat.h"
#include "bemodule.h"
#include "be_t.h"
#include "bera.h"
#include "beirg_t.h"

#include "bespillslots.h"
#include "bespilloptions.h"
#include "belower.h"

#ifdef WITH_ILP
#include "bespillremat.h"
#endif /* WITH_ILP */

#include "becopystat.h"
#include "becopyopt.h"
#include "bessadestr.h"
#include "beverify.h"

static be_ra_chordal_opts_t options = {
        BE_CH_DUMP_NONE,
        BE_CH_LOWER_PERM_SWAP,
        BE_CH_VRFY_WARN,
        "",
        ""
};

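/**
 * Per-register-class environment handed from pre_spill() to post_spill().
 */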
typedef struct _post_spill_env_t {
        be_chordal_env_t            cenv;
        be_irg_t                    *birg;
        const arch_register_class_t *cls;
        double                      pre_spill_cost;
} post_spill_env_t;

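/** Global backend options, taken from the main environment. */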
static be_options_t  *main_opts;

static const lc_opt_enum_int_items_t lower_perm_items[] = {
        { "copy", BE_CH_LOWER_PERM_COPY },
        { "swap", BE_CH_LOWER_PERM_SWAP },
        { NULL, 0 }
};

static const lc_opt_enum_int_items_t lower_perm_stat_items[] = {
        { NULL, 0 }
};

static const lc_opt_enum_int_items_t dump_items[] = {
        { "none",       BE_CH_DUMP_NONE       },
        { "spill",      BE_CH_DUMP_SPILL      },
        { "live",       BE_CH_DUMP_LIVE       },
        { "color",      BE_CH_DUMP_COLOR      },
        { "copymin",    BE_CH_DUMP_COPYMIN    },
        { "ssadestr",   BE_CH_DUMP_SSADESTR   },
        { "tree",       BE_CH_DUMP_TREE_INTV  },
        { "constr",     BE_CH_DUMP_CONSTR     },
        { "lower",      BE_CH_DUMP_LOWER      },
        { "spillslots", BE_CH_DUMP_SPILLSLOTS },
        { "appel",      BE_CH_DUMP_APPEL      },
        { "all",        BE_CH_DUMP_ALL        },
        { NULL, 0 }
};

static const lc_opt_enum_int_items_t be_ch_vrfy_items[] = {
        { "off",    BE_CH_VRFY_OFF    },
        { "warn",   BE_CH_VRFY_WARN   },
        { "assert", BE_CH_VRFY_ASSERT },
        { NULL, 0 }
};

static lc_opt_enum_int_var_t lower_perm_var = {
        &options.lower_perm_opt, lower_perm_items
};

static lc_opt_enum_int_var_t dump_var = {
        &options.dump_flags, dump_items
};

static lc_opt_enum_int_var_t be_ch_vrfy_var = {
        &options.vrfy_option, be_ch_vrfy_items
};

static const lc_opt_table_entry_t be_chordal_options[] = {
        LC_OPT_ENT_ENUM_PTR ("perm",          "perm lowering options", &lower_perm_var),
        LC_OPT_ENT_ENUM_MASK("dump",          "select dump phases", &dump_var),
        LC_OPT_ENT_ENUM_PTR ("vrfy",          "verify options", &be_ch_vrfy_var),
        LC_OPT_LAST
};

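/**
 * Dump a graph if all bits of @p mask are set in the dump options.
 * If a register class is given, its name becomes part of the dump suffix.
 */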
static void dump(unsigned mask, ir_graph *irg,
                 const arch_register_class_t *cls,
                 const char *suffix,
                 void (*dump_func)(ir_graph *, const char *))
{
        if ((options.dump_flags & mask) == mask) {
                if (cls) {
                        char buf[256];
                        snprintf(buf, sizeof(buf), "-%s%s", cls->name, suffix);
                        be_dump(irg, buf, dump_func);
                }
                else
                        be_dump(irg, suffix, dump_func);
        }
}

/**
 * Checks for every Reload whether its user can perform the load itself,
 * i.e. whether the Reload can be folded into a memory operand of the user.
 */
static void memory_operand_walker(ir_node *irn, void *env) {
        be_chordal_env_t *cenv = env;
        const arch_env_t *aenv = cenv->birg->main_env->arch_env;
        const ir_edge_t  *edge, *ne;
        ir_node          *block;
        ir_node          *spill;

        if (! be_is_Reload(irn))
                return;

        /* only use memory operands if the Reload is used by exactly one node */
        if (get_irn_n_edges(irn) > 1)
                return;

        spill = be_get_Reload_mem(irn);
        block = get_nodes_block(irn);

        foreach_out_edge_safe(irn, edge, ne) {
                ir_node *src = get_edge_src_irn(edge);
                int     pos  = get_edge_src_pos(edge);

                assert(src && "outedges broken!");

                if (get_nodes_block(src) == block && arch_possible_memory_operand(aenv, src, pos)) {
                        arch_perform_memory_operand(aenv, src, spill, pos);
                }
        }

        /* kill the Reload if it became unused */
        if (get_irn_n_edges(irn) == 0) {
                sched_remove(irn);
                set_irn_n(irn, be_pos_Reload_mem, new_Bad());
                set_irn_n(irn, be_pos_Reload_frame, new_Bad());
        }
}

/**
 * Walks over the graph and tries to fold Reloads into memory operands of
 * their users.
 */
static INLINE void check_for_memory_operands(be_chordal_env_t *chordal_env) {
        irg_walk_graph(chordal_env->irg, NULL, memory_operand_walker, chordal_env);
}

/**
 * Node counts gathered by a walk over the graph; the statistic events report
 * differences between two such counts.
 */
typedef struct _node_stat_t {
        unsigned int n_phis;      /**< Phis of the current register class. */
        unsigned int n_mem_phis;  /**< Memory Phis (Phis with spill operands). */
        unsigned int n_copies;    /**< Copies */
        unsigned int n_perms;     /**< Perms */
        unsigned int n_spills;    /**< Spill nodes */
        unsigned int n_reloads;   /**< Reloads */
} node_stat_t;

struct node_stat_walker {
        node_stat_t      *stat;
        const arch_env_t *arch_env;
};

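/**
 * Walker: classifies each node and updates the counters of the attached
 * node_stat_t accordingly.
 */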
static void node_stat_walker(ir_node *irn, void *data)
{
        struct node_stat_walker *env  = data;
        const arch_env_t        *aenv = env->arch_env;

        /* count Phis, distinguishing memory Phis from register Phis */
        if (is_Phi(irn)) {
                if (get_irn_mode(irn) == mode_M) {
                        env->stat->n_mem_phis++;
                } else {
                        env->stat->n_phis++;
                }
        } else {
                arch_irn_class_t classify = arch_irn_classify(aenv, irn);

                if (classify & arch_irn_class_spill)
                        ++env->stat->n_spills;
                if (classify & arch_irn_class_reload)
                        ++env->stat->n_reloads;
                if (classify & arch_irn_class_copy)
                        ++env->stat->n_copies;
                if (classify & arch_irn_class_perm)
                        ++env->stat->n_perms;
        }
}

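/**
 * Collects the node statistics of a graph with a single walk.
 */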
static void node_stats(be_irg_t *birg, node_stat_t *stat)
{
        struct node_stat_walker env;

        memset(stat, 0, sizeof(*stat));
        env.arch_env = birg->main_env->arch_env;
        env.stat     = stat;
        irg_walk_graph(birg->irg, NULL, node_stat_walker, &env);
}

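/**
 * Walker: counts all "real" instructions; Proj, Phi, Start and End nodes are
 * not counted.
 */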
static void insn_count_walker(ir_node *irn, void *data)
{
        unsigned long *cnt = data;

        switch (get_irn_opcode(irn)) {
        case iro_Proj:
        case iro_Phi:
        case iro_Start:
        case iro_End:
                break;
        default:
                (*cnt)++;
        }
}

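/** Counts the instructions of a graph. */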
static unsigned long count_insns(ir_graph *irg)
{
        unsigned long cnt = 0;
        irg_walk_graph(irg, insn_count_walker, NULL, &cnt);
        return cnt;
}

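/** Walker: counts all blocks except the end block. */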
static void block_count_walker(ir_node *node, void *data)
{
        unsigned long *cnt = data;
        if (node == get_irg_end_block(current_ir_graph))
                return;
        (*cnt)++;
}

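/** Counts the basic blocks of a graph, excluding the end block. */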
static unsigned long count_blocks(ir_graph *irg)
{
        unsigned long cnt = 0;
        irg_block_walk_graph(irg, block_count_walker, NULL, &cnt);
        return cnt;
}

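/**
 * Node statistics of the previous snapshot. The statistic events below report
 * differences to this snapshot, so each register class only accounts for the
 * Phis, spills, reloads, Perms and copies it introduced itself.
 */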
static node_stat_t last_node_stat;

/**
 * Perform things which need to be done per register class before spilling.
 */
static void pre_spill(post_spill_env_t *pse, const arch_register_class_t *cls)
{
        be_chordal_env_t    *chordal_env = &pse->cenv;
        be_irg_t            *birg        = pse->birg;
        ir_graph            *irg         = be_get_birg_irg(birg);
        const be_main_env_t *main_env    = birg->main_env;

        pse->cls                   = cls;
        chordal_env->cls           = cls;
        chordal_env->border_heads  = pmap_create();
        chordal_env->ignore_colors = bitset_malloc(chordal_env->cls->n_regs);

        be_assure_liveness(birg);
        be_liveness_assure_chk(be_get_birg_liveness(birg));

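        /* Remember the costs before spilling; post_spill() reports the
         * difference to this value as the spill costs of the class. */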
        stat_ev_do(pse->pre_spill_cost = be_estimate_irg_costs(irg, main_env->arch_env, birg->exec_freq));

        /* put all ignore registers into the ignore register set. */
        be_put_ignore_regs(birg, pse->cls, chordal_env->ignore_colors);

        BE_TIMER_PUSH(t_ra_constr);
        be_pre_spill_prepare_constr(chordal_env);
        BE_TIMER_POP(t_ra_constr);

        dump(BE_CH_DUMP_CONSTR, birg->irg, pse->cls, "-constr-pre", dump_ir_block_graph_sched);
}

/**
 * Perform things which need to be done per register class after spilling.
 */
static void post_spill(post_spill_env_t *pse, int iteration) {
        be_chordal_env_t    *chordal_env      = &pse->cenv;
        be_irg_t            *birg             = pse->birg;
        ir_graph            *irg              = birg->irg;
        const be_main_env_t *main_env         = birg->main_env;
        int                  colors_n         = arch_register_class_n_regs(chordal_env->cls);
        int                  allocatable_regs = colors_n - be_put_ignore_regs(birg, chordal_env->cls, NULL);

        /* some special classes contain only ignore regs, no work to be done */
        if (allocatable_regs > 0) {
                stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg, main_env->arch_env, birg->exec_freq) - pse->pre_spill_cost);

                /*
                        If we have a backend provided spiller, post spill is
                        called in a loop after spilling for each register class.
                        But we only need to fix stack nodes once in this case.
                */
                BE_TIMER_PUSH(t_ra_spill);
                check_for_memory_operands(chordal_env);
                if (iteration == 0) {
                        be_abi_fix_stack_nodes(birg->abi);
                }
                BE_TIMER_POP(t_ra_spill);

                BE_TIMER_PUSH(t_verify);

                /* verify schedule and register pressure */
                if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
                        be_verify_schedule(birg);
                        be_verify_register_pressure(birg, pse->cls, irg);
                } else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) {
                        assert(be_verify_schedule(birg) && "Schedule verification failed");
                        assert(be_verify_register_pressure(birg, pse->cls, irg)
                                && "Register pressure verification failed");
                }
                BE_TIMER_POP(t_verify);

                /* Color the graph. */
                BE_TIMER_PUSH(t_ra_color);
                be_ra_chordal_color(chordal_env);
                BE_TIMER_POP(t_ra_color);

                dump(BE_CH_DUMP_COLOR, irg, pse->cls, "-color", dump_ir_block_graph_sched);

                /* Create the ifg with the selected flavor */
                BE_TIMER_PUSH(t_ra_ifg);
                chordal_env->ifg = be_create_ifg(chordal_env);
                BE_TIMER_POP(t_ra_ifg);

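                /* Emit interference graph statistics as well as the number of
                 * Perms and copies present before coalescing, relative to the
                 * last node-stat snapshot. */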
                stat_ev_if {
                        be_ifg_stat_t stat;
                        node_stat_t   node_stat;

                        be_ifg_stat(birg, chordal_env->ifg, &stat);
                        stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
                        stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
                        stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);

                        node_stats(birg, &node_stat);
                        stat_ev_dbl("bechordal_perms_before_coal",
                                        node_stat.n_perms - last_node_stat.n_perms);
                        stat_ev_dbl("bechordal_copies_before_coal",
                                        node_stat.n_copies - last_node_stat.n_copies);
                }

                /* copy minimization */
                BE_TIMER_PUSH(t_ra_copymin);
                co_driver(chordal_env);
                BE_TIMER_POP(t_ra_copymin);

                dump(BE_CH_DUMP_COPYMIN, irg, pse->cls, "-copymin", dump_ir_block_graph_sched);

                /* ssa destruction */
                BE_TIMER_PUSH(t_ra_ssa);
                be_ssa_destruction(chordal_env);
                BE_TIMER_POP(t_ra_ssa);

                dump(BE_CH_DUMP_SSADESTR, irg, pse->cls, "-ssadestr", dump_ir_block_graph_sched);

                if (chordal_env->opts->vrfy_option != BE_CH_VRFY_OFF) {
                        BE_TIMER_PUSH(t_verify);
                        be_ssa_destruction_check(chordal_env);
                        BE_TIMER_POP(t_verify);
                }

                /* the ifg exists only if there are allocatable regs */
                be_ifg_free(chordal_env->ifg);
        }

        /* free some always allocated data structures */
        pmap_destroy(chordal_env->border_heads);
        bitset_free(chordal_env->ignore_colors);
}

/**
 * Performs chordal register allocation for each register class on the given irg.
 *
 * @param birg  Backend irg object
 */
static void be_ra_chordal_main(be_irg_t *birg)
{
        const be_main_env_t *main_env = birg->main_env;
        const arch_env_t    *arch_env = main_env->arch_env;
        ir_graph            *irg      = birg->irg;
        int                 j, m;
        be_chordal_env_t    chordal_env;
        struct obstack      obst;

        main_opts = main_env->options;

        BE_TIMER_PUSH(t_ra_other);

        BE_TIMER_PUSH(t_ra_prolog);

        be_assure_liveness(birg);

        chordal_env.obst          = &obst;
        chordal_env.opts          = &options;
        chordal_env.irg           = irg;
        chordal_env.birg          = birg;
        chordal_env.border_heads  = NULL;
        chordal_env.ifg           = NULL;
        chordal_env.ignore_colors = NULL;

        obstack_init(&obst);

        BE_TIMER_POP(t_ra_prolog);

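        /* Statistics before allocation: instruction and block counts plus an
         * initial node-stat snapshot to diff the per-class statistics against. */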
        stat_ev_if {
                be_stat_ev("bechordal_insns_before", count_insns(irg));
                be_stat_ev("bechordal_blocks_before", count_blocks(irg));
                node_stats(birg, &last_node_stat);
        }

        if (! arch_code_generator_has_spiller(birg->cg)) {
                /* use one of the generic spillers */

                /* Perform the following for each register class. */
                for (j = 0, m = arch_env_get_n_reg_class(arch_env); j < m; ++j) {
                        post_spill_env_t pse;
                        const arch_register_class_t *cls
                                = arch_env_get_reg_class(arch_env, j);

                        if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
                                continue;

                        stat_ev_ctx_push_str("bechordal_cls", cls->name);

                        memcpy(&pse.cenv, &chordal_env, sizeof(chordal_env));
                        pse.birg = birg;
                        pre_spill(&pse, cls);

                        BE_TIMER_PUSH(t_ra_spill);
                        be_do_spill(birg, cls);
                        BE_TIMER_POP(t_ra_spill);

                        dump(BE_CH_DUMP_SPILL, irg, pse.cls, "-spill",
                             dump_ir_block_graph_sched);

                        post_spill(&pse, 0);

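                        /* Per-class statistics: report how many Phis, spills,
                         * reloads, Perms and copies this class added as the
                         * difference to the last snapshot, then advance the
                         * snapshot. */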
                        stat_ev_if {
                                node_stat_t node_stat;

                                node_stats(birg, &node_stat);
                                stat_ev_dbl("bechordal_phis",
                                                node_stat.n_phis - last_node_stat.n_phis);
                                stat_ev_dbl("bechordal_mem_phis",
                                                node_stat.n_mem_phis - last_node_stat.n_mem_phis);
                                stat_ev_dbl("bechordal_reloads",
                                                node_stat.n_reloads - last_node_stat.n_reloads);
                                stat_ev_dbl("bechordal_spills",
                                                node_stat.n_spills - last_node_stat.n_spills);
                                stat_ev_dbl("bechordal_perms_after_coal",
                                                node_stat.n_perms - last_node_stat.n_perms);
                                stat_ev_dbl("bechordal_copies_after_coal",
                                                node_stat.n_copies - last_node_stat.n_copies);

                                last_node_stat = node_stat;
                                stat_ev_ctx_pop("bechordal_cls");
                        }
                }
        } else {
                post_spill_env_t *pse;

                /* the backend has its own spiller */
                m = arch_env_get_n_reg_class(arch_env);

                pse = alloca(m * sizeof(pse[0]));

                for (j = 0; j < m; ++j) {
                        memcpy(&pse[j].cenv, &chordal_env, sizeof(chordal_env));
                        pse[j].birg = birg;
                        pre_spill(&pse[j], arch_env_get_reg_class(arch_env, j));
                }

                BE_TIMER_PUSH(t_ra_spill);
                arch_code_generator_spill(birg->cg, birg);
                BE_TIMER_POP(t_ra_spill);
                dump(BE_CH_DUMP_SPILL, irg, NULL, "-spill", dump_ir_block_graph_sched);

                for (j = 0; j < m; ++j) {
                        post_spill(&pse[j], j);
                }
        }

        BE_TIMER_PUSH(t_verify);
        be_verify_register_allocation(birg);
        BE_TIMER_POP(t_verify);

        BE_TIMER_PUSH(t_ra_epilog);
        lower_nodes_after_ra(birg, options.lower_perm_opt & BE_CH_LOWER_PERM_COPY ? 1 : 0);
        dump(BE_CH_DUMP_LOWER, irg, NULL, "-belower-after-ra", dump_ir_block_graph_sched);

        obstack_free(&obst, NULL);
        be_liveness_invalidate(be_get_birg_liveness(birg));
        BE_TIMER_POP(t_ra_epilog);

        BE_TIMER_POP(t_ra_other);

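        /* Final statistics: instruction count after register allocation. */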
        stat_ev_if {
                be_stat_ev("bechordal_insns_after", count_insns(irg));
        }
}

static be_ra_t be_ra_chordal_allocator = {
        be_ra_chordal_main,
};

void be_init_chordal_main(void)
{
        lc_opt_entry_t *be_grp      = lc_opt_get_grp(firm_opt_get_root(), "be");
        lc_opt_entry_t *ra_grp      = lc_opt_get_grp(be_grp, "ra");
        lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");

        lc_opt_add_table(chordal_grp, be_chordal_options);

        be_register_allocator("chordal", &be_ra_chordal_allocator);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main);