besched: Change sched_foreach_from(sched_next(x), y) to sched_foreach_after(x, y).
[libfirm] / ir / be / bechordal_main.c
1 /*
2  * This file is part of libFirm.
3  * Copyright (C) 2012 University of Karlsruhe.
4  */
5
6 /**
7  * @file
8  * @brief       Driver for the chordal register allocator.
9  * @author      Sebastian Hack
10  * @date        29.11.2005
11  */
12 #include "config.h"
13
14 #include <stdlib.h>
15 #include <time.h>
16
17 #include "obst.h"
18 #include "pset.h"
19 #include "list.h"
20 #include "bitset.h"
21
22 #include "lc_opts.h"
23 #include "lc_opts_enum.h"
24
25 #include "ircons_t.h"
26 #include "irmode_t.h"
27 #include "irgraph_t.h"
28 #include "irprintf.h"
29 #include "irgwalk.h"
30 #include "ircons.h"
31 #include "irdump.h"
32 #include "irdom.h"
33 #include "ircons.h"
34 #include "irnode.h"
35 #include "ircons.h"
36 #include "irtools.h"
37 #include "debug.h"
38 #include "execfreq.h"
39 #include "iredges_t.h"
40 #include "error.h"
41
42 #include "bechordal_t.h"
43 #include "beabi.h"
44 #include "beutil.h"
45 #include "besched.h"
46 #include "besched.h"
47 #include "belive_t.h"
48 #include "bearch.h"
49 #include "beifg.h"
50 #include "benode.h"
51 #include "statev_t.h"
52 #include "bestat.h"
53 #include "bemodule.h"
54 #include "be_t.h"
55 #include "bera.h"
56 #include "beirg.h"
57 #include "bestack.h"
58
59 #include "bespillslots.h"
60 #include "bespill.h"
61 #include "belower.h"
62
63 #include "becopystat.h"
64 #include "becopyopt.h"
65 #include "bessadestr.h"
66 #include "beverify.h"
67 #include "benode.h"
68
69 #include "bepbqpcoloring.h"
70
71 static be_ra_chordal_opts_t options = {
72         BE_CH_DUMP_NONE,
73         BE_CH_LOWER_PERM_SWAP,
74         BE_CH_VRFY_WARN
75 };
76
77 static const lc_opt_enum_int_items_t lower_perm_items[] = {
78         { "copy", BE_CH_LOWER_PERM_COPY },
79         { "swap", BE_CH_LOWER_PERM_SWAP },
80         { NULL, 0 }
81 };
82
83 static const lc_opt_enum_mask_items_t dump_items[] = {
84         { "none",       BE_CH_DUMP_NONE       },
85         { "spill",      BE_CH_DUMP_SPILL      },
86         { "live",       BE_CH_DUMP_LIVE       },
87         { "color",      BE_CH_DUMP_COLOR      },
88         { "copymin",    BE_CH_DUMP_COPYMIN    },
89         { "ssadestr",   BE_CH_DUMP_SSADESTR   },
90         { "tree",       BE_CH_DUMP_TREE_INTV  },
91         { "split",      BE_CH_DUMP_SPLIT      },
92         { "constr",     BE_CH_DUMP_CONSTR     },
93         { "lower",      BE_CH_DUMP_LOWER      },
94         { "spillslots", BE_CH_DUMP_SPILLSLOTS },
95         { "appel",      BE_CH_DUMP_APPEL      },
96         { "all",        BE_CH_DUMP_ALL        },
97         { NULL, 0 }
98 };
99
100 static const lc_opt_enum_int_items_t be_ch_vrfy_items[] = {
101         { "off",    BE_CH_VRFY_OFF    },
102         { "warn",   BE_CH_VRFY_WARN   },
103         { "assert", BE_CH_VRFY_ASSERT },
104         { NULL, 0 }
105 };
106
107 static lc_opt_enum_int_var_t lower_perm_var = {
108         &options.lower_perm_opt, lower_perm_items
109 };
110
111 static lc_opt_enum_mask_var_t dump_var = {
112         &options.dump_flags, dump_items
113 };
114
115 static lc_opt_enum_int_var_t be_ch_vrfy_var = {
116         &options.vrfy_option, be_ch_vrfy_items
117 };
118
119 static const lc_opt_table_entry_t be_chordal_options[] = {
120         LC_OPT_ENT_ENUM_INT ("perm",          "perm lowering options", &lower_perm_var),
121         LC_OPT_ENT_ENUM_MASK("dump",          "select dump phases", &dump_var),
122         LC_OPT_ENT_ENUM_INT ("verify",        "verify options", &be_ch_vrfy_var),
123         LC_OPT_LAST
124 };
125
126 static be_module_list_entry_t *colorings = NULL;
127 static const be_ra_chordal_coloring_t *selected_coloring = NULL;
128
129 void be_register_chordal_coloring(const char *name, be_ra_chordal_coloring_t *coloring)
130 {
131         if (selected_coloring == NULL)
132                 selected_coloring = coloring;
133
134         be_add_module_to_list(&colorings, name, coloring);
135 }
136
137 static void be_ra_chordal_coloring(be_chordal_env_t *env)
138 {
139         selected_coloring->allocate(env);
140 }
141
142 static void dump(unsigned mask, ir_graph *irg,
143                                  const arch_register_class_t *cls,
144                                  const char *suffix)
145 {
146         if ((options.dump_flags & mask) == mask) {
147                 if (cls) {
148                         char buf[256];
149                         snprintf(buf, sizeof(buf), "%s-%s", cls->name, suffix);
150                         dump_ir_graph(irg, buf);
151                 } else {
152                         dump_ir_graph(irg, suffix);
153                 }
154         }
155 }
156
/**
 * Post-Walker: Checks, for the given Reload, whether it has only one user
 * and whether that user can perform the reload as part of its address mode.
 * Folds the Reload into the user if that is possible.
 */
static void memory_operand_walker(ir_node *irn, void *env)
{
	ir_node *block;
	ir_node *spill;

	(void)env; /* walker environment is unused */

	/* only Reload nodes are candidates for address-mode folding */
	if (! be_is_Reload(irn))
		return;

	/* only use memory operands, if the reload is only used by 1 node */
	if (get_irn_n_edges(irn) > 1)
		return;

	spill = be_get_Reload_mem(irn);
	block = get_nodes_block(irn);

	/* safe iteration: arch_perform_memory_operand() rewires the edge set */
	foreach_out_edge_safe(irn, edge) {
		ir_node *src = get_edge_src_irn(edge);
		int     pos  = get_edge_src_pos(edge);

		assert(src && "outedges broken!");

		/* fold only into users in the same block that support a memory
		 * operand at this input position */
		if (get_nodes_block(src) == block && arch_possible_memory_operand(src, pos)) {
			arch_perform_memory_operand(src, spill, pos);
		}
	}

	/* kill the Reload if it was folded */
	if (get_irn_n_edges(irn) == 0) {
		ir_graph *irg = get_irn_irg(irn);
		ir_mode  *frame_mode = get_irn_mode(get_irn_n(irn, n_be_Reload_frame));
		/* unlink from the schedule and detach its inputs by replacing
		 * them with Bad nodes (NOTE(review): mode_X for the mem input
		 * looks intentional here — confirm against be_Reload's input
		 * mode conventions) */
		sched_remove(irn);
		set_irn_n(irn, n_be_Reload_mem, new_r_Bad(irg, mode_X));
		set_irn_n(irn, n_be_Reload_frame, new_r_Bad(irg, frame_mode));
	}
}
199
200 /**
201  * Starts a walk for memory operands if supported by the backend.
202  */
203 void check_for_memory_operands(ir_graph *irg)
204 {
205         irg_walk_graph(irg, NULL, memory_operand_walker, NULL);
206 }
207
208
209 static be_node_stats_t last_node_stats;
210
211 /**
212  * Perform things which need to be done per register class before spilling.
213  */
/**
 * Perform things which need to be done per register class before spilling.
 */
static void pre_spill(be_chordal_env_t *const chordal_env, arch_register_class_t const *const cls, ir_graph *const irg)
{
	/* (re)initialize the per-class state of the chordal environment */
	chordal_env->cls              = cls;
	chordal_env->border_heads     = pmap_create();
	chordal_env->allocatable_regs = bitset_malloc(cls->n_regs);

	be_assure_live_chk(irg);

	/* put all ignore registers into the ignore register set. */
	be_put_allocatable_regs(irg, cls, chordal_env->allocatable_regs);

	/* satisfy register constraints before spilling (timed as T_RA_CONSTR) */
	be_timer_push(T_RA_CONSTR);
	be_pre_spill_prepare_constr(irg, cls);
	be_timer_pop(T_RA_CONSTR);

	dump(BE_CH_DUMP_CONSTR, irg, cls, "constr-pre");
}
231
232 /**
233  * Perform things which need to be done per register class after spilling.
234  */
/**
 * Perform things which need to be done per register class after spilling:
 * fold memory operands, verify, color, build the interference graph,
 * coalesce copies, destroy SSA form, and release the per-class state
 * allocated in pre_spill().
 */
static void post_spill(be_chordal_env_t *const chordal_env, ir_graph *const irg)
{
	/* some special classes contain only ignore regs, no work to be done */
	int const allocatable_regs = be_get_n_allocatable_regs(irg, chordal_env->cls);
	if (allocatable_regs > 0) {
		/*
			If we have a backend provided spiller, post spill is
			called in a loop after spilling for each register class.
			But we only need to fix stack nodes once in this case.
		*/
		be_timer_push(T_RA_SPILL_APPLY);
		check_for_memory_operands(irg);
		be_abi_fix_stack_nodes(irg);
		be_timer_pop(T_RA_SPILL_APPLY);


		/* verify schedule and register pressure */
		be_timer_push(T_VERIFY);
		if (chordal_env->opts->vrfy_option == BE_CH_VRFY_WARN) {
			be_verify_schedule(irg);
			be_verify_register_pressure(irg, chordal_env->cls);
		} else if (chordal_env->opts->vrfy_option == BE_CH_VRFY_ASSERT) {
			assert(be_verify_schedule(irg) && "Schedule verification failed");
			assert(be_verify_register_pressure(irg, chordal_env->cls)
				&& "Register pressure verification failed");
		}
		be_timer_pop(T_VERIFY);

		/* Color the graph. */
		be_timer_push(T_RA_COLOR);
		be_ra_chordal_coloring(chordal_env);
		be_timer_pop(T_RA_COLOR);

		dump(BE_CH_DUMP_CONSTR, irg, chordal_env->cls, "color");

		/* Create the ifg with the selected flavor */
		be_timer_push(T_RA_IFG);
		chordal_env->ifg = be_create_ifg(chordal_env);
		be_timer_pop(T_RA_IFG);

		/* emit interference-graph and node statistics before coalescing */
		if (stat_ev_enabled) {
			be_ifg_stat_t   stat;
			be_node_stats_t node_stats;

			be_ifg_stat(irg, chordal_env->ifg, &stat);
			stat_ev_dbl("bechordal_ifg_nodes", stat.n_nodes);
			stat_ev_dbl("bechordal_ifg_edges", stat.n_edges);
			stat_ev_dbl("bechordal_ifg_comps", stat.n_comps);

			be_collect_node_stats(&node_stats, irg);
			be_subtract_node_stats(&node_stats, &last_node_stats);

			stat_ev_dbl("bechordal_perms_before_coal",
					node_stats[BE_STAT_PERMS]);
			stat_ev_dbl("bechordal_copies_before_coal",
					node_stats[BE_STAT_COPIES]);
		}

		/* copy minimization (coalescing) */
		be_timer_push(T_RA_COPYMIN);
		co_driver(chordal_env);
		be_timer_pop(T_RA_COPYMIN);

		dump(BE_CH_DUMP_COPYMIN, irg, chordal_env->cls, "copymin");

		/* ssa destruction */
		be_timer_push(T_RA_SSA);
		be_ssa_destruction(chordal_env);
		be_timer_pop(T_RA_SSA);

		dump(BE_CH_DUMP_SSADESTR, irg, chordal_env->cls, "ssadestr");

		/* check SSA destruction unless verification is off entirely */
		if (chordal_env->opts->vrfy_option != BE_CH_VRFY_OFF) {
			be_timer_push(T_VERIFY);
			be_ssa_destruction_check(chordal_env);
			be_timer_pop(T_VERIFY);
		}

		/* the ifg exists only if there are allocatable regs */
		be_ifg_free(chordal_env->ifg);
	}

	/* free some always allocated data structures */
	pmap_destroy(chordal_env->border_heads);
	bitset_free(chordal_env->allocatable_regs);
}
320
321 /**
322  * Performs chordal register allocation for each register class on given irg.
323  *
324  * @param irg    the graph
325  * @return Structure containing timer for the single phases or NULL if no
326  *         timing requested.
327  */
328 static void be_ra_chordal_main(ir_graph *irg)
329 {
330         const arch_env_t *arch_env = be_get_irg_arch_env(irg);
331         int               j;
332         int               m;
333
334         be_timer_push(T_RA_OTHER);
335
336         be_chordal_env_t chordal_env;
337         obstack_init(&chordal_env.obst);
338         chordal_env.opts             = &options;
339         chordal_env.irg              = irg;
340         chordal_env.border_heads     = NULL;
341         chordal_env.ifg              = NULL;
342         chordal_env.allocatable_regs = NULL;
343
344         if (stat_ev_enabled) {
345                 be_collect_node_stats(&last_node_stats, irg);
346         }
347
348         /* use one of the generic spiller */
349
350         /* Perform the following for each register class. */
351         for (j = 0, m = arch_env->n_register_classes; j < m; ++j) {
352                 const arch_register_class_t *cls = &arch_env->register_classes[j];
353
354                 if (arch_register_class_flags(cls) & arch_register_class_flag_manual_ra)
355                         continue;
356
357
358                 stat_ev_ctx_push_str("bechordal_cls", cls->name);
359
360                 double pre_spill_cost = 0;
361                 if (stat_ev_enabled) {
362                         be_do_stat_reg_pressure(irg, cls);
363                         pre_spill_cost = be_estimate_irg_costs(irg);
364                 }
365
366                 pre_spill(&chordal_env, cls, irg);
367
368                 be_timer_push(T_RA_SPILL);
369                 be_do_spill(irg, cls);
370                 be_timer_pop(T_RA_SPILL);
371
372                 dump(BE_CH_DUMP_SPILL, irg, cls, "spill");
373
374                 stat_ev_dbl("bechordal_spillcosts", be_estimate_irg_costs(irg) - pre_spill_cost);
375
376                 post_spill(&chordal_env, irg);
377
378                 if (stat_ev_enabled) {
379                         be_node_stats_t node_stats;
380
381                         be_collect_node_stats(&node_stats, irg);
382                         be_subtract_node_stats(&node_stats, &last_node_stats);
383                         be_emit_node_stats(&node_stats, "bechordal_");
384
385                         be_copy_node_stats(&last_node_stats, &node_stats);
386                         stat_ev_ctx_pop("bechordal_cls");
387                 }
388         }
389
390         be_timer_push(T_VERIFY);
391         if (chordal_env.opts->vrfy_option == BE_CH_VRFY_WARN) {
392                 be_verify_register_allocation(irg);
393         } else if (chordal_env.opts->vrfy_option == BE_CH_VRFY_ASSERT) {
394                 assert(be_verify_register_allocation(irg)
395                                 && "Register allocation invalid");
396         }
397         be_timer_pop(T_VERIFY);
398
399         be_timer_push(T_RA_EPILOG);
400         lower_nodes_after_ra(irg, options.lower_perm_opt == BE_CH_LOWER_PERM_COPY);
401         dump(BE_CH_DUMP_LOWER, irg, NULL, "belower-after-ra");
402
403         obstack_free(&chordal_env.obst, NULL);
404         be_invalidate_live_sets(irg);
405         be_timer_pop(T_RA_EPILOG);
406
407         be_timer_pop(T_RA_OTHER);
408 }
409
410 BE_REGISTER_MODULE_CONSTRUCTOR(be_init_chordal_main)
411 void be_init_chordal_main(void)
412 {
413         static be_ra_t be_ra_chordal_allocator = {
414                 be_ra_chordal_main,
415         };
416
417         lc_opt_entry_t *be_grp = lc_opt_get_grp(firm_opt_get_root(), "be");
418         lc_opt_entry_t *ra_grp = lc_opt_get_grp(be_grp, "ra");
419         lc_opt_entry_t *chordal_grp = lc_opt_get_grp(ra_grp, "chordal");
420
421         be_register_allocator("chordal", &be_ra_chordal_allocator);
422
423         lc_opt_add_table(chordal_grp, be_chordal_options);
424         be_add_module_list_opt(chordal_grp, "coloring", "select coloring method", &colorings, (void**) &selected_coloring);
425 }